source
stringlengths
3
92
c
stringlengths
26
2.25M
SoaDistanceTableBA.h
//////////////////////////////////////////////////////////////////////////////////////
// This file is distributed under the University of Illinois/NCSA Open Source License.
// See LICENSE file in top directory for details.
//
// Copyright (c) 2016 Jeongnim Kim and QMCPACK developers.
//
// File developed by: Jeongnim Kim, jeongnim.kim@intel.com, Intel Corp.
//                    Amrita Mathuriya, amrita.mathuriya@intel.com, Intel Corp.
//
// File created by: Jeongnim Kim, jeongnim.kim@intel.com, Intel Corp.
//////////////////////////////////////////////////////////////////////////////////////
// -*- C++ -*-
#ifndef QMCPLUSPLUS_DTDIMPL_BA_H
#define QMCPLUSPLUS_DTDIMPL_BA_H

namespace qmcplusplus
{

/**@ingroup nnlist
 * @brief A derived class from DistanceTableData, specialized for asymmetric
 *        (source A -> target B) tables using a transposed, SoA memory layout.
 *
 * Distances is stored as [Ntargets][Nsources_padded]: one contiguous,
 * alignment-padded row of source distances per target particle.
 * Displacements[iat] is an SoA view (one component plane per dimension)
 * attached into the shared memoryPool buffer.
 *
 * T  = real type, D = spatial dimension, SC = boundary-condition
 * specialization selector forwarded to DTD_BConds.
 */
template<typename T, unsigned D, int SC>
struct SoaDistanceTableBA: public DTD_BConds<T,D,SC>, public DistanceTableData
{
  int Nsources;   ///< number of source (A) particles
  int Ntargets;   ///< number of target (B) particles
  int BlockSize;  ///< padded floats per target row in memoryPool (= Nsources_padded * D)

  /** Build the A->B table; boundary conditions come from the source lattice.
   *  NOTE(review): target is taken non-const but only its particle count is
   *  read here — presumably required by the DistanceTableData base; confirm.
   */
  SoaDistanceTableBA(const ParticleSet& source, ParticleSet& target)
    : DTD_BConds<T,D,SC>(source.Lattice), DistanceTableData(source,target)
  {
    resize(source.getTotalNum(),target.getTotalNum());
  }

  /** (Re)allocate all per-pair storage for ns sources and nt targets.
   *
   * Rows are padded to the SIMD alignment returned by getAlignedSize<T>.
   * Each Displacements[i] does not own memory; it is attached as a view
   * into the single memoryPool allocation at offset i*BlockSize.
   */
  void resize(int ns, int nt)
  {
    N[SourceIndex]=Nsources=ns;
    N[VisitorIndex]=Ntargets=nt;
    if(Nsources*Ntargets==0) return;  // empty table: nothing to allocate
    int Ntargets_padded=getAlignedSize<T>(Ntargets);
    int Nsources_padded=getAlignedSize<T>(Nsources);
    Distances.resize(Ntargets,Nsources_padded);
    BlockSize=Nsources_padded*D;
    memoryPool.resize(Ntargets*BlockSize);
    Displacements.resize(Ntargets);
    for(int i=0; i<Ntargets; ++i)
      Displacements[i].attachReference(Nsources,Nsources_padded,memoryPool.data()+i*BlockSize);
    // scratch rows used by move()/moveOnSphere() before update() commits them
    Temp_r.resize(Nsources);
    Temp_dr.resize(Nsources);
  }

#if (__cplusplus >= 201103L)
  SoaDistanceTableBA()=delete;
  SoaDistanceTableBA(const SoaDistanceTableBA&)=delete;
#endif

  ~SoaDistanceTableBA() {}

  /** evaluate the full table
   *
   * Threads split the source index range on aligned boundaries; every thread
   * walks all targets, so writes to a given row are disjoint by [first,last).
   */
  inline void evaluate(ParticleSet& P)
  {
    #pragma omp parallel
    {
      int first, last;
      FairDivideAligned(Nsources, getAlignment<T>(),
                        omp_get_num_threads(), omp_get_thread_num(), first, last);
      //be aware of the sign of Displacement
      for(int iat=0; iat<Ntargets; ++iat)
        DTD_BConds<T,D,SC>::computeDistances(P.R[iat],Origin->RSoA, Distances[iat], Displacements[iat], first, last);
    }
  }

  /** evaluate the iat-row with the current position
   *
   * Fill Temp_r and Temp_dr and copy them Distances & Displacements
   */
  inline void evaluate(ParticleSet& P, IndexType iat)
  {
    DTD_BConds<T,D,SC>::computeDistances(P.R[iat], Origin->RSoA, Distances[iat],Displacements[iat], 0, Nsources);
  }

  /// compute distances from a trial position rnew into the Temp_* scratch rows
  inline void moveOnSphere(const ParticleSet& P, const PosType& rnew)
  {
    DTD_BConds<T,D,SC>::computeDistances(rnew, Origin->RSoA, Temp_r.data(),Temp_dr, 0, Nsources);
  }

  ///evaluate the temporary pair relations
  inline void move(const ParticleSet& P, const PosType& rnew)
  {
    DTD_BConds<T,D,SC>::computeDistances(rnew, Origin->RSoA, Temp_r.data(),Temp_dr, 0, Nsources);
  }

  ///update the stripe for jat-th particle
  inline void update(IndexType iat)
  {
    // commit the accepted move: Temp_* scratch -> row iat, component by component
    simd::copy_n(Temp_r.data(),Nsources,Distances[iat]);
    for(int idim=0;idim<D; ++idim)
      simd::copy_n(Temp_dr.data(idim),Nsources,Displacements[iat].data(idim));
  }

  /** Compact list of targets within rcut of source iat.
   *  Writes target ids, distances and (sign-flipped) displacements;
   *  returns the number of neighbors found.
   *  NOTE(review): here iat indexes a SOURCE column (Distances[jat][iat]),
   *  unlike get_first_neighbor where iat indexes a target row.
   */
  size_t get_neighbors(int iat, RealType rcut, int* restrict jid, RealType* restrict dist, PosType* restrict displ) const
  {
    constexpr T cminus(-1);
    size_t nn=0;
    for(int jat=0; jat<Ntargets; ++jat)
    {
      const RealType rij=Distances[jat][iat];
      if(rij<rcut)
      {//make the compact list
        jid[nn]=jat;
        dist[nn]=rij;
        displ[nn]=cminus*Displacements[jat][iat];  // flip sign: table stores source->target
        nn++;
      }
    }
    return nn;
  }

  /** Find the closest source to target iat.
   *  If newpos, scan the Temp_* rows from the pending move instead of the
   *  committed row iat. Returns the source index, or -1 if Nsources==0;
   *  r/dr are written only when a neighbor exists.
   */
  int get_first_neighbor(IndexType iat, RealType& r, PosType& dr, bool newpos) const
  {
    RealType min_dist = std::numeric_limits<RealType>::max();
    int index=-1;
    if(newpos)
    {
      for(int jat=0; jat<Nsources; ++jat)
        if(Temp_r[jat]<min_dist)
        {
          min_dist = Temp_r[jat];
          index = jat;
        }
      if(index>=0)
      {
        r=min_dist;
        dr=Temp_dr[index];
      }
    }
    else
    {
      for(int jat=0; jat<Nsources; ++jat)
        if(Distances[iat][jat]<min_dist)
        {
          min_dist = Distances[iat][jat];
          index = jat;
        }
      if(index>=0)
      {
        r=min_dist;
        dr=Displacements[iat][index];
      }
    }
    return index;
  }

  /** Distance-only variant of get_neighbors (no ids, no displacements).
   *  Same column-wise scan over targets for source iat.
   */
  size_t get_neighbors(int iat, RealType rcut, RealType* restrict dist) const
  {
    size_t nn=0;
    for(int jat=0; jat<Ntargets; ++jat)
    {
      const RealType rij=Distances[jat][iat];
      if(rij<rcut)
      {//make the compact list
        dist[nn]=rij;
        nn++;
      }
    }
    return nn;
  }
};
}
#endif
psr_profile.c
/***************************************************************************** * PSRGEOM * Sam McSweeney, 2018 * * This program attempts to simulate the profile as observed from a pulsar * with given angles α and ζ, period P, and frequency range f1 to f2. * * For each sampled field line, the line is followed outward in incremental * steps. At each step, the average beam pattern of emitting particles is * calculated, with the gamma factors of the particles drawn from a pre- * determined distribution. * * The final output is a two-column file: * 1) the rotation phase in degrees * 2) the profile power (in arbitrary units) * ****************************************************************************/ #include <stdlib.h> #include <stdio.h> #include <unistd.h> #include <string.h> #include <math.h> #include <time.h> //#include <omp.h> #include "psrgeom.h" struct opts { double al_deg; // alpha angle in deg double ze_deg; // zeta angle in deg double P_sec; // period, in sec char *outfile; // name of output file (NULL means stdout) double s_start; // starting value of s double s_stop; // stopping value of s double p_start; // starting value of p double p_stop; // stopping value of p double f_start; // starting value of freq (MHz) double f_stop; // stopping value of freq (MHz) int open_only; // only consider open field lines int num_lines; // sample this many lines int nsparks; // number of sparks in carousel int dipole; // use dipole field? 
int nbins; // number of profile phase bins double P4_sec; // the rotation time of the carousel int csl_type; // the type of spark profile (TOPHAT or GAUSSIAN) }; void usage(); void parse_cmd_line( int argc, char *argv[], struct opts *o ); void print_col_headers( FILE *f ); int main( int argc, char *argv[] ) { // Seed the random number generator srand( time( NULL ) ); // Generic counter: int i; // Set up struct for command line options and set default values struct opts o; o.al_deg = NAN; o.P_sec = NAN; o.ze_deg = NAN; o.outfile = NULL; o.s_start = NAN; o.s_stop = NAN; o.p_start = 0.0; o.p_stop = 360.0; o.f_start = NAN; o.f_stop = NAN; o.open_only = 0; o.num_lines = 10000; o.nsparks = 0; o.dipole = 0; // use Deutsch field by default o.nbins = 1024; o.P4_sec = NAN; o.csl_type = GAUSSIAN; parse_cmd_line( argc, argv, &o ); // Set up output file FILE *f; if (o.outfile == NULL) f = stdout; else { f = fopen( o.outfile, "w" ); if (f == NULL) { fprintf( stderr, "error: could not open file %s\n", o.outfile ); exit(EXIT_FAILURE); } } // Set up pulsar pulsar psr; psr_angle *ra = NULL; psr_angle *dec = NULL; psr_angle *al = create_psr_angle_deg( o.al_deg ); psr_angle *ze = create_psr_angle_deg( o.ze_deg ); double P = o.P_sec; double r = 1e4; /* This will be used later as we move outwards from the pulsar surface */ set_pulsar( &psr, ra, dec, P, r, al, ze ); if (o.dipole) psr.field_type = DIPOLE; // Set up the carousel psr_angle s, S; set_psr_angle_deg( &S, (o.s_stop + o.s_start) / 2.0 ); set_psr_angle_deg( &s, (o.s_stop - o.s_start) / 2.0 ); set_pulsar_carousel( &psr, o.nsparks, &s, &S, o.csl_type, o.P4_sec ); // Write the file header print_psrg_header( f, argc, argv ); // Some needed variables double profile[o.nbins]; int bin_count[o.nbins]; int centre_bin = o.nbins/2; // Some default values int rL_norm = 0; // Reset profile to zero for (i = 0; i < o.nbins; i++) { profile[i] = 0.0; bin_count[i] = 0; } // Write the column headers print_col_headers( f ); //#pragma omp parallel 
for /* At the moment, this doesn't seem to help */ for (i = 0; i < o.num_lines; i++) { fprintf( stderr, "\r\r\r\r%3d%%", (int)(100.0*(double)i/(double)(o.num_lines-1)) ); int linetype; // either CLOSED_LINE or OPEN_LINE point foot_pt; point init_pt; // Obtain a random point on the pulsar surface random_spark_footpoint( &foot_pt, NULL, &psr, 0.0 ); // If requested, check that we're on an open field line if (o.open_only) { linetype = get_fieldline_type( &foot_pt, &psr, rL_norm, NULL, NULL, NULL ); if (linetype == CLOSED_LINE) { continue; } } // Now climb up the field line, emitting as we go // Start 1 metre above the surface Bstep( &foot_pt, &psr, 1.0, DIR_OUTWARD, &init_pt, NULL ); set_point_xyz( &init_pt, init_pt.x[0], init_pt.x[1], init_pt.x[2], POINT_SET_ALL ); fieldline_to_profile( &psr, &init_pt, o.f_start*1.0e6, o.f_stop*1.0e6, o.nbins, centre_bin, profile, bin_count ); } fprintf( stderr, "\n" ); // Print out the profile double phase_deg; double bin_width = 360.0 / (double)o.nbins; for (i = 0; i < o.nbins; i++) { // Convert bin number to phase phase_deg = (double)(i - centre_bin) * bin_width; fprintf( f, "%.15e %.15e %d\n", phase_deg, profile[i], bin_count[i] ); } // Clean up destroy_psr_angle( ra ); destroy_psr_angle( dec ); destroy_psr_angle( al ); destroy_psr_angle( ze ); free( o.outfile ); if (o.outfile != NULL) fclose( f ); return 0; } void usage() { printf( "usage: psr_visiblepoints [OPTIONS]\n\n" ); printf( "REQUIRED OPTIONS:\n" ); printf( " -a alpha The angle between the rotation and magetic axes " "in degrees (required)\n" ); printf( " -f f1:f2 The emission frequency, in MHz. " "The range is from f1 to f2.\n" ); printf( " -P period The rotation period of the pulsar, in seconds " "(required)\n" ); printf( " -s s1:s2 The angular distance from the magnetic axis, " "in degrees. 
The range is from s1 to s2.\n" ); printf( " -z zeta The angle between the rotation axis and the line " "of sight in degrees (required)\n" ); printf( " -4 P4 The carousel's rotation period (in sec)\n" ); printf( "\nOTHER OPTIONS:\n" ); printf( " -b nbins The number of bins in the output profile\n" ); printf( " -c type The spark profile type, either GAUSSIAN (default) " "or TOPHAT\n" ); printf( " -d Use a dipole field instead of the default " "Deutsch field\n" ); printf( " -h Display this help and exit\n" ); printf( " -n nlines Sample nlines magnetic field lines " "(default: 10000)\n" ); printf( " -N nsparks The number of sparks in the carousel. If nsparks " "= 0 (default), the footpoints are sampled " "uniformly in the range given by -s. Otherwise, " "the s-range is used to define the spark size.\n" ); printf( " -o outfile The name of the output file to write to. If not " "set, output will be written to stdout.\n" ); printf( " -O Only consider open field lines (default: off)\n" ); printf( " -p p1:p2 The azimuth relative to the magnetic axis, " "in degrees. The range is from p1 to p2. 
Ensure " "p1 < p2 [default = 0:360]\n" ); } void parse_cmd_line( int argc, char *argv[], struct opts *o ) { // Collect the command line arguments int c; while ((c = getopt( argc, argv, "a:b:c:df:hn:N:o:Op:P:s:S:z:4:")) != -1) { switch (c) { case 'a': o->al_deg = atof(optarg); break; case 'b': o->nbins = atoi(optarg); break; case 'c': if (strcmp( optarg, "GAUSSIAN" ) == 0) o->csl_type = GAUSSIAN; else if (strcmp( optarg, "TOPHAT" ) == 0) o->csl_type = TOPHAT; else { fprintf( stderr, "error: -c argument must be either " "GAUSSIAN or TOPHAT\n" ); exit(EXIT_FAILURE); } break; case 'd': o->dipole = 1; break; case 'f': parse_range( optarg, &(o->f_start), &(o->f_stop), NULL ); break; case 'h': usage(); exit(EXIT_SUCCESS); break; case 'n': o->num_lines = atoi(optarg); break; case 'N': o->nsparks = atoi(optarg); break; case 'o': o->outfile = strdup(optarg); break; case 'O': o->open_only = 1; break; case 'p': parse_range( optarg, &(o->p_start), &(o->p_stop), NULL ); break; case 'P': o->P_sec = atof(optarg); break; case 's': parse_range( optarg, &(o->s_start), &(o->s_stop), NULL ); break; case 'z': o->ze_deg = atof(optarg); break; case '4': o->P4_sec = atof(optarg); break; case '?': fprintf( stderr, "error: unknown option character '-%c'\n", optopt ); exit(EXIT_FAILURE); break; default: fprintf( stderr, "error: couldn't parse command line\n" ); exit(EXIT_FAILURE); } } // Check that all the arguments are valid if (isnan(o->al_deg) || isnan(o->P_sec) || isnan(o->ze_deg) || isnan(o->P4_sec)) { fprintf( stderr, "error: -a, -P, -z and -4 options required" "\n" ); usage(); exit(EXIT_FAILURE); } if (isnan(o->s_start) || isnan(o->f_start)) { fprintf( stderr, "error: -f and -s options required\n" ); usage(); exit(EXIT_FAILURE); } if (o->nsparks < 0) { fprintf( stderr, "error: -N (=%d) must be >= 0\n", o->nsparks ); exit(EXIT_FAILURE); } } void print_col_headers( FILE *f ) /* The final output includes: * 1) the rotation phase in degrees * 2) the profile power (in arbitrary units) */ { 
// Print out a line to file handle f fprintf( f, "# phase_deg power\n" ); }
fac_amr_fcoarsen.c
/*BHEADER**********************************************************************
 * Copyright (c) 2008,  Lawrence Livermore National Security, LLC.
 * Produced at the Lawrence Livermore National Laboratory.
 * This file is part of HYPRE.  See file COPYRIGHT for details.
 *
 * HYPRE is free software; you can redistribute it and/or modify it under the
 * terms of the GNU Lesser General Public License (as published by the Free
 * Software Foundation) version 2.1 dated February 1999.
 *
 * $Revision$
 ***********************************************************************EHEADER*/

/******************************************************************************
 * OpenMP Problems
 *
 * Need to fix the way these variables are set and incremented in loops:
 *    vals
 *
 ******************************************************************************/

#include "_hypre_sstruct_ls.h"
#include "fac.h"

/* Map a 3D stencil offset (components in {-1,0,1}) to a linear rank in
 * [0,26]: each -1 component is remapped to 2, then rank = i + 3*j + 9*k
 * (base-3 encoding). Declares locals in a block, so usable as a statement. */
#define MapStencilRank(stencil, rank) \
{ \
   HYPRE_Int ii,jj,kk; \
   ii = hypre_IndexX(stencil); \
   jj = hypre_IndexY(stencil); \
   kk = hypre_IndexZ(stencil); \
   if (ii==-1) \
      ii=2; \
   if (jj==-1) \
      jj=2; \
   if (kk==-1) \
      kk=2; \
   rank = ii + 3*jj + 9*kk; \
}

/* Inverse of MapStencilRank: decode a rank in [0,26] back into a 3D stencil
 * offset, turning the encoded value 2 back into -1 for each component. */
#define InverseMapStencilRank(rank, stencil) \
{ \
   HYPRE_Int ij,ii,jj,kk; \
   ij = (rank%9); \
   ii = (ij%3); \
   jj = (ij-ii)/3; \
   kk = (rank-3*jj-ii)/9; \
   if (ii==2) \
      ii= -1; \
   if (jj==2) \
      jj= -1; \
   if (kk==2) \
      kk= -1; \
   hypre_SetIndex3(stencil, ii, jj, kk); \
}

/* Sum of absolute values of the stencil offset components (L1 norm);
 * 0 = centre, 1 = face neighbour, 2 = edge, 3 = corner. */
#define AbsStencilShape(stencil, abs_shape) \
{ \
   HYPRE_Int ii,jj,kk; \
   ii = hypre_IndexX(stencil); \
   jj = hypre_IndexY(stencil); \
   kk = hypre_IndexZ(stencil); \
   abs_shape= hypre_abs(ii) + hypre_abs(jj) + hypre_abs(kk); \
}

/*--------------------------------------------------------------------------
 * hypre_AMR_FCoarsen: Coarsen the fbox and f/c connections.  Forms the
 * coarse operator by averaging neighboring connections in the refinement
 * patch.
*--------------------------------------------------------------------------*/ HYPRE_Int hypre_AMR_FCoarsen( hypre_SStructMatrix * A, hypre_SStructMatrix * fac_A, hypre_SStructPMatrix * A_crse, hypre_Index refine_factors, HYPRE_Int level ) { hypre_Box fine_box; hypre_Box intersect_box; MPI_Comm comm = hypre_SStructMatrixComm(A); hypre_SStructGraph *graph = hypre_SStructMatrixGraph(A); HYPRE_Int graph_type = hypre_SStructGraphObjectType(graph); hypre_SStructGrid *grid = hypre_SStructGraphGrid(graph); HYPRE_IJMatrix ij_A = hypre_SStructMatrixIJMatrix(A); HYPRE_Int matrix_type= hypre_SStructMatrixObjectType(A); HYPRE_Int ndim = hypre_SStructMatrixNDim(A); hypre_SStructPMatrix *A_pmatrix = hypre_SStructMatrixPMatrix(fac_A, level); hypre_StructMatrix *smatrix_var; hypre_StructStencil *stencils, *stencils_last; HYPRE_Int stencil_size, stencil_last_size; hypre_Index stencil_shape_i, stencil_last_shape_i; hypre_Index loop_size; hypre_Box loop_box; HYPRE_Real **a_ptrs; hypre_Box *A_dbox; HYPRE_Int part_crse= level-1; HYPRE_Int part_fine= level; hypre_StructMatrix *crse_smatrix; HYPRE_Real *crse_ptr; HYPRE_Real **crse_ptrs; hypre_Box *crse_dbox; hypre_StructGrid *cgrid; hypre_BoxArray *cgrid_boxes; hypre_Box *cgrid_box; hypre_Index cstart; hypre_Index fstart, fend; hypre_Index stridec, stridef; hypre_StructGrid *fgrid; hypre_BoxArray *fgrid_boxes; hypre_Box *fgrid_box; hypre_BoxArray ***fgrid_crse_extents; hypre_BoxArray ***fbox_interior; hypre_BoxArrayArray ***fbox_bdy; HYPRE_Int ***interior_fboxi; HYPRE_Int ***bdy_fboxi; HYPRE_Int ***cboxi_fboxes; HYPRE_Int **cboxi_fcnt; hypre_BoxArray *fbox_interior_ci, *fbox_bdy_ci_fi; hypre_BoxArrayArray *fbox_bdy_ci; HYPRE_Int *interior_fboxi_ci; HYPRE_Int *bdy_fboxi_ci; HYPRE_Int centre; hypre_BoxArray *data_space; HYPRE_Int ci, fi, arrayi; HYPRE_Int max_stencil_size= 27; HYPRE_Int trueV = 1; HYPRE_Int falseV= 0; HYPRE_Int found, sort; HYPRE_Int stencil_marker; HYPRE_Int *stencil_ranks, *rank_stencils; HYPRE_Int *stencil_contrib_cnt; 
HYPRE_Int **stencil_contrib_i; HYPRE_Real **weight_contrib_i; HYPRE_Real weights[4]= {1.0, 0.25, 0.125, 0.0625}; HYPRE_Real sum; HYPRE_Int abs_stencil_shape; hypre_Box **shift_box; hypre_Box coarse_cell_box; HYPRE_Int volume_coarse_cell_box; HYPRE_Int *volume_shift_box; HYPRE_Int max_contribut_size, stencil_i, rank; HYPRE_Int startrank; HYPRE_Real *vals, *vals2; HYPRE_Int i, j, k, l, m, n, ll, kk, jj; HYPRE_Int nvars, var1, var2, var2_start; HYPRE_Int iA, iAc, iA_shift_z, iA_shift_zy, iA_shift_zyx; hypre_Index lindex; hypre_Index index1, index2; hypre_Index index_temp; HYPRE_Int **box_graph_indices; HYPRE_Int *box_graph_cnts; HYPRE_Int *box_ranks, *box_ranks_cnt, *box_to_ranks_cnt; HYPRE_Int *cdata_space_ranks, *box_starts, *box_ends; HYPRE_Int *box_connections; HYPRE_Int **coarse_contrib_Uv; HYPRE_Int *fine_interface_ranks; HYPRE_Int nUventries= hypre_SStructGraphNUVEntries(graph); HYPRE_Int *iUventries = hypre_SStructGraphIUVEntries(graph); hypre_SStructUVEntry **Uventries = hypre_SStructGraphUVEntries(graph); hypre_SStructUVEntry *Uventry; HYPRE_Int nUentries, cnt1; hypre_Index index, *cindex, *Uv_cindex; HYPRE_Int box_array_size, cbox_array_size; HYPRE_Int nrows, to_rank; HYPRE_Int *ncols, *rows, *cols; HYPRE_Int **interface_max_stencil_ranks; HYPRE_Int **interface_max_stencil_cnt; HYPRE_Int **interface_rank_stencils; HYPRE_Int **interface_stencil_ranks; HYPRE_Int *coarse_stencil_cnt; HYPRE_Real *stencil_vals; HYPRE_Int *common_rank_stencils, *common_stencil_ranks; HYPRE_Int *common_stencil_i; hypre_BoxManEntry *boxman_entry; HYPRE_Int *temp1, *temp2; HYPRE_Real *temp3; HYPRE_Real sum_contrib, scaling; HYPRE_Int **OffsetA; HYPRE_Int *parents; HYPRE_Int *parents_cnodes; HYPRE_Int myid; hypre_MPI_Comm_rank(comm, &myid); hypre_BoxInit(&fine_box, ndim); hypre_BoxInit(&intersect_box, ndim); hypre_BoxInit(&loop_box, ndim); hypre_BoxInit(&coarse_cell_box, ndim); /*-------------------------------------------------------------------------- * Task: Coarsen the fbox and 
f/c connections to form the coarse grid * operator inside the fgrid. *--------------------------------------------------------------------------*/ if (graph_type == HYPRE_SSTRUCT) { startrank = hypre_SStructGridGhstartRank(grid); } if (graph_type == HYPRE_PARCSR) { startrank = hypre_SStructGridStartRank(grid); } /*-------------------------------------------------------------------------- * Fine grid strides by the refinement factors. *--------------------------------------------------------------------------*/ hypre_SetIndex3(stridec, 1, 1, 1); for (i= 0; i< ndim; i++) { stridef[i]= refine_factors[i]; } for (i= ndim; i< 3; i++) { stridef[i]= 1; } /*-------------------------------------------------------------------------- * Scaling for averaging row sum. *--------------------------------------------------------------------------*/ scaling= 1.0; for (i= 0; i< ndim-2; i++) { scaling*= refine_factors[0]; } /*-------------------------------------------------------------------------- * Determine the coarsened fine grid- fgrid_crse_extents. * These are between fpart= level and cpart= (level-1). The * fgrid_crse_extents will be indexed by cboxes- the boxarray of coarsened * fboxes FULLY in a given cbox. * * Also, determine the interior and boundary boxes of each fbox. Having * these will allow us to determine the f/c interface nodes without * extensive checking. These are also indexed by the cboxes. * fgrid_interior- for each cbox, we have a collection of child fboxes, * each leading to an interior=> boxarray * fgrid_bdy - for each cbox, we have a collection of child fboxes, * each leading to a boxarray of bdies=> boxarrayarray. * Because we need to know the fbox id for these boxarray/boxarrayarray, * we will need one for each fbox. * * And, determine which cboxes contain a given fbox. That is, given a * fbox, find all cboxes that contain a chunk of it. 
*--------------------------------------------------------------------------*/ nvars = hypre_SStructPMatrixNVars(A_pmatrix); fgrid_crse_extents = hypre_TAlloc(hypre_BoxArray **, nvars); fbox_interior = hypre_TAlloc(hypre_BoxArray **, nvars); fbox_bdy = hypre_TAlloc(hypre_BoxArrayArray **, nvars); interior_fboxi = hypre_TAlloc(HYPRE_Int **, nvars); bdy_fboxi = hypre_TAlloc(HYPRE_Int **, nvars); cboxi_fboxes = hypre_TAlloc(HYPRE_Int **, nvars); cboxi_fcnt = hypre_TAlloc(HYPRE_Int *, nvars); for (var1= 0; var1< nvars; var1++) { cgrid= hypre_SStructPGridSGrid(hypre_SStructPMatrixPGrid(A_crse), var1); cgrid_boxes= hypre_StructGridBoxes(cgrid); fgrid_crse_extents[var1]= hypre_TAlloc(hypre_BoxArray *, hypre_BoxArraySize(cgrid_boxes)); fbox_interior[var1]= hypre_TAlloc(hypre_BoxArray *, hypre_BoxArraySize(cgrid_boxes)); fbox_bdy[var1] = hypre_TAlloc(hypre_BoxArrayArray *, hypre_BoxArraySize(cgrid_boxes)); interior_fboxi[var1]= hypre_TAlloc(HYPRE_Int *, hypre_BoxArraySize(cgrid_boxes)); bdy_fboxi[var1] = hypre_TAlloc(HYPRE_Int *, hypre_BoxArraySize(cgrid_boxes)); fgrid= hypre_SStructPGridSGrid(hypre_SStructPMatrixPGrid(A_pmatrix), var1); fgrid_boxes= hypre_StructGridBoxes(fgrid); cboxi_fboxes[var1]= hypre_CTAlloc(HYPRE_Int *, hypre_BoxArraySize(fgrid_boxes)); cboxi_fcnt[var1] = hypre_CTAlloc(HYPRE_Int , hypre_BoxArraySize(fgrid_boxes)); /*----------------------------------------------------------------------- * Determine the fine grid boxes that are underlying a coarse grid box. * Coarsen the indices to determine the looping extents of these * boxes. Also, find the looping extents for the extended coarsened * boxes, and the interior and boundary extents of a fine_grid box. * The fine_grid boxes must be adjusted so that only the coarse nodes * inside these boxes are included. Only the lower bound needs to be * adjusted. 
*-----------------------------------------------------------------------*/ hypre_ForBoxI(ci, cgrid_boxes) { cgrid_box= hypre_BoxArrayBox(cgrid_boxes, ci); hypre_CopyIndex(hypre_BoxIMin(cgrid_box), cstart); cnt1= 0; temp1= hypre_CTAlloc(HYPRE_Int, hypre_BoxArraySize(fgrid_boxes)); hypre_ClearIndex(index_temp); hypre_ForBoxI(fi, fgrid_boxes) { fgrid_box= hypre_BoxArrayBox(fgrid_boxes, fi); hypre_CopyIndex(hypre_BoxIMin(fgrid_box), fstart); for (i= 0; i< ndim; i++) { j= fstart[i]%refine_factors[i]; if (j) { fstart[i]+= refine_factors[i] - j; } } hypre_StructMapFineToCoarse(fstart, index_temp, refine_factors, hypre_BoxIMin(&fine_box)); hypre_StructMapFineToCoarse(hypre_BoxIMax(fgrid_box), index_temp, refine_factors, hypre_BoxIMax(&fine_box)); hypre_IntersectBoxes(&fine_box, cgrid_box, &intersect_box); if (hypre_BoxVolume(&intersect_box) > 0) { temp1[cnt1++]= fi; } } fgrid_crse_extents[var1][ci]= hypre_BoxArrayCreate(cnt1, ndim); fbox_interior[var1][ci] = hypre_BoxArrayCreate(cnt1, ndim); fbox_bdy[var1][ci] = hypre_BoxArrayArrayCreate(cnt1, ndim); interior_fboxi[var1][ci] = hypre_CTAlloc(HYPRE_Int, cnt1); bdy_fboxi[var1][ci] = hypre_CTAlloc(HYPRE_Int, cnt1); for (fi= 0; fi< cnt1; fi++) { fgrid_box= hypre_BoxArrayBox(fgrid_boxes, temp1[fi]); hypre_CopyIndex(hypre_BoxIMin(fgrid_box), fstart); hypre_CopyIndex(hypre_BoxIMax(fgrid_box), fend); /*-------------------------------------------------------------------- * record which sides will be adjusted- fstart adjustments will * decrease the box size, whereas fend adjustments will increase the * box size. Since we fstart decreases the box size, we cannot * have an f/c interface at an adjusted fstart end. fend may * correspond to an f/c interface whether it has been adjusted or not. 
*--------------------------------------------------------------------*/ hypre_SetIndex3(index1, 1, 1, 1); for (i= 0; i< ndim; i++) { j= fstart[i]%refine_factors[i]; if (j) { fstart[i]+= refine_factors[i] - j; index1[i] = 0; } j= fend[i]%refine_factors[i]; if (refine_factors[i]-1 - j) { fend[i] +=(refine_factors[i]-1) - j; } } hypre_StructMapFineToCoarse(fstart, index_temp, refine_factors, hypre_BoxIMin(&fine_box)); hypre_StructMapFineToCoarse(hypre_BoxIMax(fgrid_box), index_temp, refine_factors, hypre_BoxIMax(&fine_box)); hypre_IntersectBoxes(&fine_box, cgrid_box, &intersect_box); hypre_CopyBox(&intersect_box, hypre_BoxArrayBox(fgrid_crse_extents[var1][ci], fi)); /*-------------------------------------------------------------------- * adjust the fine intersect_box so that we get the interior and * boundaries separately. *--------------------------------------------------------------------*/ hypre_StructMapCoarseToFine(hypre_BoxIMin(&intersect_box), index_temp, refine_factors, hypre_BoxIMin(&fine_box)); /* the following index2 shift for ndim<3 is no problem since refine_factors[j]= 1 for j>=ndim. */ hypre_SetIndex3(index2, refine_factors[0]-1, refine_factors[1]-1, refine_factors[2]-1); hypre_StructMapCoarseToFine(hypre_BoxIMax(&intersect_box), index2, refine_factors, hypre_BoxIMax(&fine_box)); hypre_SetIndex3(index2, 1, 1, 1); hypre_CopyBox(&fine_box, &loop_box); for (i= 0; i< ndim; i++) { hypre_BoxIMin(&loop_box)[i]+= refine_factors[i]*index1[i]; hypre_BoxIMax(&loop_box)[i]-= refine_factors[i]*index2[i]; } hypre_CopyBox(&loop_box, hypre_BoxArrayBox(fbox_interior[var1][ci], fi)); interior_fboxi[var1][ci][fi]= temp1[fi]; hypre_SubtractBoxes(&fine_box, &loop_box, hypre_BoxArrayArrayBoxArray(fbox_bdy[var1][ci], fi)); bdy_fboxi[var1][ci][fi]= temp1[fi]; } hypre_TFree(temp1); } /* hypre_ForBoxI(ci, cgrid_boxes) */ /*-------------------------------------------------------------------- * Determine the cboxes that contain a chunk of a given fbox. 
*--------------------------------------------------------------------*/ hypre_ForBoxI(fi, fgrid_boxes) { fgrid_box= hypre_BoxArrayBox(fgrid_boxes, fi); hypre_CopyIndex(hypre_BoxIMin(fgrid_box), fstart); for (i= 0; i< ndim; i++) { j= fstart[i]%refine_factors[i]; if (j) { fstart[i]+= refine_factors[i] - j; } } hypre_StructMapFineToCoarse(fstart, index_temp, refine_factors, hypre_BoxIMin(&fine_box)); hypre_StructMapFineToCoarse(hypre_BoxIMax(fgrid_box), index_temp, refine_factors, hypre_BoxIMax(&fine_box)); temp1= hypre_CTAlloc(HYPRE_Int, hypre_BoxArraySize(cgrid_boxes)); hypre_ForBoxI(i, cgrid_boxes) { cgrid_box= hypre_BoxArrayBox(cgrid_boxes, i); hypre_IntersectBoxes(&fine_box, cgrid_box, &intersect_box); if (hypre_BoxVolume(&intersect_box) > 0) { temp1[cboxi_fcnt[var1][fi]]= i; cboxi_fcnt[var1][fi]++; } } cboxi_fboxes[var1][fi]= hypre_TAlloc(HYPRE_Int, cboxi_fcnt[var1][fi]); for (i= 0; i< cboxi_fcnt[var1][fi]; i++) { cboxi_fboxes[var1][fi][i]= temp1[i]; } hypre_TFree(temp1); } } /* for (var1= 0; var1< nvars; var1++) */ /*-------------------------------------------------------------------------- * STEP 1: * COMPUTE THE COARSE LEVEL OPERATOR INSIDE OF A REFINED BOX. * * We assume that the coarse and fine grid variables are of the same type. * * Coarse stencils in the refinement patches are obtained by averaging the * fine grid coefficients. Since we are assuming cell-centred discretization, * we apply a weighted averaging of ONLY the fine grid coefficients along * interfaces of adjacent agglomerated coarse cells. * * Since the stencil pattern is assumed arbitrary, we must determine the * stencil pattern of each var1-var2 struct_matrix to get the correct * contributing stencil coefficients, averaging weights, etc. *--------------------------------------------------------------------------*/ /*-------------------------------------------------------------------------- * Agglomerated coarse cell info. 
These are needed in defining the looping * extents for averaging- i.e., we loop over extents determined by the * size of the agglomerated coarse cell. * Note that the agglomerated coarse cell is constructed correctly for * any dimensions (1, 2, or 3). *--------------------------------------------------------------------------*/ hypre_ClearIndex(index_temp); hypre_CopyIndex(index_temp, hypre_BoxIMin(&coarse_cell_box)); hypre_SetIndex3(index_temp, refine_factors[0]-1, refine_factors[1]-1, refine_factors[2]-1 ); hypre_CopyIndex(index_temp, hypre_BoxIMax(&coarse_cell_box)); volume_coarse_cell_box= hypre_BoxVolume(&coarse_cell_box); /*-------------------------------------------------------------------------- * Offsets in y & z directions for refinement patches. These will be used * for pointing to correct coarse stencil location. *--------------------------------------------------------------------------*/ OffsetA = hypre_CTAlloc(HYPRE_Int *, 2); for (i= 0; i< 2; i++) { OffsetA[i]= hypre_CTAlloc(HYPRE_Int, refine_factors[i+1]); } /*-------------------------------------------------------------------------- * Stencil contribution cnts, weights, etc are computed only if we have * a new stencil pattern. If the pattern is the same, the previously * computed stencil contribution cnts, weights, etc can be used. * * Mark the stencil_marker so that the first time the stencil is non-null, * the stencil contribution cnts, weights, etc are computed. 
*--------------------------------------------------------------------------*/ stencil_marker= trueV; for (var1= 0; var1< nvars; var1++) { cgrid= hypre_SStructPGridSGrid(hypre_SStructPMatrixPGrid(A_crse), var1); cgrid_boxes= hypre_StructGridBoxes(cgrid); fgrid= hypre_SStructPGridSGrid(hypre_SStructPMatrixPGrid(A_pmatrix), var1); fgrid_boxes= hypre_StructGridBoxes(fgrid); for (var2= 0; var2< nvars; var2++) { stencils= hypre_SStructPMatrixSStencil(A_crse, var1, var2); if (stencils != NULL) { stencil_size= hypre_StructStencilSize(stencils); /*----------------------------------------------------------------- * When stencil_marker== true, form the stencil contributions cnts, * weights, etc. This occurs for the first non-null stencil or * when the stencil shape of the current non-null stencil has a * different stencil shape from that of the latest non-null stencil. * * But when stencil_marker== false, we must check to see if we * need new stencil contributions cnts, weights, etc. Thus, find * the latest non-null stencil for comparison. *-----------------------------------------------------------------*/ if (stencil_marker == falseV) { /* search for the first previous non-null stencil */ found = falseV; var2_start= var2-1; for (j= var1; j>= 0; j--) { for (i= var2_start; i>= 0; i--) { stencils_last= hypre_SStructPMatrixSStencil(A_crse, j, i); if (stencils_last != NULL) { found= trueV; break; } } if (found) { break; } else { var2_start= nvars-1; } } /*-------------------------------------------------------------- * Compare the stencil shape. 
*--------------------------------------------------------------*/ stencil_last_size= hypre_StructStencilSize(stencils_last); if (stencil_last_size != stencil_size) { stencil_marker= trueV; break; } else { found= falseV; for (i= 0; i< stencil_size; i++) { hypre_CopyIndex(hypre_StructStencilElement(stencils, i), stencil_shape_i); hypre_CopyIndex(hypre_StructStencilElement(stencils_last,i), stencil_last_shape_i); hypre_SetIndex3(index_temp, stencil_shape_i[0]-stencil_last_shape_i[0], stencil_shape_i[1]-stencil_last_shape_i[1], stencil_shape_i[2]-stencil_last_shape_i[2]); AbsStencilShape(index_temp, abs_stencil_shape); if (abs_stencil_shape) { found= trueV; stencil_marker= trueV; hypre_TFree(stencil_contrib_cnt); hypre_TFree(stencil_ranks); for (i= 0; i< stencil_size; i++) { hypre_BoxDestroy(shift_box[i]); } hypre_TFree(shift_box); hypre_TFree(volume_shift_box); hypre_TFree(vals); for (j= 1; j< max_stencil_size; j++) { stencil_i= rank_stencils[j]; if (stencil_i != -1) { hypre_TFree(stencil_contrib_i[stencil_i]); hypre_TFree(weight_contrib_i[stencil_i]); } } hypre_TFree(stencil_contrib_i); hypre_TFree(weight_contrib_i); hypre_TFree(rank_stencils); } if (found) { break; } } /* for (i= 0; i< stencil_size; i++) */ } /* else */ } /* if (stencil_marker == false) */ /*----------------------------------------------------------------- * If stencil_marker==true, form the contribution structures. * Since the type of averaging is determined by the stencil shapes, * we need a ranking of the stencil shape to allow for easy * determination. * * top: 14 12 13 centre: 5 3 4 bottom 23 21 22 * 11 9 10 2 0 1 20 18 19 * 17 15 16 8 6 7 26 24 25 * * for stencil of max. size 27. * * stencil_contrib_cnt[i]= no. of fine stencils averaged to * form stencil entry i. * stencil_contrib_i[i] = rank of fine stencils contributing * to form stencil entry i. * weight_contrib_i[i] = array of weights for weighting * the contributions to stencil entry i. * stencil_ranks[i] = rank of stencil entry i. 
* rank_stencils[i] = stencil entry of rank i. *-----------------------------------------------------------------*/ if (stencil_marker == trueV) { /* mark stencil_marker for the next stencil */ stencil_marker= falseV; stencil_contrib_cnt= hypre_CTAlloc(HYPRE_Int, stencil_size); stencil_contrib_i = hypre_TAlloc(HYPRE_Int *, stencil_size); weight_contrib_i = hypre_TAlloc(HYPRE_Real *, stencil_size); stencil_ranks = hypre_TAlloc(HYPRE_Int, stencil_size); rank_stencils = hypre_TAlloc(HYPRE_Int, max_stencil_size); shift_box = hypre_TAlloc(hypre_Box *, stencil_size); volume_shift_box = hypre_TAlloc(HYPRE_Int, stencil_size); for (i= 0; i< max_stencil_size; i++) { rank_stencils[i]= -1; if (i < stencil_size) { stencil_ranks[i]= -1; } } /*----------------------------------------------------------------- * Get mappings between stencil entries and ranks and vice versa; * fine grid looping extents for averaging of the fine coefficients; * and the number of fine grid values to be averaged. * Note that the shift_boxes are constructed correctly for any * dimensions. For j>=ndim, * hypre_BoxIMin(shift_box[i])[j]=hypre_BoxIMax(shift_box[i])[j]= 0. *-----------------------------------------------------------------*/ for (i= 0; i< stencil_size; i++) { shift_box[i] = hypre_BoxCreate(ndim); hypre_CopyIndex(hypre_StructStencilElement(stencils, i), stencil_shape_i); MapStencilRank(stencil_shape_i, j); stencil_ranks[i]= j; rank_stencils[stencil_ranks[i]] = i; hypre_SetIndex3(hypre_BoxIMin(shift_box[i]), (refine_factors[0]-1)*stencil_shape_i[0], (refine_factors[1]-1)*stencil_shape_i[1], (refine_factors[2]-1)*stencil_shape_i[2]); hypre_AddIndexes(hypre_BoxIMin(shift_box[i]), hypre_BoxIMax(&coarse_cell_box), 3, hypre_BoxIMax(shift_box[i])); hypre_IntersectBoxes(&coarse_cell_box, shift_box[i], shift_box[i]); volume_shift_box[i]= hypre_BoxVolume(shift_box[i]); } /*----------------------------------------------------------------- * Derive the contribution info. 
* The above rank table is used to determine the direction indices. * Weight construction procedure valid for any dimensions. *-----------------------------------------------------------------*/ /* east */ stencil_i= rank_stencils[1]; if (stencil_i != -1) { stencil_contrib_cnt[stencil_i]++; for (i= 4; i<= 7; i+=3) { if (rank_stencils[i] != -1) /* ne or se */ stencil_contrib_cnt[stencil_i]++; } if (ndim > 2) { for (j= 1; j<= 2; j++) { for (i= 1; i<= 7; i+=3) { if (rank_stencils[j*9+i] != -1) /* bottom or top planes */ stencil_contrib_cnt[stencil_i]++; } } } max_contribut_size= stencil_contrib_cnt[stencil_i]; } /* fill up the east contribution stencil indices */ if (stencil_i != -1) { stencil_contrib_i[stencil_i]= hypre_TAlloc(HYPRE_Int, stencil_contrib_cnt[stencil_i]); weight_contrib_i[stencil_i] = hypre_TAlloc(HYPRE_Real, stencil_contrib_cnt[stencil_i]); sum= 0.0; k= 0; stencil_contrib_i[stencil_i][k]= stencil_i; AbsStencilShape( hypre_StructStencilElement(stencils,stencil_i), abs_stencil_shape ); weight_contrib_i[stencil_i][k++] = weights[abs_stencil_shape]; sum+= weights[abs_stencil_shape]; for (i= 4; i<= 7; i+=3) { if (rank_stencils[i] != -1) { stencil_contrib_i[stencil_i][k] = rank_stencils[i]; AbsStencilShape(hypre_StructStencilElement(stencils,rank_stencils[i]), abs_stencil_shape ); weight_contrib_i[stencil_i][k++] = weights[abs_stencil_shape]; sum+= weights[abs_stencil_shape]; } } if (ndim > 2) { for (j= 1; j<= 2; j++) { for (i= 1; i<= 7; i+=3) { if (rank_stencils[j*9+i] != -1) { stencil_contrib_i[stencil_i][k] = rank_stencils[j*9+i]; AbsStencilShape( hypre_StructStencilElement(stencils,rank_stencils[j*9+i]), abs_stencil_shape ); weight_contrib_i[stencil_i][k++] = weights[abs_stencil_shape]; sum+= weights[abs_stencil_shape]; } } } } for (i= 0; i< k ; i++) { weight_contrib_i[stencil_i][i]/= sum; } } /* west */ stencil_i= rank_stencils[2]; if (stencil_i != -1) { stencil_contrib_cnt[stencil_i]++; for (i= 5; i<= 8; i+=3) { if (rank_stencils[i] != -1) /* nw or sw 
*/ stencil_contrib_cnt[stencil_i]++; } if (ndim > 2) { for (j= 1; j<= 2; j++) { for (i= 2; i<= 8; i+=3) { if (rank_stencils[j*9+i] != -1) /* bottom or top planes */ stencil_contrib_cnt[stencil_i]++; } } } max_contribut_size= hypre_max( max_contribut_size, stencil_contrib_cnt[stencil_i] ); } if (stencil_i != -1) { stencil_contrib_i[stencil_i]= hypre_TAlloc(HYPRE_Int, stencil_contrib_cnt[stencil_i]); weight_contrib_i[stencil_i] = hypre_TAlloc(HYPRE_Real, stencil_contrib_cnt[stencil_i]); sum= 0.0; k= 0; stencil_contrib_i[stencil_i][k]= stencil_i; AbsStencilShape( hypre_StructStencilElement(stencils,stencil_i), abs_stencil_shape ); weight_contrib_i[stencil_i][k++] = weights[abs_stencil_shape]; sum+= weights[abs_stencil_shape]; for (i= 5; i<= 8; i+=3) { if (rank_stencils[i] != -1) { stencil_contrib_i[stencil_i][k] = rank_stencils[i]; AbsStencilShape(hypre_StructStencilElement(stencils,rank_stencils[i]), abs_stencil_shape ); weight_contrib_i[stencil_i][k++] = weights[abs_stencil_shape]; sum+= weights[abs_stencil_shape]; } } if (ndim > 2) { for (j= 1; j<= 2; j++) { for (i= 2; i<= 8; i+=3) { if (rank_stencils[j*9+i] != -1) { stencil_contrib_i[stencil_i][k] = rank_stencils[j*9+i]; AbsStencilShape( hypre_StructStencilElement(stencils,rank_stencils[j*9+i]), abs_stencil_shape ); weight_contrib_i[stencil_i][k++] = weights[abs_stencil_shape]; sum+= weights[abs_stencil_shape]; } } } } for (i= 0; i< k ; i++) { weight_contrib_i[stencil_i][i]/= sum; } } /* north */ stencil_i= rank_stencils[3]; if (stencil_i != -1) { stencil_contrib_cnt[stencil_i]++; for (i= 4; i<= 5; i++) { if (rank_stencils[i] != -1) /* ne or nw */ stencil_contrib_cnt[stencil_i]++; } if (ndim > 2) { for (j= 1; j<= 2; j++) { for (i= 3; i<= 5; i++) { if (rank_stencils[j*9+i] != -1) /* bottom or top planes */ stencil_contrib_cnt[stencil_i]++; } } } max_contribut_size= hypre_max( max_contribut_size, stencil_contrib_cnt[stencil_i] ); } if (stencil_i != -1) { stencil_contrib_i[stencil_i]= hypre_TAlloc(HYPRE_Int, 
stencil_contrib_cnt[stencil_i]); weight_contrib_i[stencil_i] = hypre_TAlloc(HYPRE_Real, stencil_contrib_cnt[stencil_i]); sum= 0.0; k= 0; stencil_contrib_i[stencil_i][k]= stencil_i; AbsStencilShape( hypre_StructStencilElement(stencils,stencil_i), abs_stencil_shape ); weight_contrib_i[stencil_i][k++] = weights[abs_stencil_shape]; sum+= weights[abs_stencil_shape]; for (i= 4; i<= 5; i++) { if (rank_stencils[i] != -1) { stencil_contrib_i[stencil_i][k] = rank_stencils[i]; AbsStencilShape(hypre_StructStencilElement(stencils,rank_stencils[i]), abs_stencil_shape ); weight_contrib_i[stencil_i][k++] = weights[abs_stencil_shape]; sum+= weights[abs_stencil_shape]; } } if (ndim > 2) { for (j= 1; j<= 2; j++) { for (i= 3; i<= 5; i++) { if (rank_stencils[j*9+i] != -1) { stencil_contrib_i[stencil_i][k] = rank_stencils[j*9+i]; AbsStencilShape( hypre_StructStencilElement(stencils,rank_stencils[j*9+i]), abs_stencil_shape ); weight_contrib_i[stencil_i][k++] = weights[abs_stencil_shape]; sum+= weights[abs_stencil_shape]; } } } } for (i= 0; i< k ; i++) { weight_contrib_i[stencil_i][i]/= sum; } } /* south */ stencil_i= rank_stencils[6]; if (stencil_i != -1) { stencil_contrib_cnt[stencil_i]++; for (i= 7; i<= 8; i++) { if (rank_stencils[i] != -1) /* ne or nw */ stencil_contrib_cnt[stencil_i]++; } if (ndim > 2) { for (j= 1; j<= 2; j++) { for (i= 6; i<= 8; i++) { if (rank_stencils[j*9+i] != -1) /* bottom or top planes */ stencil_contrib_cnt[stencil_i]++; } } } max_contribut_size= hypre_max( max_contribut_size, stencil_contrib_cnt[stencil_i] ); } if (stencil_i != -1) { stencil_contrib_i[stencil_i]= hypre_TAlloc(HYPRE_Int, stencil_contrib_cnt[stencil_i]); weight_contrib_i[stencil_i] = hypre_TAlloc(HYPRE_Real, stencil_contrib_cnt[stencil_i]); sum= 0.0; k= 0; stencil_contrib_i[stencil_i][k]= stencil_i; AbsStencilShape( hypre_StructStencilElement(stencils,stencil_i), abs_stencil_shape ); weight_contrib_i[stencil_i][k++] = weights[abs_stencil_shape]; sum+= weights[abs_stencil_shape]; for (i= 7; i<= 
8; i++) { if (rank_stencils[i] != -1) { stencil_contrib_i[stencil_i][k] = rank_stencils[i]; AbsStencilShape(hypre_StructStencilElement(stencils,rank_stencils[i]), abs_stencil_shape ); weight_contrib_i[stencil_i][k++] = weights[abs_stencil_shape]; sum+= weights[abs_stencil_shape]; } } if (ndim > 2) { for (j= 1; j<= 2; j++) { for (i= 6; i<= 8; i++) { if (rank_stencils[j*9+i] != -1) { stencil_contrib_i[stencil_i][k] = rank_stencils[j*9+i]; AbsStencilShape( hypre_StructStencilElement(stencils,rank_stencils[j*9+i]), abs_stencil_shape ); weight_contrib_i[stencil_i][k++] = weights[abs_stencil_shape]; sum+= weights[abs_stencil_shape]; } } } } for (i= 0; i< k ; i++) { weight_contrib_i[stencil_i][i]/= sum; } } /*----------------------------------------------------------------- * If only 2-d, extract the corner indices. *-----------------------------------------------------------------*/ if (ndim == 2) { /* corners: ne & nw */ for (i= 4; i<= 5; i++) { stencil_i= rank_stencils[i]; if (stencil_i != -1) { stencil_contrib_cnt[stencil_i]++; stencil_contrib_i[stencil_i]= hypre_TAlloc(HYPRE_Int, 1); weight_contrib_i[stencil_i] = hypre_TAlloc(HYPRE_Real, 1); stencil_contrib_i[stencil_i][0]= stencil_i; weight_contrib_i[stencil_i][0] = weights[0]; } } /* corners: se & sw */ for (i= 7; i<= 8; i++) { stencil_i= rank_stencils[i]; if (stencil_i != -1) { stencil_contrib_cnt[stencil_i]++; stencil_contrib_i[stencil_i]= hypre_TAlloc(HYPRE_Int, 1); weight_contrib_i[stencil_i] = hypre_TAlloc(HYPRE_Real, 1); stencil_contrib_i[stencil_i][0]= stencil_i; weight_contrib_i[stencil_i][0] = weights[0]; } } } /*----------------------------------------------------------------- * Additional directions for 3-dim case *-----------------------------------------------------------------*/ if (ndim > 2) { /* sides: top */ stencil_i= rank_stencils[9]; if (stencil_i != -1) { stencil_contrib_cnt[stencil_i]++; for (i=1; i<= 8; i++) { if (rank_stencils[9+i] != -1) stencil_contrib_cnt[stencil_i]++; } 
max_contribut_size= hypre_max( max_contribut_size, stencil_contrib_cnt[stencil_i] ); } if (stencil_i != -1) { stencil_contrib_i[stencil_i]= hypre_TAlloc(HYPRE_Int, stencil_contrib_cnt[stencil_i]); weight_contrib_i[stencil_i] = hypre_TAlloc(HYPRE_Real, stencil_contrib_cnt[stencil_i]); sum= 0.0; k= 0; stencil_contrib_i[stencil_i][k]= stencil_i; AbsStencilShape( hypre_StructStencilElement(stencils,stencil_i), abs_stencil_shape ); weight_contrib_i[stencil_i][k++] = weights[abs_stencil_shape]; sum+= weights[abs_stencil_shape]; for (i=1; i<= 8; i++) { if (rank_stencils[9+i] != -1) { stencil_contrib_i[stencil_i][k]= rank_stencils[9+i]; AbsStencilShape(hypre_StructStencilElement(stencils,rank_stencils[9+i]), abs_stencil_shape ); weight_contrib_i[stencil_i][k++] = weights[abs_stencil_shape]; sum+= weights[abs_stencil_shape]; } } for (i= 0; i< k ; i++) { weight_contrib_i[stencil_i][i]/= sum; } } /* sides: bottom */ stencil_i= rank_stencils[18]; if (stencil_i != -1) { stencil_contrib_cnt[stencil_i]++; for (i=1; i<= 8; i++) { if (rank_stencils[18+i] != -1) stencil_contrib_cnt[stencil_i]++; } max_contribut_size= hypre_max( max_contribut_size, stencil_contrib_cnt[stencil_i] ); } if (stencil_i != -1) { stencil_contrib_i[stencil_i]= hypre_TAlloc(HYPRE_Int, stencil_contrib_cnt[stencil_i]); weight_contrib_i[stencil_i] = hypre_TAlloc(HYPRE_Real, stencil_contrib_cnt[stencil_i]); sum= 0.0; k= 0; stencil_contrib_i[stencil_i][k]= stencil_i; AbsStencilShape( hypre_StructStencilElement(stencils,stencil_i), abs_stencil_shape ); weight_contrib_i[stencil_i][k++] = weights[abs_stencil_shape]; sum+= weights[abs_stencil_shape]; for (i=1; i<= 8; i++) { if (rank_stencils[18+i] != -1) { stencil_contrib_i[stencil_i][k]= rank_stencils[18+i]; AbsStencilShape(hypre_StructStencilElement(stencils,rank_stencils[18+i]), abs_stencil_shape ); weight_contrib_i[stencil_i][k++] = weights[abs_stencil_shape]; sum+= weights[abs_stencil_shape]; } } for (i= 0; i< k ; i++) { weight_contrib_i[stencil_i][i]/= sum; } } 
/* edges: cne */ stencil_i= rank_stencils[4]; if (stencil_i != -1) { stencil_contrib_cnt[stencil_i]++; for (j=1; j<= 2; j++) { if (rank_stencils[j*9+4] != -1) /* bottom or top planes */ stencil_contrib_cnt[stencil_i]++; } max_contribut_size= hypre_max( max_contribut_size, stencil_contrib_cnt[stencil_i] ); } if (stencil_i != -1) { stencil_contrib_i[stencil_i]= hypre_TAlloc(HYPRE_Int, stencil_contrib_cnt[stencil_i]); weight_contrib_i[stencil_i] = hypre_TAlloc(HYPRE_Real, stencil_contrib_cnt[stencil_i]); sum= 0.0; k= 0; stencil_contrib_i[stencil_i][k]= stencil_i; AbsStencilShape( hypre_StructStencilElement(stencils,stencil_i), abs_stencil_shape ); weight_contrib_i[stencil_i][k++] = weights[abs_stencil_shape]; sum+= weights[abs_stencil_shape]; for (j=1; j<= 2; j++) { if (rank_stencils[j*9+4] != -1) { stencil_contrib_i[stencil_i][k]= rank_stencils[j*9+4]; AbsStencilShape(hypre_StructStencilElement(stencils,rank_stencils[j*9+4]), abs_stencil_shape ); weight_contrib_i[stencil_i][k++] = weights[abs_stencil_shape]; sum+= weights[abs_stencil_shape]; } } for (i= 0; i< k ; i++) { weight_contrib_i[stencil_i][i]/= sum; } } /* edges: cse */ stencil_i= rank_stencils[7]; if (stencil_i != -1) { stencil_contrib_cnt[stencil_i]++; for (j=1; j<= 2; j++) { if (rank_stencils[j*9+7] != -1) /* bottom or top planes */ stencil_contrib_cnt[stencil_i]++; } max_contribut_size= hypre_max( max_contribut_size, stencil_contrib_cnt[stencil_i] ); } if (stencil_i != -1) { stencil_contrib_i[stencil_i]= hypre_TAlloc(HYPRE_Int, stencil_contrib_cnt[stencil_i]); weight_contrib_i[stencil_i] = hypre_TAlloc(HYPRE_Real, stencil_contrib_cnt[stencil_i]); sum= 0.0; k= 0; stencil_contrib_i[stencil_i][k]= stencil_i; AbsStencilShape( hypre_StructStencilElement(stencils,stencil_i), abs_stencil_shape ); weight_contrib_i[stencil_i][k++] = weights[abs_stencil_shape]; sum+= weights[abs_stencil_shape]; for (j=1; j<= 2; j++) { if (rank_stencils[j*9+7] != -1) { stencil_contrib_i[stencil_i][k]= rank_stencils[j*9+7]; 
AbsStencilShape(hypre_StructStencilElement(stencils,rank_stencils[j*9+7]), abs_stencil_shape ); weight_contrib_i[stencil_i][k++] = weights[abs_stencil_shape]; sum+= weights[abs_stencil_shape]; } } for (i= 0; i< k ; i++) { weight_contrib_i[stencil_i][i]/= sum; } } /* edges: cnw */ stencil_i= rank_stencils[5]; if (stencil_i != -1) { stencil_contrib_cnt[stencil_i]++; for (j=1; j<= 2; j++) { if (rank_stencils[j*9+5] != -1) /* bottom or top planes */ stencil_contrib_cnt[stencil_i]++; } max_contribut_size= hypre_max( max_contribut_size, stencil_contrib_cnt[stencil_i] ); } if (stencil_i != -1) { stencil_contrib_i[stencil_i]= hypre_TAlloc(HYPRE_Int, stencil_contrib_cnt[stencil_i]); weight_contrib_i[stencil_i] = hypre_TAlloc(HYPRE_Real, stencil_contrib_cnt[stencil_i]); sum= 0.0; k= 0; stencil_contrib_i[stencil_i][k]= stencil_i; AbsStencilShape( hypre_StructStencilElement(stencils,stencil_i), abs_stencil_shape ); weight_contrib_i[stencil_i][k++] = weights[abs_stencil_shape]; sum+= weights[abs_stencil_shape]; for (j=1; j<= 2; j++) { if (rank_stencils[j*9+5] != -1) { stencil_contrib_i[stencil_i][k]= rank_stencils[j*9+5]; AbsStencilShape(hypre_StructStencilElement(stencils,rank_stencils[j*9+5]), abs_stencil_shape ); weight_contrib_i[stencil_i][k++] = weights[abs_stencil_shape]; sum+= weights[abs_stencil_shape]; } } for (i= 0; i< k ; i++) { weight_contrib_i[stencil_i][i]/= sum; } } /* edges: csw */ stencil_i= rank_stencils[8]; if (stencil_i != -1) { stencil_contrib_cnt[stencil_i]++; for (j=1; j<= 2; j++) { if (rank_stencils[j*9+8] != -1) /* bottom or top planes */ stencil_contrib_cnt[stencil_i]++; } max_contribut_size= hypre_max( max_contribut_size, stencil_contrib_cnt[stencil_i] ); } if (stencil_i != -1) { stencil_contrib_i[stencil_i]= hypre_TAlloc(HYPRE_Int, stencil_contrib_cnt[stencil_i]); weight_contrib_i[stencil_i] = hypre_TAlloc(HYPRE_Real, stencil_contrib_cnt[stencil_i]); sum= 0.0; k= 0; stencil_contrib_i[stencil_i][k]= stencil_i; AbsStencilShape( 
hypre_StructStencilElement(stencils,stencil_i), abs_stencil_shape ); weight_contrib_i[stencil_i][k++] = weights[abs_stencil_shape]; sum+= weights[abs_stencil_shape]; for (j=1; j<= 2; j++) { if (rank_stencils[j*9+8] != -1) { stencil_contrib_i[stencil_i][k]= rank_stencils[j*9+8]; AbsStencilShape(hypre_StructStencilElement(stencils,rank_stencils[j*9+8]), abs_stencil_shape ); weight_contrib_i[stencil_i][k++] = weights[abs_stencil_shape]; sum+= weights[abs_stencil_shape]; } } for (i= 0; i< k ; i++) { weight_contrib_i[stencil_i][i]/= sum; } } /* edges: top east */ stencil_i= rank_stencils[10]; if (stencil_i != -1) { stencil_contrib_cnt[stencil_i]++; for (i=3; i<= 6; i+=3) { if (rank_stencils[10+i] != -1) stencil_contrib_cnt[stencil_i]++; } max_contribut_size= hypre_max( max_contribut_size, stencil_contrib_cnt[stencil_i] ); } if (stencil_i != -1) { stencil_contrib_i[stencil_i]= hypre_TAlloc(HYPRE_Int, stencil_contrib_cnt[stencil_i]); weight_contrib_i[stencil_i] = hypre_TAlloc(HYPRE_Real, stencil_contrib_cnt[stencil_i]); sum= 0.0; k= 0; stencil_contrib_i[stencil_i][k]= stencil_i; AbsStencilShape( hypre_StructStencilElement(stencils,stencil_i), abs_stencil_shape ); weight_contrib_i[stencil_i][k++] = weights[abs_stencil_shape]; sum+= weights[abs_stencil_shape]; for (i=3; i<= 6; i+=3) { if (rank_stencils[10+i] != -1) { stencil_contrib_i[stencil_i][k]= rank_stencils[10+i]; AbsStencilShape(hypre_StructStencilElement(stencils,rank_stencils[10+i]), abs_stencil_shape ); weight_contrib_i[stencil_i][k++] = weights[abs_stencil_shape]; sum+= weights[abs_stencil_shape]; } } for (i= 0; i< k ; i++) { weight_contrib_i[stencil_i][i]/= sum; } } /* edges: top west */ stencil_i= rank_stencils[11]; if (stencil_i != -1) { stencil_contrib_cnt[stencil_i]++; for (i=3; i<= 6; i+=3) { if (rank_stencils[11+i] != -1) stencil_contrib_cnt[stencil_i]++; } max_contribut_size= hypre_max( max_contribut_size, stencil_contrib_cnt[stencil_i] ); } if (stencil_i != -1) { stencil_contrib_i[stencil_i]= 
hypre_TAlloc(HYPRE_Int, stencil_contrib_cnt[stencil_i]); weight_contrib_i[stencil_i] = hypre_TAlloc(HYPRE_Real, stencil_contrib_cnt[stencil_i]); sum= 0.0; k= 0; stencil_contrib_i[stencil_i][k]= stencil_i; AbsStencilShape( hypre_StructStencilElement(stencils,stencil_i), abs_stencil_shape ); weight_contrib_i[stencil_i][k++] = weights[abs_stencil_shape]; sum+= weights[abs_stencil_shape]; for (i=3; i<= 6; i+=3) { if (rank_stencils[11+i] != -1) { stencil_contrib_i[stencil_i][k]= rank_stencils[11+i]; AbsStencilShape(hypre_StructStencilElement(stencils,rank_stencils[11+i]), abs_stencil_shape ); weight_contrib_i[stencil_i][k++] = weights[abs_stencil_shape]; sum+= weights[abs_stencil_shape]; } } for (i= 0; i< k ; i++) { weight_contrib_i[stencil_i][i]/= sum; } } /* edges: top north */ stencil_i= rank_stencils[12]; if (stencil_i != -1) { stencil_contrib_cnt[stencil_i]++; for (i=13; i<= 14; i++) { if (rank_stencils[i] != -1) stencil_contrib_cnt[stencil_i]++; } max_contribut_size= hypre_max( max_contribut_size, stencil_contrib_cnt[stencil_i] ); } if (stencil_i != -1) { stencil_contrib_i[stencil_i]= hypre_TAlloc(HYPRE_Int, stencil_contrib_cnt[stencil_i]); weight_contrib_i[stencil_i] = hypre_TAlloc(HYPRE_Real, stencil_contrib_cnt[stencil_i]); sum= 0.0; k= 0; stencil_contrib_i[stencil_i][k]= stencil_i; AbsStencilShape( hypre_StructStencilElement(stencils,stencil_i), abs_stencil_shape ); weight_contrib_i[stencil_i][k++] = weights[abs_stencil_shape]; sum+= weights[abs_stencil_shape]; for (i=13; i<= 14; i++) { if (rank_stencils[i] != -1) { stencil_contrib_i[stencil_i][k]= rank_stencils[i]; AbsStencilShape(hypre_StructStencilElement(stencils,rank_stencils[i]), abs_stencil_shape ); weight_contrib_i[stencil_i][k++] = weights[abs_stencil_shape]; sum+= weights[abs_stencil_shape]; } } for (i= 0; i< k ; i++) { weight_contrib_i[stencil_i][i]/= sum; } } /* edges: top south*/ stencil_i= rank_stencils[15]; if (stencil_i != -1) { stencil_contrib_cnt[stencil_i]++; for (i=16; i<= 17; i++) { if 
(rank_stencils[i] != -1) stencil_contrib_cnt[stencil_i]++; } max_contribut_size= hypre_max( max_contribut_size, stencil_contrib_cnt[stencil_i] ); } if (stencil_i != -1) { stencil_contrib_i[stencil_i]= hypre_TAlloc(HYPRE_Int, stencil_contrib_cnt[stencil_i]); weight_contrib_i[stencil_i] = hypre_TAlloc(HYPRE_Real, stencil_contrib_cnt[stencil_i]); sum= 0.0; k= 0; stencil_contrib_i[stencil_i][k]= stencil_i; AbsStencilShape( hypre_StructStencilElement(stencils,stencil_i), abs_stencil_shape ); weight_contrib_i[stencil_i][k++] = weights[abs_stencil_shape]; sum+= weights[abs_stencil_shape]; for (i=16; i<= 17; i++) { if (rank_stencils[i] != -1) { stencil_contrib_i[stencil_i][k]= rank_stencils[i]; AbsStencilShape(hypre_StructStencilElement(stencils,rank_stencils[i]), abs_stencil_shape ); weight_contrib_i[stencil_i][k++] = weights[abs_stencil_shape]; sum+= weights[abs_stencil_shape]; } } for (i= 0; i< k ; i++) { weight_contrib_i[stencil_i][i]/= sum; } } /* edges: bottom east */ stencil_i= rank_stencils[19]; if (stencil_i != -1) { stencil_contrib_cnt[stencil_i]++; for (i=3; i<= 6; i+=3) { if (rank_stencils[19+i] != -1) stencil_contrib_cnt[stencil_i]++; } max_contribut_size= hypre_max( max_contribut_size, stencil_contrib_cnt[stencil_i] ); } if (stencil_i != -1) { stencil_contrib_i[stencil_i]= hypre_TAlloc(HYPRE_Int, stencil_contrib_cnt[stencil_i]); weight_contrib_i[stencil_i] = hypre_TAlloc(HYPRE_Real, stencil_contrib_cnt[stencil_i]); sum= 0.0; k= 0; stencil_contrib_i[stencil_i][k]= stencil_i; AbsStencilShape( hypre_StructStencilElement(stencils,stencil_i), abs_stencil_shape ); weight_contrib_i[stencil_i][k++] = weights[abs_stencil_shape]; sum+= weights[abs_stencil_shape]; for (i=3; i<= 6; i+=3) { if (rank_stencils[19+i] != -1) { stencil_contrib_i[stencil_i][k]= rank_stencils[19+i]; AbsStencilShape(hypre_StructStencilElement(stencils,rank_stencils[19+i]), abs_stencil_shape ); weight_contrib_i[stencil_i][k++] = weights[abs_stencil_shape]; sum+= weights[abs_stencil_shape]; } } for 
(i= 0; i< k ; i++) { weight_contrib_i[stencil_i][i]/= sum; } } /* edges: bottom west */ stencil_i= rank_stencils[20]; if (stencil_i != -1) { stencil_contrib_cnt[stencil_i]++; for (i=3; i<= 6; i+=3) { if (rank_stencils[20+i] != -1) stencil_contrib_cnt[stencil_i]++; } max_contribut_size= hypre_max( max_contribut_size, stencil_contrib_cnt[stencil_i] ); } if (stencil_i != -1) { stencil_contrib_i[stencil_i]= hypre_TAlloc(HYPRE_Int, stencil_contrib_cnt[stencil_i]); weight_contrib_i[stencil_i] = hypre_TAlloc(HYPRE_Real, stencil_contrib_cnt[stencil_i]); sum= 0.0; k= 0; stencil_contrib_i[stencil_i][k]= stencil_i; AbsStencilShape( hypre_StructStencilElement(stencils,stencil_i), abs_stencil_shape ); weight_contrib_i[stencil_i][k++] = weights[abs_stencil_shape]; sum+= weights[abs_stencil_shape]; for (i=3; i<= 6; i+=3) { if (rank_stencils[20+i] != -1) { stencil_contrib_i[stencil_i][k]= rank_stencils[20+i]; AbsStencilShape(hypre_StructStencilElement(stencils,rank_stencils[20+i]), abs_stencil_shape ); weight_contrib_i[stencil_i][k++] = weights[abs_stencil_shape]; sum+= weights[abs_stencil_shape]; } } for (i= 0; i< k ; i++) { weight_contrib_i[stencil_i][i]/= sum; } } /* edges: bottom north */ stencil_i= rank_stencils[21]; if (stencil_i != -1) { stencil_contrib_cnt[stencil_i]++; for (i=22; i<= 23; i++) { if (rank_stencils[i] != -1) stencil_contrib_cnt[stencil_i]++; } max_contribut_size= hypre_max( max_contribut_size, stencil_contrib_cnt[stencil_i] ); } if (stencil_i != -1) { stencil_contrib_i[stencil_i]= hypre_TAlloc(HYPRE_Int, stencil_contrib_cnt[stencil_i]); weight_contrib_i[stencil_i] = hypre_TAlloc(HYPRE_Real, stencil_contrib_cnt[stencil_i]); sum= 0.0; k= 0; stencil_contrib_i[stencil_i][k]= stencil_i; AbsStencilShape( hypre_StructStencilElement(stencils,stencil_i), abs_stencil_shape ); weight_contrib_i[stencil_i][k++] = weights[abs_stencil_shape]; sum+= weights[abs_stencil_shape]; for (i=22; i<= 23; i++) { if (rank_stencils[i] != -1) { stencil_contrib_i[stencil_i][k]= 
rank_stencils[i]; AbsStencilShape(hypre_StructStencilElement(stencils,rank_stencils[i]), abs_stencil_shape ); weight_contrib_i[stencil_i][k++] = weights[abs_stencil_shape]; sum+= weights[abs_stencil_shape]; } } for (i= 0; i< k ; i++) { weight_contrib_i[stencil_i][i]/= sum; } } /* edges: bottom south*/ stencil_i= rank_stencils[24]; if (stencil_i != -1) { stencil_contrib_cnt[stencil_i]++; for (i=25; i<= 26; i++) { if (rank_stencils[i] != -1) stencil_contrib_cnt[stencil_i]++; } max_contribut_size= hypre_max( max_contribut_size, stencil_contrib_cnt[stencil_i] ); } if (stencil_i != -1) { stencil_contrib_i[stencil_i]= hypre_TAlloc(HYPRE_Int, stencil_contrib_cnt[stencil_i]); weight_contrib_i[stencil_i] = hypre_TAlloc(HYPRE_Real, stencil_contrib_cnt[stencil_i]); sum= 0.0; k= 0; stencil_contrib_i[stencil_i][k]= stencil_i; AbsStencilShape( hypre_StructStencilElement(stencils,stencil_i), abs_stencil_shape ); weight_contrib_i[stencil_i][k++] = weights[abs_stencil_shape]; sum+= weights[abs_stencil_shape]; for (i=25; i<= 26; i++) { if (rank_stencils[i] != -1) { stencil_contrib_i[stencil_i][k]= rank_stencils[i]; AbsStencilShape(hypre_StructStencilElement(stencils,rank_stencils[i]), abs_stencil_shape ); weight_contrib_i[stencil_i][k++] = weights[abs_stencil_shape]; sum+= weights[abs_stencil_shape]; } } for (i= 0; i< k ; i++) { weight_contrib_i[stencil_i][i]/= sum; } } /* corners*/ for (j= 1; j<= 2; j++) { for (i= 4; i<= 5; i++) { stencil_i= rank_stencils[9*j+i]; if (stencil_i != -1) { stencil_contrib_cnt[stencil_i]++; stencil_contrib_i[stencil_i]= hypre_TAlloc(HYPRE_Int, 1); weight_contrib_i[stencil_i] = hypre_TAlloc(HYPRE_Real, 1); stencil_contrib_i[stencil_i][0]= stencil_i; weight_contrib_i[stencil_i][0] = weights[0]; } } for (i= 7; i<= 8; i++) { stencil_i= rank_stencils[9*j+i]; if (stencil_i != -1) { stencil_contrib_cnt[stencil_i]++; stencil_contrib_i[stencil_i]= hypre_TAlloc(HYPRE_Int, 1); weight_contrib_i[stencil_i] = hypre_TAlloc(HYPRE_Real, 1); 
stencil_contrib_i[stencil_i][0]= stencil_i; weight_contrib_i[stencil_i][0] = weights[0]; } } } } /* if ndim > 2 */ /*----------------------------------------------------------------- * Allocate for the temporary vector used in computing the * averages. *-----------------------------------------------------------------*/ vals= hypre_CTAlloc(HYPRE_Real, max_contribut_size); /*----------------------------------------------------------------- * coarse grid stencil contributor structures have been formed. *-----------------------------------------------------------------*/ } /* if (stencil_marker == true) */ /*--------------------------------------------------------------------- * Loop over gridboxes to average stencils *---------------------------------------------------------------------*/ smatrix_var = hypre_SStructPMatrixSMatrix(A_pmatrix, var1, var2); crse_smatrix= hypre_SStructPMatrixSMatrix(A_crse, var1, var2); /*--------------------------------------------------------------------- * data ptrs to extract and fill in data. *---------------------------------------------------------------------*/ a_ptrs = hypre_TAlloc(HYPRE_Real *, stencil_size); crse_ptrs= hypre_TAlloc(HYPRE_Real *, stencil_size); hypre_ForBoxI(ci, cgrid_boxes) { cgrid_box= hypre_BoxArrayBox(cgrid_boxes, ci); fbox_interior_ci = fbox_interior[var1][ci]; fbox_bdy_ci = fbox_bdy[var1][ci]; interior_fboxi_ci= interior_fboxi[var1][ci]; bdy_fboxi_ci = bdy_fboxi[var1][ci]; crse_dbox= hypre_BoxArrayBox(hypre_StructMatrixDataSpace(crse_smatrix), ci); /*------------------------------------------------------------------ * grab the correct coarse grid pointers. These are the parent base * grids. 
*------------------------------------------------------------------*/ for (i= 0; i< stencil_size; i++) { hypre_CopyIndex(hypre_StructStencilElement(stencils, i), stencil_shape_i); crse_ptrs[i]= hypre_StructMatrixExtractPointerByIndex(crse_smatrix, ci, stencil_shape_i); } /*------------------------------------------------------------------ * Loop over the interior of each patch inside cgrid_box. *------------------------------------------------------------------*/ hypre_ForBoxI(fi, fbox_interior_ci) { fgrid_box= hypre_BoxArrayBox(fbox_interior_ci, fi); /*-------------------------------------------------------------- * grab the fine grid ptrs & create the offsets for the fine * grid ptrs. *--------------------------------------------------------------*/ A_dbox= hypre_BoxArrayBox(hypre_StructMatrixDataSpace(smatrix_var), interior_fboxi_ci[fi]); for (i= 0; i< stencil_size; i++) { hypre_CopyIndex(hypre_StructStencilElement(stencils, i), stencil_shape_i); a_ptrs[i]= hypre_StructMatrixExtractPointerByIndex(smatrix_var, interior_fboxi_ci[fi], stencil_shape_i); } /*--------------------------------------------------------------- * Compute the offsets for pointing to the correct data. * Note that for 1-d, OffsetA[j][i]= 0. Therefore, this ptr * will be correct for 1-d. 
*---------------------------------------------------------------*/ for (j= 0; j< 2; j++) { OffsetA[j][0]= 0; for (i= 1; i< refine_factors[j+1]; i++) { if (j == 0) { hypre_SetIndex3(index_temp, 0, i, 0); } else { hypre_SetIndex3(index_temp, 0, 0, i); } OffsetA[j][i] = hypre_BoxOffsetDistance(A_dbox, index_temp); } } hypre_CopyIndex(hypre_BoxIMin(fgrid_box), fstart); hypre_CopyIndex(hypre_BoxIMax(fgrid_box), fend); /* coarsen the interior patch box*/ hypre_ClearIndex(index_temp); hypre_StructMapFineToCoarse(fstart, index_temp, stridef, hypre_BoxIMin(&fine_box)); hypre_StructMapFineToCoarse(fend, index_temp, stridef, hypre_BoxIMax(&fine_box)); hypre_CopyIndex(hypre_BoxIMin(&fine_box), cstart); /*---------------------------------------------------------------- * Loop over interior grid box. *----------------------------------------------------------------*/ hypre_BoxGetSize(&fine_box, loop_size); hypre_BoxLoop2Begin(ndim, loop_size, A_dbox, fstart, stridef, iA, crse_dbox, cstart, stridec, iAc); #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(HYPRE_BOX_PRIVATE,iA,iAc,i,rank,index1,index2,m,l,k,j,iA_shift_z,iA_shift_zy,iA_shift_zyx,stencil_i,sum,vals) HYPRE_SMP_SCHEDULE #endif #else hypre_BoxLoopSetOneBlock(); #endif hypre_BoxLoop2For(iA, iAc) { for (i= 0; i< stencil_size; i++) { rank= stencil_ranks[i]; /*------------------------------------------------------------ * Loop over refinement agglomeration making up a coarse cell * when a non-centre stencil. *------------------------------------------------------------*/ if (rank) { /*-------------------------------------------------------- * Loop over refinement agglomeration extents making up a * a coarse cell. 
*--------------------------------------------------------*/ hypre_CopyIndex(hypre_BoxIMin(shift_box[i]), index1); hypre_CopyIndex(hypre_BoxIMax(shift_box[i]), index2); for (m= 0; m< stencil_contrib_cnt[i]; m++) { vals[m]= 0.0; } /*-------------------------------------------------------- * For 1-d, index1[l]= index2[l]= 0, l>=1. So * iA_shift_zyx= j, * which is correct. Similarly, 2-d is correct. *--------------------------------------------------------*/ for (l= index1[2]; l<= index2[2]; l++) { iA_shift_z= iA + OffsetA[1][l]; for (k= index1[1]; k<= index2[1]; k++) { iA_shift_zy= iA_shift_z + OffsetA[0][k]; for (j= index1[0]; j<= index2[0]; j++) { iA_shift_zyx= iA_shift_zy + j; for (m= 0; m< stencil_contrib_cnt[i]; m++) { stencil_i= stencil_contrib_i[i][m]; vals[m]+= a_ptrs[stencil_i][iA_shift_zyx]; } } } } /*---------------------------------------------------------- * average & weight the contributions and place into coarse * stencil entry. *----------------------------------------------------------*/ crse_ptrs[i][iAc]= 0.0; for (m= 0; m< stencil_contrib_cnt[i]; m++) { crse_ptrs[i][iAc]+= vals[m]*weight_contrib_i[i][m]; } crse_ptrs[i][iAc]/= volume_shift_box[i]; } /* if (rank) */ } /* for i */ /*------------------------------------------------------------------ * centre stencil: * The centre stencil is computed so that the row sum is equal to * the sum of the row sums of the fine matrix. Uses the computed * coarse off-diagonal stencils. * * No fine-coarse interface for the interior boxes. 
*------------------------------------------------------------------*/ hypre_CopyIndex(hypre_BoxIMin(&coarse_cell_box), index1); hypre_CopyIndex(hypre_BoxIMax(&coarse_cell_box), index2); sum= 0.0; for (l= index1[2]; l<= index2[2]; l++) { iA_shift_z= iA + OffsetA[1][l]; for (k= index1[1]; k<= index2[1]; k++) { iA_shift_zy= iA_shift_z + OffsetA[0][k]; for (j= index1[0]; j<= index2[0]; j++) { iA_shift_zyx= iA_shift_zy + j; for (m= 0; m< stencil_size; m++) { sum+= a_ptrs[m][iA_shift_zyx]; } } } } /*--------------------------------------------------------------- * coarse centre coefficient- when away from the fine-coarse * interface, the centre coefficient is the sum of the * off-diagonal components. *---------------------------------------------------------------*/ sum /= scaling; for (m= 0; m< stencil_size; m++) { rank= stencil_ranks[m]; if (rank) { sum-= crse_ptrs[m][iAc]; } } crse_ptrs[ rank_stencils[0] ][iAc]= sum; } hypre_BoxLoop2End(iA, iAc); } /* end hypre_ForBoxI(fi, fbox_interior_ci) */ /*------------------------------------------------------------------ * Loop over the boundaries of each patch inside cgrid_box. *------------------------------------------------------------------*/ hypre_ForBoxArrayI(arrayi, fbox_bdy_ci) { fbox_bdy_ci_fi= hypre_BoxArrayArrayBoxArray(fbox_bdy_ci, arrayi); hypre_ForBoxI(fi, fbox_bdy_ci_fi) { fgrid_box= hypre_BoxArrayBox(fbox_bdy_ci_fi, fi); /*----------------------------------------------------------- * grab the fine grid ptrs & create the offsets for the fine * grid ptrs. 
*-----------------------------------------------------------*/ A_dbox= hypre_BoxArrayBox(hypre_StructMatrixDataSpace(smatrix_var), bdy_fboxi_ci[arrayi]); for (i= 0; i< stencil_size; i++) { hypre_CopyIndex(hypre_StructStencilElement(stencils, i), stencil_shape_i); a_ptrs[i]= hypre_StructMatrixExtractPointerByIndex(smatrix_var, bdy_fboxi_ci[arrayi], stencil_shape_i); } /*-------------------------------------------------------------- * Compute the offsets for pointing to the correct data. *--------------------------------------------------------------*/ for (j= 0; j< 2; j++) { OffsetA[j][0]= 0; for (i= 1; i< refine_factors[j+1]; i++) { if (j == 0) { hypre_SetIndex3(index_temp, 0, i, 0); } else { hypre_SetIndex3(index_temp, 0, 0, i); } OffsetA[j][i] = hypre_BoxOffsetDistance(A_dbox, index_temp); } } hypre_CopyIndex(hypre_BoxIMin(fgrid_box), fstart); hypre_CopyIndex(hypre_BoxIMax(fgrid_box), fend); /* coarsen the patch box*/ hypre_ClearIndex(index_temp); hypre_StructMapFineToCoarse(fstart, index_temp, stridef, hypre_BoxIMin(&fine_box)); hypre_StructMapFineToCoarse(fend, index_temp, stridef, hypre_BoxIMax(&fine_box)); hypre_CopyIndex(hypre_BoxIMin(&fine_box), cstart); /*-------------------------------------------------------------- * Loop over boundary grid box. 
*--------------------------------------------------------------*/ hypre_BoxGetSize(&fine_box, loop_size); hypre_BoxLoop2Begin(ndim, loop_size, A_dbox, fstart, stridef, iA, crse_dbox, cstart, stridec, iAc); #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(HYPRE_BOX_PRIVATE,iA,iAc,i,rank,index1,index2,m,l,k,j,iA_shift_z,iA_shift_zy,iA_shift_zyx,stencil_i,temp3,ll,kk,jj,temp2,cnt1,index_temp,boxman_entry,found,Uventry,nUentries,ncols,rows,cols,vals2,sum,vals) HYPRE_SMP_SCHEDULE #endif #else hypre_BoxLoopSetOneBlock(); #endif hypre_BoxLoop2For(iA, iAc) { hypre_BoxLoopGetIndex(lindex); for (i= 0; i< stencil_size; i++) { rank= stencil_ranks[i]; /*-------------------------------------------------------- * Loop over refinement agglomeration making up a coarse * cell when a non-centre stencil. *--------------------------------------------------------*/ if (rank) { /*----------------------------------------------------- * Loop over refinement agglomeration extents making up * a coarse cell. *-----------------------------------------------------*/ hypre_CopyIndex(hypre_BoxIMin(shift_box[i]), index1); hypre_CopyIndex(hypre_BoxIMax(shift_box[i]), index2); for (m= 0; m< stencil_contrib_cnt[i]; m++) { vals[m]= 0.0; } for (l= index1[2]; l<= index2[2]; l++) { iA_shift_z= iA + OffsetA[1][l]; for (k= index1[1]; k<= index2[1]; k++) { iA_shift_zy= iA_shift_z + OffsetA[0][k]; for (j= index1[0]; j<= index2[0]; j++) { iA_shift_zyx= iA_shift_zy + j; for (m= 0; m< stencil_contrib_cnt[i]; m++) { stencil_i= stencil_contrib_i[i][m]; vals[m]+= a_ptrs[stencil_i][iA_shift_zyx]; } } } } /*--------------------------------------------------------- * average & weight the contributions and place into coarse * stencil entry. 
*---------------------------------------------------------*/ crse_ptrs[i][iAc]= 0.0; for (m= 0; m< stencil_contrib_cnt[i]; m++) { crse_ptrs[i][iAc]+= vals[m]*weight_contrib_i[i][m]; } crse_ptrs[i][iAc]/= volume_shift_box[i]; } /* if (rank) */ } /* for i */ /*--------------------------------------------------------------- * centre stencil: * The centre stencil is computed so that the row sum is equal to * th sum of the row sums of the fine matrix. Uses the computed * coarse off-diagonal stencils. * * Along the fine-coarse interface, we need to add the unstructured * connections. *---------------------------------------------------------------*/ hypre_CopyIndex(hypre_BoxIMin(&coarse_cell_box), index1); hypre_CopyIndex(hypre_BoxIMax(&coarse_cell_box), index2); temp3= hypre_CTAlloc(HYPRE_Real, volume_coarse_cell_box); /*--------------------------------------------------------------- * iA_shift_zyx is computed correctly for 1 & 2-d. Also, * ll= 0 for 2-d, and ll= kk= 0 for 1-d. Correct ptrs. *---------------------------------------------------------------*/ for (l= index1[2]; l<= index2[2]; l++) { iA_shift_z= iA + OffsetA[1][l]; ll = l*refine_factors[1]*refine_factors[0]; for (k= index1[1]; k<= index2[1]; k++) { iA_shift_zy= iA_shift_z + OffsetA[0][k]; kk = ll + k*refine_factors[0]; for (j= index1[0]; j<= index2[0]; j++) { iA_shift_zyx= iA_shift_zy + j; jj = kk + j; for (m= 0; m< stencil_size; m++) { temp3[jj]+= a_ptrs[m][iA_shift_zyx]; } } } } /*------------------------------------------------------------ * extract all unstructured connections. Note that we extract * from sstruct_matrix A, which already has been assembled. 
*------------------------------------------------------------*/ if (nUventries > 0) { temp2= hypre_CTAlloc(HYPRE_Int, volume_coarse_cell_box); cnt1= 0; for (l= index1[2]; l<= index2[2]; l++) { ll= l*refine_factors[1]*refine_factors[0]; for (k= index1[1]; k<= index2[1]; k++) { kk= ll + k*refine_factors[0]; for (j= index1[0]; j<= index2[0]; j++) { jj= kk+ j; hypre_SetIndex3(index_temp, j+lindex[0]*stridef[0], k+lindex[1]*stridef[1], l+lindex[2]*stridef[2]); hypre_AddIndexes(fstart, index_temp, 3, index_temp); hypre_SStructGridFindBoxManEntry(grid, part_fine, index_temp, var1, &boxman_entry); hypre_SStructBoxManEntryGetGlobalRank(boxman_entry, index_temp, &rank, matrix_type); found= falseV; i= hypre_SStructGraphIUVEntry(graph, 0); m= hypre_SStructGraphIUVEntry(graph, nUventries-1); if ((rank-startrank) >= i && (rank-startrank) <= m) { found= trueV; } if (found) { Uventry= hypre_SStructGraphUVEntry(graph, rank-startrank); if (Uventry != NULL) { nUentries= hypre_SStructUVEntryNUEntries(Uventry); m= 0; for (i= 0; i< nUentries; i++) { if (hypre_SStructUVEntryToPart(Uventry, i)==part_crse) { m++; } } /* for (i= 0; i< nUentries; i++) */ temp2[jj]= m; cnt1 += m; } /* if (Uventry != NULL) */ } /* if (found) */ } /* for (j= index1[0]; j<= index2[0]; j++) */ } /* for (k= index1[1]; k<= index2[1]; k++) */ } /* for (l= index1[2]; l<= index2[2]; l++) */ ncols= hypre_TAlloc(HYPRE_Int, cnt1); for (l= 0; l< cnt1; l++) { ncols[l]= 1; } rows = hypre_TAlloc(HYPRE_Int, cnt1); cols = hypre_TAlloc(HYPRE_Int, cnt1); vals2= hypre_CTAlloc(HYPRE_Real, cnt1); cnt1= 0; for (l= index1[2]; l<= index2[2]; l++) { ll= l*refine_factors[1]*refine_factors[0]; for (k= index1[1]; k<= index2[1]; k++) { kk= ll + k*refine_factors[0]; for (j= index1[0]; j<= index2[0]; j++) { jj= kk+ j; hypre_SetIndex3(index_temp, j+lindex[0]*stridef[0], k+lindex[1]*stridef[1], l+lindex[2]*stridef[2]); hypre_AddIndexes(fstart, index_temp, 3, index_temp); hypre_SStructGridFindBoxManEntry(grid, part_fine, index_temp, var1, 
&boxman_entry); hypre_SStructBoxManEntryGetGlobalRank(boxman_entry, index_temp, &rank, matrix_type); found= falseV; if (nUventries > 0) { i= hypre_SStructGraphIUVEntry(graph, 0); m= hypre_SStructGraphIUVEntry(graph, nUventries-1); if ((rank-startrank) >= i && (rank-startrank) <= m) { found= trueV; } } if (found) { Uventry= hypre_SStructGraphUVEntry(graph, rank-startrank); if (Uventry != NULL) { nUentries= hypre_SStructUVEntryNUEntries(Uventry); for (i= 0; i< nUentries; i++) { if (hypre_SStructUVEntryToPart(Uventry, i)==part_crse) { rows[cnt1]= rank; cols[cnt1++]= hypre_SStructUVEntryToRank(Uventry, i); } } /* for (i= 0; i< nUentries; i++) */ } /* if (Uventry != NULL) */ } /* if (found) */ } /* for (j= index1[0]; j<= index2[0]; j++) */ } /* for (k= index1[1]; k<= index2[1]; k++) */ } /* for (l= index1[2]; l<= index2[2]; l++) */ HYPRE_IJMatrixGetValues(ij_A, cnt1, ncols, rows, cols, vals2); cnt1= 0; for (l= index1[2]; l<= index2[2]; l++) { ll= l*refine_factors[1]*refine_factors[0]; for (k= index1[1]; k<= index2[1]; k++) { kk= ll + k*refine_factors[0]; for (j= index1[0]; j<= index2[0]; j++) { jj= kk+ j; for (m= 0; m< temp2[jj]; m++) { temp3[jj]+= vals2[cnt1]; cnt1++; } temp2[jj]= 0; /* zero off for next time */ } /* for (j= index1[0]; j<= index2[0]; j++) */ } /* for (k= index1[1]; k<= index2[1]; k++) */ } /* for (l= index1[2]; l<= index2[2]; l++) */ hypre_TFree(ncols); hypre_TFree(rows); hypre_TFree(cols); hypre_TFree(vals2); hypre_TFree(temp2); } /* if Uventries > 0 */ sum= 0.0; for (l= index1[2]; l<= index2[2]; l++) { ll= l*refine_factors[1]*refine_factors[0]; for (k= index1[1]; k<= index2[1]; k++) { kk= ll + k*refine_factors[0]; for (j= index1[0]; j<= index2[0]; j++) { jj= kk + j; sum+= temp3[jj]; } } } sum /= scaling; crse_ptrs[ rank_stencils[0] ][iAc]= sum; hypre_TFree(temp3); } hypre_BoxLoop2End(iA, iAc); } /* hypre_ForBoxI(fi, fbox_bdy_ci_fi) */ } /* hypre_ForBoxArrayI(arrayi, fbox_bdy_ci) */ } /* hypre_ForBoxI(ci, cgrid_boxes) */ hypre_TFree(a_ptrs); 
hypre_TFree(crse_ptrs); } /* if (stencils != NULL) */ } /* end var2 */ } /* end var1 */ if (stencil_contrib_cnt) hypre_TFree(stencil_contrib_cnt); if (stencil_ranks) hypre_TFree(stencil_ranks); if (volume_shift_box) hypre_TFree(volume_shift_box); if (vals) hypre_TFree(vals); if (shift_box) { for (j= 0; j< stencil_size; j++) { if (shift_box[j]) hypre_BoxDestroy(shift_box[j]); } hypre_TFree(shift_box); } if (stencil_contrib_i) { for (j= 1; j< max_stencil_size; j++) { stencil_i= rank_stencils[j]; if (stencil_i != -1) { if (stencil_contrib_i[stencil_i]) hypre_TFree(stencil_contrib_i[stencil_i]); } } hypre_TFree(stencil_contrib_i); } if (weight_contrib_i) { for (j= 1; j< max_stencil_size; j++) { stencil_i= rank_stencils[j]; if (stencil_i != -1) { if (weight_contrib_i[stencil_i]) hypre_TFree(weight_contrib_i[stencil_i]); } } hypre_TFree(weight_contrib_i); } if (rank_stencils) hypre_TFree(rank_stencils); if (OffsetA) { for (j= 0; j< 2; j++) { if (OffsetA[j]) hypre_TFree(OffsetA[j]); } hypre_TFree(OffsetA); } /*-------------------------------------------------------------------------- * STEP 2: * * Interface coarsening: fine-to-coarse connections. We are * assuming that only like-variables couple along interfaces. * * The task is to coarsen all the fine-to-coarse unstructured * connections and to compute coarse coefficients along the * interfaces (coarse-to-fine coefficients are obtained from these * computed values assuming symmetry). This involves * 1) scanning over the graph entries to find the locations of * the unstructure connections; * 2) determining the stencil shape of the coarsened connections; * 3) averaging the unstructured coefficients to compute * coefficient entries for the interface stencils; * 4) determining the weights of the interface stencil coefficients * to construct the structured coarse grid matrix along the * interfaces. 
* * We perform this task by * 1) scanning over the graph entries to group the locations * of the fine-to-coarse connections wrt the boxes of the * fine grid. Temporary vectors storing the Uventries indices * and the number of connections for each box will be created; * 2) for each fine grid box, group the fine-to-coarse connections * with respect to the connected coarse nodes. Temporary vectors * storing the Uventry indices and the Uentry indices for each * coarse node will be created (i.e., for a fixed coarse grid node, * record the fine node Uventries indices that connect to this * coarse node and Uentry index of the Uventry that contains * this coarse node.). The grouping is accomplished comparing the * ranks of the coarse nodes; * 3) using the Uventries and Uentry indices for each coarse node, * "coarsen" the fine grid connections to this coarse node to * create interface stencils (wrt to the coarse nodes- i.e., * the centre of the stencil is at a coarse node). Also, find * the IJ rows and columns corresponding to all the fine-to-coarse * connections in a box, and extract the unstructured coefficients; * 4) looping over all coarse grid nodes connected to a fixed fine box, * compute the arithmetically averaged interface stencils; * 5) compare the underlying coarse grid structured stencil shape * to the interface stencil shape to determine how to weight the * averaged interface stencil coefficients. * * EXCEPTION: A NODE CAN CONTAIN ONLY UNSTRUCTURED CONNECTIONS * BETWEEN ONLY TWO AMR LEVELS- I.E., WE CANNOT HAVE A NODE THAT * IS ON THE INTERFACE OF MORE THAN TWO AMR LEVELS. CHANGES TO * HANDLE THIS LATTER CASE WILL INVOLVE THE SEARCH FOR f/c * CONNECTIONS. 
*-----------------------------------------------------------------*/ if (nUventries > 0) { nvars = hypre_SStructPMatrixNVars(A_pmatrix); for (var1= 0; var1< nvars; var1++) { /*----------------------------------------------------------------- * Yank out the structured stencils for this variable (only like * variables considered) and find their ranks. *-----------------------------------------------------------------*/ stencils = hypre_SStructPMatrixSStencil(A_crse, var1, var1); stencil_size= hypre_StructStencilSize(stencils); stencil_ranks= hypre_TAlloc(HYPRE_Int, stencil_size); rank_stencils= hypre_TAlloc(HYPRE_Int, max_stencil_size); for (i= 0; i< stencil_size; i++) { hypre_CopyIndex(hypre_StructStencilElement(stencils, i), stencil_shape_i); MapStencilRank( stencil_shape_i, stencil_ranks[i] ); rank_stencils[ stencil_ranks[i] ] = i; } /*----------------------------------------------------------------- * qsort the ranks into ascending order *-----------------------------------------------------------------*/ hypre_qsort0(stencil_ranks, 0, stencil_size-1); crse_smatrix= hypre_SStructPMatrixSMatrix(A_crse, var1, var1); cgrid= hypre_SStructPGridSGrid(hypre_SStructPMatrixPGrid(A_crse), var1); cgrid_boxes= hypre_StructGridBoxes(cgrid); fgrid= hypre_SStructPGridSGrid(hypre_SStructPMatrixPGrid(A_pmatrix), var1); fgrid_boxes= hypre_StructGridBoxes(fgrid); box_starts= hypre_CTAlloc(HYPRE_Int, hypre_BoxArraySize(fgrid_boxes)); box_ends = hypre_CTAlloc(HYPRE_Int, hypre_BoxArraySize(fgrid_boxes)); hypre_SStructGraphFindSGridEndpts(graph, part_fine, var1, myid, 0, box_starts); hypre_SStructGraphFindSGridEndpts(graph, part_fine, var1, myid, 1, box_ends); /*----------------------------------------------------------------- * Step 1: scanning over the graph entries to group the locations * of the unstructured connections wrt to fine grid boxes. * * Count the components that couple for each box. * * box_graph_indices[fi]= array of Uventries indices in box fi. 
* box_graph_cnts[fi] = number of Uventries in box fi. * cdata_space_rank[ci] = begin offset rank of coarse data_space * box ci. *-----------------------------------------------------------------*/ box_array_size = hypre_BoxArraySize(fgrid_boxes); cbox_array_size = hypre_BoxArraySize(cgrid_boxes); box_graph_indices= hypre_CTAlloc(HYPRE_Int *, box_array_size); box_graph_cnts = hypre_CTAlloc(HYPRE_Int , box_array_size); data_space = hypre_StructMatrixDataSpace(crse_smatrix); cdata_space_ranks= hypre_CTAlloc(HYPRE_Int, cbox_array_size); cdata_space_ranks[0]= 0; for (i= 1; i< cbox_array_size; i++) { cdata_space_ranks[i]= cdata_space_ranks[i-1]+ hypre_BoxVolume(hypre_BoxArrayBox(data_space, i-1)); } /*----------------------------------------------------------------- * Scanning obtained by searching iUventries between the start * and end of a fine box. Binary search used to find the interval * between these two endpts. Index (-1) returned if no interval * bounds found. Note that if start has positive index, then end * must have a positive index also. *-----------------------------------------------------------------*/ for (fi= 0; fi< box_array_size; fi++) { i= hypre_LowerBinarySearch(iUventries, box_starts[fi], nUventries); if (i >= 0) { j= hypre_UpperBinarySearch(iUventries, box_ends[fi], nUventries); box_graph_indices[fi]= hypre_TAlloc(HYPRE_Int, j-i+1); for (k= 0; k< (j-i+1); k++) { Uventry= hypre_SStructGraphUVEntry(graph, iUventries[i+k]); for (m= 0; m< hypre_SStructUVEntryNUEntries(Uventry); m++) { if (hypre_SStructUVEntryToPart(Uventry, m) == part_crse) { box_graph_indices[fi][box_graph_cnts[fi]]= iUventries[i+k]; box_graph_cnts[fi]++; break; } } /* for (m= 0; m< hypre_SStructUVEntryNUEntries(Uventry); m++) */ } /* for (k= 0; k< (j-i+1); k++) */ } /* if (i >= 0) */ } /* for (fi= 0; fi< box_array_size; fi++) */ /*----------------------------------------------------------------- * Step 2: * Determine and group the fine-to-coarse connections in a box. 
* Grouped according to the coarsened fine grid interface nodes. * * box_ranks = ranks of coarsened fine grid interface * nodes. * box_connections = counter for the distinct coarsened fine * grid interface nodes. This can be * used to group all the Uventries of a * coarsened fine grid node. * cindex[l] = the hypre_Index of coarsen node l. * parents_cnodes[l] = parent box that contains the coarsened * fine grid interface node l. * fine_interface_ranks[l]= rank of coarsened fine grid interface * node l. * box_ranks_cnt[l] = counter for no. of Uventries for * coarsened node l. * coarse_contrib_Uv[l] = Uventry indices for Uventries that * contain fine-to-coarse connections of * coarse node l. *-----------------------------------------------------------------*/ for (fi= 0; fi< box_array_size; fi++) { /*------------------------------------------------------------- * Determine the coarse data ptrs corresponding to fine box fi. * These are needed in assigning the averaged unstructured * coefficients. * * Determine how many distinct coarse grid nodes are in the * unstructured connection for a given box. Each node has a * structures. * * temp1 & temp2 are linked lists vectors used for grouping the * Uventries for a given coarse node. *-------------------------------------------------------------*/ box_ranks = hypre_TAlloc(HYPRE_Int, box_graph_cnts[fi]); box_connections = hypre_TAlloc(HYPRE_Int, box_graph_cnts[fi]); parents = hypre_TAlloc(HYPRE_Int, box_graph_cnts[fi]); temp1 = hypre_CTAlloc(HYPRE_Int, box_graph_cnts[fi]+1); temp2 = hypre_CTAlloc(HYPRE_Int, box_graph_cnts[fi]); Uv_cindex = hypre_TAlloc(hypre_Index, box_graph_cnts[fi]); /*------------------------------------------------------------- * determine the parent box of this fgrid_box. 
*-------------------------------------------------------------*/ hypre_ClearIndex(index_temp); for (i= 0; i < box_graph_cnts[fi]; i++) { Uventry = Uventries[box_graph_indices[fi][i]]; /*------------------------------------------------------------- * Coarsen the fine grid interface nodes and then get their * ranks. The correct coarse grid is needed to determine the * correct data_box. * Save the rank of the coarsened index & the parent box id. *-------------------------------------------------------------*/ hypre_CopyIndex(hypre_SStructUVEntryIndex(Uventry), index); hypre_StructMapFineToCoarse(index, index_temp, stridef, Uv_cindex[i]); hypre_BoxSetExtents(&fine_box, Uv_cindex[i], Uv_cindex[i]); for (j= 0; j< cboxi_fcnt[var1][fi]; j++) { ci= cboxi_fboxes[var1][fi][j]; cgrid_box= hypre_BoxArrayBox(cgrid_boxes, ci); hypre_IntersectBoxes(&fine_box, cgrid_box, &intersect_box); if (hypre_BoxVolume(&intersect_box) > 0) { break; } } parents[i] = ci; box_ranks[i]= cdata_space_ranks[ci] + hypre_BoxIndexRank(hypre_BoxArrayBox(data_space, ci), Uv_cindex[i]); } /*--------------------------------------------------------------- * Determine and "group" the Uventries using the box_ranks. * temp2 stores the Uventries indices for a coarsen node. *---------------------------------------------------------------*/ cnt1= 0; j = 0; temp1[cnt1]= j; for (i= 0; i< box_graph_cnts[fi]; i++) { if (box_ranks[i] != -1) { k = box_ranks[i]; box_connections[i]= cnt1; temp2[j++] = box_graph_indices[fi][i]; for (l= i+1; l< box_graph_cnts[fi]; l++) { if (box_ranks[l] == k) { box_connections[l]= cnt1; temp2[j++] = box_graph_indices[fi][l]; box_ranks[l] =-1; } } cnt1++; temp1[cnt1]= j; } } /*----------------------------------------------------------------- * Store the graph entry info and other index info for each coarse * grid node. 
*-----------------------------------------------------------------*/ parents_cnodes = hypre_TAlloc(HYPRE_Int, cnt1); fine_interface_ranks= hypre_TAlloc(HYPRE_Int, cnt1); box_ranks_cnt = hypre_CTAlloc(HYPRE_Int, cnt1); coarse_contrib_Uv = hypre_TAlloc(HYPRE_Int *, cnt1); cindex = hypre_TAlloc(hypre_Index, cnt1); for (i= 0; i< box_graph_cnts[fi]; i++) { if (box_ranks[i] != -1) { j = box_connections[i]; parents_cnodes[j] = parents[i]; fine_interface_ranks[j]= hypre_BoxIndexRank(hypre_BoxArrayBox(data_space, parents[i]), Uv_cindex[i]); hypre_CopyIndex(Uv_cindex[i], cindex[j]); box_ranks_cnt[j] = temp1[j+1] - temp1[j]; coarse_contrib_Uv[j] = hypre_TAlloc(HYPRE_Int, box_ranks_cnt[j]); l = temp1[j]; for (k= 0; k< box_ranks_cnt[j]; k++) { coarse_contrib_Uv[j][k]= temp2[l+k]; } } } if (box_ranks) hypre_TFree(box_ranks); if (box_connections) hypre_TFree(box_connections); if (parents) hypre_TFree(parents); if (temp1) hypre_TFree(temp1); if (temp2) hypre_TFree(temp2); if (Uv_cindex) hypre_TFree(Uv_cindex); /*------------------------------------------------------------------------ * Step 3: * Create the interface stencils. * * interface_max_stencil_ranks[i] = stencil_shape rank for each coarse * Uentry connection of coarsened node * i (i.e., the stencil_shape ranks of * the interface stencils at node i). * interface_max_stencil_cnt[i][m]= counter for number of Uentries * that describes a connection which * coarsens into stencil_shape rank m. * coarse_stencil_cnts[i] = counter for the no. of distinct * interface stencil_shapes (i.e., the * no. entries of the interface stencil). * interface_stencil_ranks[i][l] = stencil_shape rank for interface * stencil entry l, for coarse node i. * interface_rank_stencils[i][j] = interface stencil entry for * stencil_shape rank j, for node i. 
*------------------------------------------------------------------------*/ /*----------------------------------------------------------------- * Extract rows & cols info for extracting data from IJ matrix. * Extract for all connections for a box. *-----------------------------------------------------------------*/ hypre_ClearIndex(index_temp); nrows= 0; box_to_ranks_cnt= hypre_CTAlloc(HYPRE_Int, cnt1); for (i= 0; i< cnt1; i++) { for (j= 0; j< box_ranks_cnt[i]; j++) { Uventry = Uventries[ coarse_contrib_Uv[i][j] ]; for (k= 0; k< hypre_SStructUVEntryNUEntries(Uventry); k++) { if (hypre_SStructUVEntryToPart(Uventry, k) == part_crse) { box_to_ranks_cnt[i]++; } } } nrows+= box_to_ranks_cnt[i]; } ncols= hypre_TAlloc(HYPRE_Int, nrows); for (i= 0; i< nrows; i++) { ncols[i]= 1; } rows= hypre_TAlloc(HYPRE_Int, nrows); cols= hypre_TAlloc(HYPRE_Int, nrows); vals= hypre_CTAlloc(HYPRE_Real, nrows); interface_max_stencil_ranks= hypre_TAlloc(HYPRE_Int *, cnt1); interface_max_stencil_cnt = hypre_TAlloc(HYPRE_Int *, cnt1); interface_rank_stencils = hypre_TAlloc(HYPRE_Int *, cnt1); interface_stencil_ranks = hypre_TAlloc(HYPRE_Int *, cnt1); coarse_stencil_cnt = hypre_CTAlloc(HYPRE_Int , cnt1); k= 0; for (i= 0; i< cnt1; i++) { /*----------------------------------------------------------------- * for each coarse interface node, we get a stencil. We compute only * the ranks assuming a maximum size stencil of 27. *-----------------------------------------------------------------*/ interface_max_stencil_ranks[i]= hypre_TAlloc(HYPRE_Int, box_to_ranks_cnt[i]); interface_max_stencil_cnt[i] = hypre_CTAlloc(HYPRE_Int, max_stencil_size); /*----------------------------------------------------------------- * conjugate the coarse node index for determining the stencil * shapes for the Uentry connections. 
*-----------------------------------------------------------------*/ hypre_CopyIndex(cindex[i], index1); hypre_SetIndex3(index1, -index1[0], -index1[1], -index1[2]); n= 0; for (j= 0; j< box_ranks_cnt[i]; j++) { /*-------------------------------------------------------------- * extract the row rank for a given Uventry. Note that these * are the ranks in the grid of A. Therefore, we grab the index * from the nested_graph Uventry to determine the global rank. * With the rank, find the corresponding Uventry of the graph * of A. The to_ranks now can be extracted out. *--------------------------------------------------------------*/ Uventry = Uventries[ coarse_contrib_Uv[i][j] ]; hypre_CopyIndex(hypre_SStructUVEntryIndex(Uventry), index); hypre_SStructGridFindBoxManEntry(grid, part_fine, index, var1, &boxman_entry); hypre_SStructBoxManEntryGetGlobalRank(boxman_entry, index, &rank, matrix_type); Uventry= hypre_SStructGraphUVEntry(graph, rank-startrank); nUentries= hypre_SStructUVEntryNUEntries(Uventry); for (l= 0; l< nUentries; l++) { if (hypre_SStructUVEntryToPart(Uventry, l) == part_crse) { to_rank = hypre_SStructUVEntryToRank(Uventry, l); rows[k] = rank; cols[k++]= to_rank; /*--------------------------------------------------------- * compute stencil shape for this Uentry. *---------------------------------------------------------*/ hypre_CopyIndex( hypre_SStructUVEntryToIndex(Uventry,l), index ); hypre_AddIndexes(index, index1, 3, index2); MapStencilRank(index2, m); interface_max_stencil_ranks[i][n++]= m; interface_max_stencil_cnt[i][m]++; } } } hypre_TFree(coarse_contrib_Uv[i]); /*----------------------------------------------------------------- * Determine only the distinct stencil ranks for coarse node i. 
*-----------------------------------------------------------------*/ l= 0; for (j= 0; j< max_stencil_size; j++) { if (interface_max_stencil_cnt[i][j]) { l++; } } coarse_stencil_cnt[i]= l; interface_stencil_ranks[i]= hypre_TAlloc(HYPRE_Int, l); interface_rank_stencils[i]= hypre_TAlloc(HYPRE_Int, max_stencil_size); /*----------------------------------------------------------------- * For each stencil rank, assign one of the stencil_shape_i index. *-----------------------------------------------------------------*/ l= 0; for (j= 0; j< max_stencil_size; j++) { if (interface_max_stencil_cnt[i][j]) { interface_rank_stencils[i][j]= l; interface_stencil_ranks[i][l]= j; l++; } } } /* for (i= 0; i< cnt1; i++) */ hypre_TFree(coarse_contrib_Uv); hypre_TFree(box_ranks_cnt); hypre_TFree(cindex); /*----------------------------------------------------------------- * Extract data from IJ matrix *-----------------------------------------------------------------*/ HYPRE_IJMatrixGetValues(ij_A, nrows, ncols, rows, cols, vals); hypre_TFree(ncols); hypre_TFree(rows); hypre_TFree(cols); /*----------------------------------------------------------------- * Steps 4 & 5: * Compute the arithmetically averaged interface stencils, * and determine the interface stencil weights. * * stencil_vals[l] = averaged stencil coeff for interface * stencil entry l. * common_rank_stencils = final structured coarse stencil entries * for the stencil_shapes that the * interface stencils must collapse to. * common_stencil_ranks = final structured coarse stencil_shape * ranks for the stencil_shapes that the * interface stencils must collapse to. * common_stencil_i = stencil entry of the interface stencil * corresponding to the common * stencil_shape. 
*-----------------------------------------------------------------*/ k= 0; for (i= 0; i< cnt1; i++) { stencil_vals= hypre_CTAlloc(HYPRE_Real, coarse_stencil_cnt[i]); /*----------------------------------------------------------------- * Compute the arithmetic stencil averages for coarse node i. *-----------------------------------------------------------------*/ for (j= 0; j< box_to_ranks_cnt[i]; j++) { m= interface_max_stencil_ranks[i][j]; l= interface_rank_stencils[i][m]; stencil_vals[l]+= vals[k]/interface_max_stencil_cnt[i][m]; k++; } hypre_TFree(interface_max_stencil_ranks[i]); hypre_TFree(interface_max_stencil_cnt[i]); hypre_TFree(interface_rank_stencils[i]); /*----------------------------------------------------------------- * Determine which stencil has to be formed. This is accomplished * by comparing the coarse grid stencil ranks with the computed * interface stencil ranks. We qsort (if there are more than one * rank) the ranks to give quick comparisons. Note that we need * to swap the elements of stencil_vals & fine_interface_ranks[i]'s * accordingly. *-----------------------------------------------------------------*/ sort= falseV; for (j= 0; j< (coarse_stencil_cnt[i]-1); j++) { if (interface_stencil_ranks[i][j] > interface_stencil_ranks[i][j+1]) { sort= trueV; break; } } if ( (coarse_stencil_cnt[i]>1) && (sort==trueV) ) { temp1= hypre_TAlloc(HYPRE_Int, coarse_stencil_cnt[i]); for (j= 0; j< coarse_stencil_cnt[i]; j++) { temp1[j]= j; } hypre_qsort1(interface_stencil_ranks[i], (HYPRE_Real *) temp1, 0, coarse_stencil_cnt[i]-1); /*--------------------------------------------------------------- * swap the stencil_vals to agree with the rank swapping. 
*---------------------------------------------------------------*/ temp3 = hypre_TAlloc(HYPRE_Real, coarse_stencil_cnt[i]); for (j=0; j< coarse_stencil_cnt[i]; j++) { m = temp1[j]; temp3[j] = stencil_vals[m]; } for (j=0; j< coarse_stencil_cnt[i]; j++) { stencil_vals[j]= temp3[j]; } hypre_TFree(temp1); hypre_TFree(temp3); } /*----------------------------------------------------------------- * Compute the weights for the averaged stencil contributions. * We need to convert the ranks back to stencil_shapes and then * find the abs of the stencil shape. *-----------------------------------------------------------------*/ temp3= hypre_TAlloc(HYPRE_Real, coarse_stencil_cnt[i]); for (j=0; j< coarse_stencil_cnt[i]; j++) { InverseMapStencilRank(interface_stencil_ranks[i][j], index_temp); AbsStencilShape(index_temp, abs_stencil_shape); temp3[j]= weights[abs_stencil_shape]; } /*----------------------------------------------------------------- * Compare the coarse stencil and the interface stencil and * extract the common stencil shapes. * WE ARE ASSUMING THAT THE COARSE INTERFACE STENCIL HAS SOME * COMMON STENCIL SHAPE WITH THE COARSE STENCIL. *-----------------------------------------------------------------*/ common_rank_stencils= hypre_TAlloc(HYPRE_Int, stencil_size); common_stencil_ranks= hypre_TAlloc(HYPRE_Int, stencil_size); common_stencil_i = hypre_TAlloc(HYPRE_Int, stencil_size); l= 0; m= 0; for (j= 0; j< stencil_size; j++) { while( (l < coarse_stencil_cnt[i]) && (stencil_ranks[j] > interface_stencil_ranks[i][l]) ) { l++; } if (l >= coarse_stencil_cnt[i]) { break; } /*-------------------------------------------------------------- * Check if a common stencil shape rank has been found. 
*--------------------------------------------------------------*/ if ( (stencil_ranks[j] == interface_stencil_ranks[i][l]) && (l < coarse_stencil_cnt[i]) ) { common_rank_stencils[m]= rank_stencils[ stencil_ranks[j] ]; common_stencil_ranks[m]= stencil_ranks[j]; common_stencil_i[m++] = l; l++; } } /*----------------------------------------------------------------- * Find the contribution and weights for the averaged stencils. *-----------------------------------------------------------------*/ for (j= 0; j< m; j++) { hypre_CopyIndex(hypre_StructStencilElement( stencils, common_rank_stencils[j]), stencil_shape_i); AbsStencilShape(stencil_shape_i, abs_stencil_shape); crse_ptr= hypre_StructMatrixExtractPointerByIndex(crse_smatrix, parents_cnodes[i], stencil_shape_i); /*----------------------------------------------------------------- * For a compact stencil (e.g., -1 <= hypre_Index[i] <= 1, i= 0-2), * the value of abs_stencil_shape can be used to determine the * stencil: * abs_stencil_shape= 3 only corners in 3-d * 2 corners in 2-d; or the centre plane * in 3-d, or e,w,n,s of the bottom * or top plane in 3-d * 1 e,w in 1-d; or e,w,n,s in 2-d; * or the centre plane in 3-d, * or c of the bottom or top plane * in 3-d * 0 c in 1-d, 2-d, or 3-d. *-----------------------------------------------------------------*/ switch(abs_stencil_shape) { case 3: /* corners of 3-d stencil */ l= common_stencil_i[j]; crse_ptr[fine_interface_ranks[i]]= stencil_vals[l]; break; case 2: /* corners in 2-d or edges in 3-d */ if (ndim ==2) { l= common_stencil_i[j]; crse_ptr[fine_interface_ranks[i]]= stencil_vals[l]; } else if (ndim == 3) { /*---------------------------------------------------------- * The edge values are weighted sums of the averaged * coefficients. The weights and averaged coefficients must * be found. 
The contributions are found using the stencil * ranks and the stencil ordering * top: 14 12 13 centre: 5 3 4 bottom 23 21 22 * 11 9 10 2 0 1 20 18 19 * 17 15 16 8 6 7 26 24 25 *----------------------------------------------------------*/ l = common_stencil_ranks[j]; temp1= hypre_TAlloc(HYPRE_Int, 2); switch(l) { case 4: /* centre plane ne */ temp1[0]= 13; temp1[1]= 22; break; case 5: /* centre plane nw */ temp1[0]= 14; temp1[1]= 23; break; case 7: /* centre plane se */ temp1[0]= 16; temp1[1]= 25; break; case 8: /* centre plane sw */ temp1[0]= 17; temp1[1]= 26; break; case 10: /* top plane e */ temp1[0]= 13; temp1[1]= 16; break; case 11: /* top plane w */ temp1[0]= 14; temp1[1]= 17; break; case 12: /* top plane n */ temp1[0]= 13; temp1[1]= 14; break; case 15: /* top plane s */ temp1[0]= 16; temp1[1]= 17; break; case 19: /* bottom plane e */ temp1[0]= 22; temp1[1]= 25; break; case 20: /* bottom plane w */ temp1[0]= 23; temp1[1]= 26; break; case 21: /* bottom plane n */ temp1[0]= 22; temp1[1]= 23; break; case 24: /* bottom plane s */ temp1[0]= 25; temp1[1]= 26; break; } /*------------------------------------------------------- * Add up the weighted contributions of the interface * stencils. This involves searching the ranks of * interface_stencil_ranks. The weights must be averaged. 
*-------------------------------------------------------*/ l= common_stencil_i[j]; sum= temp3[l]; sum_contrib= sum*stencil_vals[l]; n= 1; for (l= 0; l< 2; l++) { while ( (n < coarse_stencil_cnt[i]) &&(interface_stencil_ranks[i][n] < temp1[l]) ) { n++; } if (n >= coarse_stencil_cnt[i]) { break; } if (interface_stencil_ranks[i][n] == temp1[l]) { sum+= temp3[n]; sum_contrib+= temp3[n]*stencil_vals[n]; n++; } } sum_contrib/= sum; /* average out the weights */ l= common_stencil_i[j]; crse_ptr[fine_interface_ranks[i]]= sum_contrib; hypre_TFree(temp1); } /* else if (ndim == 3) */ break; case 1: /* e,w in 1-d, or edges in 2-d, or faces in 3-d */ if (ndim == 1) { l= common_stencil_i[j]; crse_ptr[fine_interface_ranks[i]]= stencil_vals[l]; } else if (ndim == 2) { l = common_stencil_ranks[j]; temp1= hypre_TAlloc(HYPRE_Int, 2); switch(l) { case 1: /* e */ temp1[0]= 4; temp1[1]= 7; break; case 2: /* w */ temp1[0]= 5; temp1[1]= 8; break; case 3: /* n */ temp1[0]= 4; temp1[1]= 5; break; case 6: /* s */ temp1[0]= 7; temp1[1]= 8; break; } /*------------------------------------------------------- * Add up the weighted contributions of the interface * stencils. 
*-------------------------------------------------------*/ l= common_stencil_i[j]; sum= temp3[l]; sum_contrib= sum*stencil_vals[l]; n= 1; for (l= 0; l< 2; l++) { while ( (n < coarse_stencil_cnt[i]) &&(interface_stencil_ranks[i][n] < temp1[l]) ) { n++; } if (n >= coarse_stencil_cnt[i]) { break; } if (interface_stencil_ranks[i][n] == temp1[l]) { sum+= temp3[n]; sum_contrib+= temp3[n]*stencil_vals[n]; n++; } } sum_contrib/= sum; /* average out the weights */ l= common_stencil_i[j]; crse_ptr[fine_interface_ranks[i]]= sum_contrib; hypre_TFree(temp1); } /* else if (ndim == 2) */ else /* 3-d */ { l = common_stencil_ranks[j]; temp1= hypre_TAlloc(HYPRE_Int, 8); switch(l) { case 1: /* centre plane e */ temp1[0]= 4; temp1[1]= 7; temp1[2]= 10; temp1[3]= 13; temp1[4]= 16; temp1[5]= 19; temp1[6]= 22; temp1[7]= 25; break; case 2: /* centre plane w */ temp1[0]= 5; temp1[1]= 8; temp1[2]= 11; temp1[3]= 14; temp1[4]= 17; temp1[5]= 20; temp1[6]= 23; temp1[7]= 26; break; case 3: /* centre plane n */ temp1[0]= 4; temp1[1]= 5; temp1[2]= 12; temp1[3]= 13; temp1[4]= 14; temp1[5]= 21; temp1[6]= 22; temp1[7]= 23; break; case 6: /* centre plane s */ temp1[0]= 7; temp1[1]= 8; temp1[2]= 15; temp1[3]= 16; temp1[4]= 17; temp1[5]= 24; temp1[6]= 25; temp1[7]= 26; break; case 9: /* top plane c */ for (n= 0; n< 8; n++) { temp1[n]= 10+n; } break; case 18: /* bottom plane c */ for (n= 0; n< 8; n++) { temp1[n]= 19+n; } break; } /*------------------------------------------------------- * Add up the weighted contributions of the interface * stencils. 
*-------------------------------------------------------*/ l= common_stencil_i[j]; sum= temp3[l]; sum_contrib= sum*stencil_vals[l]; n= 1; for (l= 0; l< 8; l++) { while ( (n < coarse_stencil_cnt[i]) && (interface_stencil_ranks[i][n] < temp1[l]) ) { n++; } if (n >= coarse_stencil_cnt[i]) { break; } if (interface_stencil_ranks[i][n] == temp1[l]) { sum+= temp3[n]; sum_contrib+= temp3[n]*stencil_vals[n]; n++; } } sum_contrib/= sum; /* average out the weights */ l= common_stencil_i[j]; crse_ptr[fine_interface_ranks[i]]= sum_contrib; hypre_TFree(temp1); } /* else */ break; } /* switch(abs_stencil_shape) */ } /* for (j= 0; j< m; j++) */ hypre_TFree(interface_stencil_ranks[i]); hypre_TFree(stencil_vals); hypre_TFree(temp3); hypre_TFree(common_rank_stencils); hypre_TFree(common_stencil_ranks); hypre_TFree(common_stencil_ranks); hypre_TFree(common_stencil_i); } /* for (i= 0; i< cnt1; i++) */ hypre_TFree(box_to_ranks_cnt); hypre_TFree(interface_max_stencil_ranks); hypre_TFree(interface_max_stencil_cnt); hypre_TFree(interface_rank_stencils); hypre_TFree(interface_stencil_ranks); hypre_TFree(coarse_stencil_cnt); hypre_TFree(fine_interface_ranks); hypre_TFree(parents_cnodes); hypre_TFree(vals); /*----------------------------------------------------------- * Box fi is completed. *-----------------------------------------------------------*/ } /* for (fi= 0; fi< box_array_size; fi++) */ hypre_TFree(stencil_ranks); hypre_TFree(rank_stencils); hypre_TFree(cdata_space_ranks); hypre_TFree(box_graph_cnts); for (i= 0; i< box_array_size; i++) { if (box_graph_indices[i]) hypre_TFree(box_graph_indices[i]); } hypre_TFree(box_graph_indices); hypre_TFree(box_starts); hypre_TFree(box_ends); } /* for (var1= 0; var1< nvars; var1++) */ } /* if (nUventries > 0) */ /*-------------------------------------------------------------------------- * STEP 3: * Coarsened f/c interface coefficients can be used to create the * centre components along the coarsened f/c nodes now. 
Loop over * the coarsened fbox_bdy's and set the centre stencils. *--------------------------------------------------------------------------*/ hypre_ClearIndex(index_temp); for (var1= 0; var1< nvars; var1++) { /* only like variables couple. */ smatrix_var = hypre_SStructPMatrixSMatrix(A_crse, var1, var1); stencils = hypre_SStructPMatrixSStencil(A_crse, var1, var1); stencil_size = hypre_StructStencilSize(stencils); a_ptrs = hypre_TAlloc(HYPRE_Real *, stencil_size); rank_stencils= hypre_TAlloc(HYPRE_Int, max_stencil_size); for (i= 0; i< stencil_size; i++) { hypre_CopyIndex(hypre_StructStencilElement(stencils, i), stencil_shape_i); MapStencilRank(stencil_shape_i, rank); rank_stencils[rank]= i; } centre= rank_stencils[0]; cgrid= hypre_SStructPGridSGrid(hypre_SStructPMatrixPGrid(A_crse), var1); cgrid_boxes= hypre_StructGridBoxes(cgrid); hypre_ForBoxI(ci, cgrid_boxes) { A_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(smatrix_var), ci); fbox_bdy_ci= fbox_bdy[var1][ci]; for (i= 0; i< stencil_size; i++) { hypre_CopyIndex(hypre_StructStencilElement(stencils, i), stencil_shape_i); a_ptrs[i]= hypre_StructMatrixExtractPointerByIndex(smatrix_var, ci, stencil_shape_i); } /*------------------------------------------------------------------ * Loop over the boundaries of each patch inside cgrid_box ci. * These patch boxes must be coarsened to get the correct extents. 
*------------------------------------------------------------------*/ hypre_ForBoxArrayI(arrayi, fbox_bdy_ci) { fbox_bdy_ci_fi= hypre_BoxArrayArrayBoxArray(fbox_bdy_ci, arrayi); hypre_ForBoxI(fi, fbox_bdy_ci_fi) { fgrid_box= hypre_BoxArrayBox(fbox_bdy_ci_fi, fi); hypre_StructMapFineToCoarse(hypre_BoxIMin(fgrid_box), index_temp, stridef, hypre_BoxIMin(&fine_box)); hypre_StructMapFineToCoarse(hypre_BoxIMax(fgrid_box), index_temp, stridef, hypre_BoxIMax(&fine_box)); hypre_CopyIndex(hypre_BoxIMin(&fine_box), cstart); hypre_BoxGetSize(&fine_box, loop_size); hypre_BoxLoop1Begin(ndim, loop_size, A_dbox, cstart, stridec, iA); #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(HYPRE_BOX_PRIVATE,iA,i) HYPRE_SMP_SCHEDULE #endif hypre_BoxLoop1For(iA) { for (i= 0; i< stencil_size; i++) { if (i != centre) { a_ptrs[centre][iA]-= a_ptrs[i][iA]; } } } hypre_BoxLoop1End(iA); } /* hypre_ForBoxI(fi, fbox_bdy_ci_fi) */ } /* hypre_ForBoxArrayI(arrayi, fbox_bdy_ci) */ } /* hypre_ForBoxI(ci, cgrid_boxes) */ hypre_TFree(a_ptrs); hypre_TFree(rank_stencils); } /* for (var1= 0; var1< nvars; var1++) */ for (var1= 0; var1< nvars; var1++) { cgrid= hypre_SStructPGridSGrid(hypre_SStructPMatrixPGrid(A_crse), var1); cgrid_boxes= hypre_StructGridBoxes(cgrid); fgrid= hypre_SStructPGridSGrid(hypre_SStructPMatrixPGrid(A_pmatrix), var1); fgrid_boxes= hypre_StructGridBoxes(fgrid); hypre_ForBoxI(ci, cgrid_boxes) { hypre_BoxArrayDestroy(fgrid_crse_extents[var1][ci]); hypre_BoxArrayDestroy(fbox_interior[var1][ci]); hypre_BoxArrayArrayDestroy(fbox_bdy[var1][ci]); hypre_TFree(interior_fboxi[var1][ci]); hypre_TFree(bdy_fboxi[var1][ci]); } hypre_TFree(fgrid_crse_extents[var1]); hypre_TFree(fbox_interior[var1]); hypre_TFree(fbox_bdy[var1]); hypre_TFree(interior_fboxi[var1]); hypre_TFree(bdy_fboxi[var1]); hypre_ForBoxI(fi, fgrid_boxes) { hypre_TFree(cboxi_fboxes[var1][fi]); } hypre_TFree(cboxi_fboxes[var1]); hypre_TFree(cboxi_fcnt[var1]); } hypre_TFree(fgrid_crse_extents); hypre_TFree(fbox_interior); 
hypre_TFree(fbox_bdy); hypre_TFree(interior_fboxi); hypre_TFree(bdy_fboxi); hypre_TFree(cboxi_fboxes); hypre_TFree(cboxi_fcnt); return 0; }
LAGraph_SortByDegree.c
//------------------------------------------------------------------------------
// LAGraph_SortByDegree: sort a graph by its row or column degree
//------------------------------------------------------------------------------

// LAGraph, (c) 2021 by The LAGraph Contributors, All Rights Reserved.
// SPDX-License-Identifier: BSD-2-Clause

// Contributed by Tim Davis, Texas A&M University.

//------------------------------------------------------------------------------

// LAGraph_SortByDegree computes a permutation vector P that sorts a graph
// by degree (either row or column degree of its adjacency matrix A).

// If G is undirected, or if G is directed but is known to have a symmetric
// adjacency matrix, then G->rowdegree is used (and byrow is ignored).
// Otherwise, G->rowdegree is used if byrow is true, and G->coldegree is
// used if byrow is false.

// G->rowdegree or G->coldegree must first be computed.  An error is returned
// if the required degree vector has not yet been computed.  See
// LAGraph_Property_RowDegree and LAGraph_Property_ColDegree.

// The permutation is in ascending order of degree if ascending is true, and
// in descending order otherwise.

// Ties are broken by the node id, so the sort is always predictable.  Lower
// numbered rows/columns always appear before higher ones, if they have the
// same degree.

// The output is a permutation P where P [k] = i if row i is the kth row in
// the permutation (or P [k] = j if column j is the kth column in the
// permutation, with byrow false).
// Free the internal workspace W and D.
// NOTE(review): LAGraph_Free is assumed to clear the freed pointer, so
// invoking this macro after the early free of W below is safe — confirm
// against the LAGraph_Free implementation.
#define LAGRAPH_FREE_WORK                       \
{                                               \
    LAGraph_Free ((void **) &W, W_size) ;       \
    LAGraph_Free ((void **) &D, D_size) ;       \
}

// Free everything, including the result P.  Used by the LG_CHECK error
// macros below when bailing out on failure.
#define LAGRAPH_FREE_ALL                        \
{                                               \
    LAGRAPH_FREE_WORK ;                         \
    LAGraph_Free ((void **) &P, Psize) ;        \
}

#include "LG_internal.h"

int LAGraph_SortByDegree    // returns 0 if successful, -1 if failure
(
    // output
    int64_t **P_handle,     // P is returned as a permutation vector of size n
    size_t *P_size,         // size of P in bytes
    // input
    LAGraph_Graph G,        // graph of n nodes
    bool byrow,             // if true, sort G->rowdegree, else G->coldegree
    bool ascending,         // sort in ascending or descending order
    char *msg               // LAGraph error message buffer
)
{

    //--------------------------------------------------------------------------
    // check inputs
    //--------------------------------------------------------------------------

    LG_CLEAR_MSG ;
    // result and workspace pointers start NULL so LAGRAPH_FREE_ALL (invoked
    // by LG_CHECK on any failure path) is safe at every point below
    int64_t *P = NULL ; size_t Psize = 0 ;
    int64_t *W = NULL ; size_t W_size = 0 ;
    int64_t *D = NULL ; size_t D_size = 0 ;
    LG_CHECK (P_handle == NULL, -1, "P is null") ;
    LG_CHECK (P_size == NULL, -1, "P_size is null") ;
    (*P_handle) = NULL ;
    (*P_size) = 0 ;
    LG_CHECK (LAGraph_CheckGraph (G, msg), -1, "graph is invalid") ;

    // select which degree vector to sort by
    GrB_Vector Degree ;

    if (G->kind == LAGRAPH_ADJACENCY_UNDIRECTED ||
       (G->kind == LAGRAPH_ADJACENCY_DIRECTED &&
        G->A_pattern_is_symmetric == LAGRAPH_TRUE))
    {
        // the pattern of A is known to be symmetric: row and column degrees
        // coincide, so byrow is ignored
        Degree = G->rowdegree ;
    }
    else
    {
        // A is not known to be symmetric; honor the byrow request
        Degree = (byrow) ? G->rowdegree : G->coldegree ;
    }
    // the required degree property must have been computed beforehand
    LG_CHECK (Degree == NULL, -1, "degree property unknown") ;

    //--------------------------------------------------------------------------
    // decide how many threads to use
    //--------------------------------------------------------------------------

    GrB_Index n ;
    GrB_TRY (GrB_Vector_size (&n, Degree)) ;

    // TODO: need to use the future GrB_Context
    #define CHUNK (64*1024)
    int nthreads ;
    LAGraph_TRY (LAGraph_GetNumThreads (&nthreads, msg)) ;
    // roughly one thread per CHUNK entries, but always at least one
    nthreads = LAGraph_MIN (nthreads, n/CHUNK) ;
    nthreads = LAGraph_MAX (nthreads, 1) ;

    //--------------------------------------------------------------------------
    // allocate result and workspace
    //--------------------------------------------------------------------------

    // P: result permutation; D: per-node sort key (the degree);
    // W: scratch buffer split into two length-n halves (W0: entry indices,
    // W1: entry values extracted from Degree)
    P = LAGraph_Malloc (n, sizeof (int64_t), &Psize) ;
    D = LAGraph_Malloc (n, sizeof (int64_t), &D_size) ;
    W = LAGraph_Malloc (2*n, sizeof (int64_t), &W_size) ;
    LG_CHECK (D == NULL || P == NULL || W == NULL, -1, "out of memory") ;
    int64_t *W0 = W ;
    int64_t *W1 = W + n ;

    //--------------------------------------------------------------------------
    // construct the pair [D,P] to sort
    //--------------------------------------------------------------------------

    // D [k] = 0 covers nodes with no explicit entry in Degree (degree zero);
    // P starts as the identity permutation
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t k = 0 ; k < n ; k++)
    {
        D [k] = 0 ;
        P [k] = k ;
    }

    // extract the degrees: W0 gets the node indices, W1 the degree values
    GrB_Index nvals = n ;
    GrB_TRY (GrB_Vector_extractTuples ((GrB_Index *) W0, W1, &nvals, Degree)) ;

    if (ascending)
    {
        // sort [D,P] in ascending order of degree, tie-breaking on P
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (int64_t k = 0 ; k < nvals ; k++)
        {
            D [W0 [k]] = W1 [k] ;
        }
    }
    else
    {
        // sort [D,P] in descending order of degree, tie-breaking on P:
        // negating the keys makes the ascending sort below run descending
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (int64_t k = 0 ; k < nvals ; k++)
        {
            D [W0 [k]] = -W1 [k] ;
        }
    }

    // the extracted tuples are no longer needed; free W early
    LAGraph_Free ((void **) &W, W_size) ;

    //--------------------------------------------------------------------------
    // sort by degrees, with ties by node id
    //--------------------------------------------------------------------------

    // sorts the pair [D,P]: D is the primary key, P breaks ties (so equal
    // degrees keep ascending node-id order)
    LAGraph_TRY (LAGraph_Sort2 (D, P, n, nthreads, msg)) ;

    //--------------------------------------------------------------------------
    // free workspace and return result
    //--------------------------------------------------------------------------

    LAGRAPH_FREE_WORK ;
    // ownership of P transfers to the caller
    (*P_handle) = P ;
    (*P_size) = Psize ;
    return (0) ;
}
utilities.c
//
// MIT license
//
// Copyright (c) 2019 Advanced Micro Devices, Inc. All rights reserved.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
//

#include <math.h>
#if !defined(__APPLE__) && !defined(__MACOSX)
#include <omp.h>
#endif

// DirectConv: reference time-domain convolution over a circular history
// buffer, used as the CPU baseline for verifying the GPU convolution.
//
//   out       - receives one output block of block_sz samples
//   in        - history buffer of in_sz = n_blocks * block_sz samples,
//               treated as circular
//   index     - block slot inside `in` holding the current input block
//               (presumably the newest block — confirm with callers)
//   block_sz  - samples per block
//   kernel    - filter coefficients; kernel[0] multiplies the sample at
//               position index*block_sz + b, later taps reach backwards
//   kernel_sz - number of filter taps applied
//   n_blocks  - number of blocks held in `in`
//
// For each b:  out[b] = sum over c of
//              in[(index*block_sz + b - c) mod in_sz] * kernel[c]
// Always returns 0.
int DirectConv(float * out, float * in, int index, int block_sz, float * kernel, int kernel_sz, int n_blocks)
{
    int ret = 0;
    int b, j, c;
    int in_sz = n_blocks * block_sz;    // total circular-buffer length

    // each output sample is independent; j and c must be private per thread
#pragma omp parallel for private(j,c)
    for (b = 0; b < block_sz; b++)
    {
        out[b] = 0;
        // accumulate in double to limit rounding error over long kernels
        double o = 0;
        // limited by real kernel length
        for (j = index * block_sz + b, c = 0; c < kernel_sz; c++, j--)
        {
            // wrap backwards around the circular history buffer
            j = (j < 0) ? in_sz - 1 : j;
            o += in[j] * kernel[c];
        }
        out[b] = (float)o;
    }
    return ret;
}

// Everything below is disabled with `#if 0`: test/bench harness code
// (pattern and random generators, upload threads, processing loop) kept for
// reference only.  The matching #endif lies further down in the file.
#if 0
const bool use_pattern = false;

// Copies one block of samples into slot `index` of the interleaved history
// buffer input_hist (data_ln samples per slot).
static int InsertInput(void *input_hist, int index, void * data, int data_ln)
{
    int err = 0;
    float * coeff = (float*)(data);
    float * in_hist = (float*)input_hist + index * data_ln;
    memcpy(in_hist, data, data_ln * sizeof(float));
    return err;
}

static int adder = 0;

// Fills the first input_data_ln entries of `data` with uniform random
// samples in [-1, 1); the remainder of the data_ln buffer is zeroed.
static int GenRandomInput(void * data, int input_data_ln, int data_ln)
{
    int err = 0;
    float * coeff = (float*)(data);
    memset(coeff, 0, data_ln*sizeof(float));
    for (int i = 0; i < input_data_ln; i++)
    {
        /*
        if ( i > 0 )
        {
            coeff[i] = 0;
            continue;
        }
        */
        coeff[i] = (2.0f * (float)rand() / (float)RAND_MAX - 1.0f);
    }
    // adder++;
    return err;
}

// Fills `data` with the predictable ramp (i + offset) for debugging; the
// tail of the data_ln buffer is zeroed.
static int GenPatternInput(void * data, int input_data_ln, int data_ln, int offset)
{
    float *out = (float*)data;
    memset(out, 0, data_ln*sizeof(float));
    for (int i = 0; i < input_data_ln; i++)
    {
        out[i] = i + offset;
    }
    return 0;
}

// Fills a filter with uniform random coefficients in [-1, 1); the energy
// normalization is left commented out.
static int GenRandomFilter(void * filter, int filter_ln)
{
    int err = 0;
    float * coeff = (float*)(filter);
    double norm2 = 0;
    for(int i = 0; i < filter_ln; i++ )
    {
        // if (i< filter_ln - 64 * 2 + 1)
        {
            coeff[i] = (2.f * (float)rand() / (float)RAND_MAX - 1.0f);
        }
#if 0
        else
        {
            coeff[i] = 0;
        }
#endif
        // norm2 += coeff[i] * coeff[i];
    }
    /*
    for(int i = 0; i < filter_ln; i++ )
    {
        coeff[i] /= norm2;
    }
    */
    return err;
}

#if 0
void fillInput( std::vector<float*> inputs,
                std::vector<float*> input_accum,
                std::vector<int> n_input_accum_blcks,
                std::vector<void *> kernels_ptrs,
                std::vector<int> kernel_len,
                std::vector<float*> outputs_v,
                int proc_block_sz,
                __int64 p_count,
                int verify)
{
    static int round = 0;
    for(int i = 0; i < inputs.size(); i++)
    {
        GenRandomInput(inputs[i], proc_block_sz);
        if ( verify == 1 )
        {
            int index = (int)(p_count % n_input_accum_blcks[i]);
            InsertInput(input_accum[i], index, inputs[i], proc_block_sz);
            DirectConv(outputs_v[i], input_accum[i], index, proc_block_sz,(float*)kernels_ptrs[i],kernel_len[i],n_input_accum_blcks[i]);
        }
    }
    round++;
}
#endif /*----------------------------------------------------------------------------------------------- conv kernel upload ------------------------------------------------------------------------------------------------*/ typedef struct _upload_thread2{ __int64 *upload_count; double *uploading_time; int n_channels; int kernel_upload_buffers; graal::CGraalConv * fhtConv; std::vector<void *> *kernels_ptrs; std::vector<int> *kernel_len; std::vector<int> *upload_id; std::vector<int> *kernel_id; std::vector<cl_mem> *kernel_mems; int *eo_upload_signal; } upload_thread2; typedef struct _upload_thread{ __int64 upload_count; double uploading_time; int method; int n_delays_onconv_switch; int n_channels; int n_sets; int curr_set; int prev_set; int onconv_switch_delay_counter; int kernel_upload_buffers; int single_threaded; graal::CGraalConv * fhtConv; std::vector<std:: vector<void *>> kernels_ptrs; std::vector<std:: vector<int>> kernel_len; std::vector<std::vector<cl_mem>> kernel_mems; std::vector<std::vector<int>> upload_id; std::vector<std::vector<int>> kernel_id; std::vector<int> eo_upload; std::vector<pthread_mutex_t> conv_updt_guard; // prevent conv update while teh conv still in use with the process call int eo_thread; } upload_thread; static void convUpload( __int64 &upload_count, double & uploading_time, int n_channels, int kernel_upload_buffers, graal::CGraalConv & fhtConv, std::vector<void *> &kernels_ptrs, std::vector<int> &kernel_len, std::vector<int> &upload_id, std::vector<int> &kernel_id, std::vector<cl_mem> &kernel_mems, pthread_mutex_t & conv_updt_guard ) { double t0, t1; if (kernel_upload_buffers == 2 ) { fhtConv.getConvBuffers(n_channels,&upload_id[0], &kernel_id[0], (float**)&kernels_ptrs[0]); } for(int i = 0; i < n_channels; i++) { if (use_pattern) GenPatternInput(kernels_ptrs[i], kernel_len[i], kernel_len[i], 0); else GenRandomFilter(kernels_ptrs[i], kernel_len[i]); } if (kernel_upload_buffers == 1 ) { // user managed OCL buffers // this is an emulation 
// a user manages OCL buffer and generate conv in time domain and upload into OCL buffers before hands fhtConv.getConvBuffers(n_channels,&upload_id[0], &kernel_id[0], &kernel_mems[0]); fhtConv.uploadConvHostPtrs(n_channels, &upload_id[0], &kernel_id[0], (const float**)&kernels_ptrs[0], &kernel_len[0], true); } if (kernel_upload_buffers == 3 ) { // library managed OCL buffers // called to obtain ocl buffers from Graal before user upload conv into them fhtConv.getConvBuffers(n_channels,&upload_id[0], &kernel_id[0], &kernel_mems[0]); fhtConv.uploadConvHostPtrs(n_channels, &upload_id[0], &kernel_id[0], (const float**)&kernels_ptrs[0], &kernel_len[0], true); } t0 = mach_absolute_time(); // pthread_mutex_lock(&conv_updt_guard); if (kernel_upload_buffers == 0 ) { // udate conv from host system ptrs fhtConv.updateConvHostPtrs(n_channels, &upload_id[0], &kernel_id[0], (const float**)&kernels_ptrs[0], &kernel_len[0], true); } else if ( kernel_upload_buffers == 1 ) { // update from user managed OCL buffers fhtConv.updateConv(n_channels, &upload_id[0], &kernel_id[0], &kernel_mems[0], &kernel_len[0], true); // fhtConv.finishUpdate(); } else if ( kernel_upload_buffers == 2 ) { // update from GPU-friendly host system ptrs fhtConv.updateConv(n_channels, &upload_id[0], &kernel_id[0], (const float**)&kernels_ptrs[0], &kernel_len[0], true); // fhtConv.finishUpdate(); } else //( kernel_upload_buffers == 3 ) { // update from lib managed buffers fhtConv.updateConv(n_channels, &upload_id[0], &kernel_id[0], &kernel_len[0], true); } // pthread_mutex_unlock(&conv_updt_guard); t1 = mach_absolute_time(); if ( upload_count > 0 ) { uploading_time += subtractTimes(t1, t0); } upload_count++; } static void * convUploadRoutine( void * _upload_ctl ) { int set = 0; upload_thread * upload_ctl = (upload_thread *)_upload_ctl; do { set = upload_ctl->curr_set; set++; set = (set >= upload_ctl->n_sets) ? 
0 : set; pthread_mutex_lock(&upload_ctl->conv_updt_guard[set]); if (!upload_ctl->eo_upload[set]) { for (int i = 0; i < upload_ctl->n_channels; i++) { upload_ctl->upload_id[set][i] = set; } convUpload( upload_ctl->upload_count, upload_ctl->uploading_time, upload_ctl->n_channels, upload_ctl->kernel_upload_buffers, *upload_ctl->fhtConv, upload_ctl->kernels_ptrs[set], upload_ctl->kernel_len[set], upload_ctl->upload_id[set], upload_ctl->kernel_id[set], upload_ctl->kernel_mems[set], upload_ctl->conv_updt_guard[set] ); upload_ctl->eo_upload[set] = 1; upload_ctl->curr_set = set; printf("upload set %d\n", set); } pthread_mutex_unlock(&upload_ctl->conv_updt_guard[set]); if (!upload_ctl->single_threaded) { Sleep(100); } } while (!upload_ctl->eo_thread); return (NULL); } /*------------------------------------------------------------------------------------------------ processing loop -------------------------------------------------------------------------------------------------*/ static void processingLoop( upload_thread &upload_ctl, __int64 & n_actual_loops, double & processing_time, int ext_verify, int n_channels, int input_block_sz, int proc_block_sz, int process_dev_buffers, std::vector<std::vector<int>> &process_upload_id, std::vector<std::vector<int>> &process_kernel_id, std::vector<std::vector<float*>> &inputs, std::vector<std::vector<float*>> &outputs, std::vector<std::vector<int>> &n_input_accum_blcks, std::vector<std::vector<float*>> &input_accum, std::vector<std::vector<float*>> &outputs_v ) { double t0, t1; int sample_mismatch = 0; int set = upload_ctl.curr_set; int prev_set = upload_ctl.prev_set; graal::CGraalConv & fhtConv = *upload_ctl.fhtConv; std::vector<void *> &kernels_ptrs = upload_ctl.kernels_ptrs[set]; std::vector<int> &kernel_len = upload_ctl.kernel_len[set]; pthread_mutex_t & conv_updt_guard = upload_ctl.conv_updt_guard[set]; for( int c = 0; c < n_channels && !sample_mismatch; c++) { if (use_pattern) GenPatternInput(inputs[0][c], input_block_sz, 
proc_block_sz, c * 10000); else // GenRandomInput(inputs[0][c], proc_block_sz); GenRandomInput(inputs[0][c], input_block_sz, proc_block_sz); } t0 = mach_absolute_time(); int n_real_rounds = 0; // precompute cpu version if (ext_verify) { for (int c = 0; c < n_channels && !sample_mismatch; c++) { int uploadId = process_upload_id[set][c]; int convId = process_kernel_id[set][c]; n_real_rounds = fhtConv.getRoundCounter(0, convId); int index = (int)(n_real_rounds % n_input_accum_blcks[uploadId][convId]); InsertInput(input_accum[0][convId], index, inputs[0][c], proc_block_sz); if (set == prev_set) { DirectConv(outputs_v[set][c], input_accum[0][convId], index, proc_block_sz, (float*)kernels_ptrs[c], kernel_len[c], n_input_accum_blcks[uploadId][convId]); } else { switch (upload_ctl.method) { case graal::ALG_UNIFORMED: default: DirectConv(outputs_v[prev_set][c], input_accum[0][convId], index, proc_block_sz, (float*)upload_ctl.kernels_ptrs[prev_set][c], upload_ctl.kernel_len[prev_set][c], n_input_accum_blcks[uploadId][convId]); DirectConv(outputs_v[set][c], input_accum[0][convId], index, proc_block_sz, (float*)kernels_ptrs[c], kernel_len[c], n_input_accum_blcks[uploadId][convId]); for (int i = 0; i < proc_block_sz; i++) { outputs_v[set][c][i] = (outputs_v[prev_set][c][i] * (float)i + outputs_v[set][c][i] * (float)(proc_block_sz - i)) / (float)proc_block_sz; } break; case graal::ALG_UNI_HEAD_TAIL: if (upload_ctl.onconv_switch_delay_counter == 0) { // additional previous run to get toa second stage DirectConv(outputs_v[set][c], input_accum[0][convId], index, proc_block_sz, (float*)upload_ctl.kernels_ptrs[prev_set][c], upload_ctl.kernel_len[prev_set][c], n_input_accum_blcks[uploadId][convId]); } if (upload_ctl.onconv_switch_delay_counter == 1) { DirectConv(outputs_v[prev_set][c], input_accum[0][convId], index, proc_block_sz, (float*)upload_ctl.kernels_ptrs[prev_set][c], upload_ctl.kernel_len[prev_set][c], n_input_accum_blcks[uploadId][convId]); DirectConv(outputs_v[set][c], 
input_accum[0][convId], index, proc_block_sz, (float*)kernels_ptrs[c], kernel_len[c], n_input_accum_blcks[uploadId][convId]); #if 1 for (int i = 0; i < proc_block_sz; i++) { outputs_v[set][c][i] = (outputs_v[prev_set][c][i] * (float)i + outputs_v[set][c][i] * (float)(proc_block_sz - i)) / (float)proc_block_sz; } #endif } break; } } } } pthread_mutex_lock(&conv_updt_guard); if (set == prev_set) { fhtConv.process(n_channels, &process_upload_id[set][0], &process_kernel_id[set][0], &inputs[0][0], &outputs[set][0]); } else { switch (upload_ctl.method) { case graal::ALG_UNIFORMED: default: // prev conv, do not advance time fhtConv.process(n_channels, &process_upload_id[prev_set][0], &process_kernel_id[prev_set][0], &inputs[0][0], &outputs[prev_set][0], 0, 0); // new conv, previous input advance time fhtConv.process(n_channels, &process_upload_id[set][0], &process_kernel_id[set][0], &inputs[0][0], &outputs[set][0], 1); for (int c = 0; c < n_channels && !sample_mismatch; c++) { for (int i = 0; i < proc_block_sz; i++) { outputs[set][c][i] = (outputs[prev_set][c][i] * (float)i + outputs[set][c][i] * (float)(proc_block_sz - i)) / (float)proc_block_sz; } } // free conv slot for the next upload upload_ctl.prev_set = upload_ctl.curr_set; upload_ctl.eo_upload[prev_set] = 0; break; case graal::ALG_UNI_HEAD_TAIL: if (upload_ctl.onconv_switch_delay_counter == 0) { // previous conv run, return data, advance the internal Graal time fhtConv.process(n_channels, &process_upload_id[prev_set][0], &process_kernel_id[prev_set][0], &inputs[0][0], &outputs[set][0]); // new conv run, do not return data, skip the first (head ) stage, do not advance the timer : flash the previous switch state fhtConv.process(n_channels, &process_upload_id[set][0], &process_kernel_id[set][0], &inputs[0][0], &outputs[prev_set][0], 1, 0/*, 1*/); #if 0 if (ext_verify) { int sample_mismatch = 0; for (int c = 0; c < n_channels && !sample_mismatch; c++) { for (int i = 0; i < proc_block_sz/* && !sample_mismatch*/; i++) { 
float c_val = outputs_v[set][c][i]; float g_val = outputs[set][c][i]; if (!_isnan(c_val) && !_isnan(g_val) && abs(c_val - g_val) > 0.01) { printf("Prev mismatch s=%d r=%d rr=%d c=%d i=%d c_v=%f g_v=%f\n", set, (int)n_actual_loops, n_real_rounds, c, i, c_val, g_val); sample_mismatch = 1; } } } if (sample_mismatch) exit(0); // print_interval = 2; } #endif } if (upload_ctl.onconv_switch_delay_counter == 1) { // last previous conv run, return data, do not advance the internal Graal time (skip 2nd stage) fhtConv.process(n_channels, &process_upload_id[prev_set][0], &process_kernel_id[prev_set][0], &inputs[0][0], &outputs[prev_set][0], 0, 0); // new conv run, return data, previous input, advance the timer fhtConv.process(n_channels, &process_upload_id[set][0], &process_kernel_id[set][0], &inputs[0][0], &outputs[set][0],1); #if 0 if (ext_verify) { int sample_mismatch = 0; for( int c = 0; c < n_channels && !sample_mismatch; c++) { for(int i = 0; i < proc_block_sz/* && !sample_mismatch*/; i++) { float c_val = outputs_v[prev_set][c][i]; float g_val = outputs[prev_set][c][i]; if (!_isnan(c_val) && !_isnan(g_val) && abs(c_val - g_val) > 0.01) { printf("Prev mismatch s=%d r=%d rr=%d c=%d i=%d c_v=%f g_v=%f\n", set, (int)n_actual_loops, n_real_rounds, c, i, c_val, g_val); sample_mismatch = 1; } } } if (sample_mismatch) exit(0); // print_interval = 2; } #endif #if 1 for (int c = 0; c < n_channels && !sample_mismatch; c++) { for (int i = 0; i < proc_block_sz; i++) { outputs[set][c][i] = (outputs[prev_set][c][i] * (float)i + outputs[set][c][i] * (float)(proc_block_sz - i)) / (float)proc_block_sz; } } #endif // free conv slot for the next upload upload_ctl.prev_set = upload_ctl.curr_set; upload_ctl.eo_upload[prev_set] = 0; } upload_ctl.onconv_switch_delay_counter++; upload_ctl.onconv_switch_delay_counter = (upload_ctl.onconv_switch_delay_counter >= upload_ctl.n_delays_onconv_switch) ? 
0 : upload_ctl.onconv_switch_delay_counter; break; } } pthread_mutex_unlock(&conv_updt_guard); t1 = mach_absolute_time(); if (n_actual_loops > 0 ) { processing_time += subtractTimes(t1, t0); } int print_interval = 100; #if 1 if (ext_verify) { for( int c = 0; c < n_channels && !sample_mismatch; c++) { for (int i = 0; i < input_block_sz && !sample_mismatch; i++) { float c_val = outputs_v[set][c][i]; float g_val = outputs[set][c][i]; if ( !_isnan( c_val) && !_isnan(g_val) && abs(c_val - g_val) > 0.01 ) { printf("Pipeline mismatch s=%d r=%d rr=%d c=%d i=%d c_v=%f g_v=%f\n", set, (int)n_actual_loops, n_real_rounds, c, i, c_val, g_val); sample_mismatch = 1; // exit(0); } } } print_interval = 2; } #endif if (!sample_mismatch && ( n_actual_loops % print_interval ) == 0 && n_actual_loops > 0 ) { printf("Passed set %d round %d\n", set, (int)n_actual_loops); } else if ( sample_mismatch) { // exit(0); } n_actual_loops++; } static void Usage(void ){ printf(">amdrvrbdrv.ex [arguments]\n"); printf("arguments:\n"); printf("-b_sz <block size>\n"); printf("-s_r <sample rate>\n"); printf("-n_l <stream duration in blocks>\n"); printf("-n_i <# of instances>\n"); printf("-i_f <input data> (duration in sec).\n"); printf("-k_f <kernel data> (duration in sec).\n"); printf("-alg 0|1|2 0,1 - classic, 2 - head-tail.\n"); printf("-fft use FFT transform (classic), otherwise FHT.\n"); // printf("-fir_g <filter size> - FIR filter size( separate pipeline).\n"); printf("-v_e <0|1> - external verification with per sample matching(1).\n"); // printf("-v_i <0|1|2|3> - internal verification, stream 1st only(1), 2nd only(2), both(3).\n"); printf("-kub <0|1|2|3> - kernel upload type 0: host ptr 1: client OCL 2: device ptr 3: lib OCL"); printf("-mt - use multi-threaded test"); } #define __MAX_VST_BLOCKS__ 2048 #define __DEFAULT_STREAM_DIR__ "\\Users\\alyashev\\Music\\" int main( int argc, char* argv[] ) { int n_instances = 1; __int64 num_offline_loops = 10; __int64 num_conv_updates; int block_size = 1024; 
int sample_rate = 48000; int per_sample_match = 0; //int FHT_2streams = __INIT_FLAG_2STREAMS__; //int heterogen = 0; //__INIT_FLAG_HETEROGEN__; //int fht = __INIT_FLAG_FHT__; int fft = 0; //__INIT_FLAG_FFT__ //int fir = 0; // __INIT_FLAG_FIR__ int ext_verify = 0; int verification = 0; //__INIT_FLAG_VER_TRANSFORM1__; // | __INIT_FLAG_VER_TRANSFORM2__; const char * input_fl = NULL; const char * kern_fl = NULL; char * inp_file = NULL; char * kern_file = NULL; int n_input_channels = 1; int n_sub_channels = 2; // 1 - mono, 2 - stereo float secs = 2; float in_secs = 0; int n_firs = 0; //int fir_sz[64]; //float * fir_data[64]; int run_flags = 0;//__PROCESSING_FLAG_VERIFY_TRANSFORM__; int out_file = 0; int process_dev_buffers = 0; int kernel_upload_buffers = 3; // 0 - host ptrs, 1 - client OCL buffers, 2 - dev ptrs, 3 - lib OCL buffers int single_threaded = 1; // 0 - multi, 1 - single int method = 0; for ( int i = 0; i < argc; i++ ) { if ( !strcmp(argv[i], "-i_f") && i < argc -1 && argv[i + 1] != 0) { if ( isdigit(argv[i+1][0]) ) { input_fl = NULL; in_secs = atof(argv[++i]); } else { size_t nm_len= strlen(argv[i + 1]); inp_file = (char*)malloc(nm_len + 1); assert(inp_file); strcpy(inp_file,argv[++i]); input_fl = inp_file; } } else if ( !strcmp(argv[i], "-k_f") && i < argc -1 && argv[i + 1] != 0) { if ( isdigit(argv[i+1][0]) ) { kern_fl = NULL; secs = atof(argv[++i]); } else { size_t nm_len= strlen(argv[i + 1]); kern_file = (char*)malloc(nm_len + 1); assert(kern_file); strcpy(kern_file,argv[++i]); kern_fl = kern_file; } } #if 0 else if ( !strcmp(argv[i], "-fir_g") && i < argc - 1 && argv[i + 1] != 0) { if ( isdigit(argv[i+1][0]) ) { fir_sz[n_firs++] = atoi(argv[++i]); } fir = __INIT_FLAG_FIR__; } #endif else if ( !strcmp(argv[i], "-n_i") && i < argc -1 && argv[i + 1] != 0) { n_instances = atoi(argv[++i]); } else if ( !strcmp(argv[i], "-s_r") && i < argc -1 && argv[i + 1] != 0) { sample_rate = atoi(argv[++i]); } else if ( !strcmp(argv[i], "-n_l") && i < argc -1 && argv[i + 
1] != 0) { num_offline_loops = atoi(argv[++i]); } else if ( !strcmp(argv[i], "-v_e") && i < argc -1 && argv[i + 1] != 0) { ext_verify = atoi(argv[++i]); // per_sample_match = atoi(argv[++i]); } #if 0 else if ( !strcmp(argv[i], "-v_i") && i < argc -1 && argv[i + 1] != 0) { int v = atoi(argv[++i]); run_flags = (v == 1) ? __PROCESSING_FLAG_VERIFY_TRANSFORM1__ : (v==2) ? __PROCESSING_FLAG_VERIFY_TRANSFORM2__ : (v==3) ? __PROCESSING_FLAG_VERIFY_TRANSFORM1__ | __PROCESSING_FLAG_VERIFY_TRANSFORM1__ : 0; } #endif else if ( !strcmp(argv[i], "-b_sz") && i < argc -1 && argv[i + 1] != 0) { block_size = atoi(argv[++i]); } else if (!strcmp(argv[i], "-alg") && i < argc - 1 && argv[i + 1] != 0) { method = atoi(argv[++i]); } else if (!strcmp(argv[i], "-m_t") && i < argc - 1 && argv[i + 1] != 0) { single_threaded = (atoi(argv[++i]) == 1) ? 0 : 1; } else if ( !strcmp(argv[i], "-fft") ) { fft = true; } else if (!strcmp(argv[i], "-alg") && i < argc - 1 && argv[i + 1] != 0) { method = atoi(argv[++i]); } else if (!strcmp(argv[i], "-mt")) { single_threaded = 0; } else if (!strcmp(argv[i], "-kub") && i < argc - 1 && argv[i + 1] != 0) { kernel_upload_buffers = atoi(argv[++i]); } else if ( !strcmp(argv[i], "-h") || !strcmp(argv[i], "-help")) { Usage(); exit(0); } else if ( i > 0 ) { printf ("Uknown argument: %s. Bailing out!\n", argv[i]); Usage(); exit(-1); } } input_fl = (input_fl==NULL && in_secs == 0)? "clicks" : input_fl; kern_fl = (kern_fl==NULL && secs == 0) ? ((ext_verify || verification ) ? "Nice Drum Room": "ad_10sec_48k") : kern_fl; //"St Nicolaes Church"; char input_str[1024]; if ( input_fl ) { strcpy(input_str, input_fl); } else { sprintf(input_str, "%5.1f", in_secs); } char kernel_str[1024]; if ( kern_fl ) { strcpy(kernel_str, kern_fl); } else { sprintf(kernel_str, "%5.1f", secs); } std:: string meth_s; meth_s = ((method == graal::ALG_ANY || method == graal::ALG_UNI_HEAD_TAIL) && !fft) ? "uniform head-tail" : "uniform classic"; printf("AMD Graal library. Transform %s\n", (fft)? 
"FFT" : "FHT"); printf("Arguments:\n"); printf("method %34s\n", meth_s.c_str()); printf("multi-threaded %26s\n", (single_threaded == 1) ? "no" : "yes"); printf("block %35d\n", block_size); printf("sample rate %29d\n",sample_rate); printf("stream duration in blocks %15d\n",num_offline_loops); printf("stream duration in sec %18.1f\n",(float)(num_offline_loops * block_size) / (float)sample_rate); printf("# of instances %26d\n",n_instances); printf("input data %*s\n", 30, input_str); printf("kernel data %*s\n", 29, kernel_str); printf("external verification %*s\n", 19, (ext_verify == 1) ? "yes" : "no"); int err = 0; int n_samples = block_size; //int n_kernel_samples; int bitPersample = 16; int n_channels = n_input_channels * n_sub_channels; size_t kern_ln = 0; double processing_time = 0.; double overlaping_processing_time = 0; double transfer_time = 0; kern_ln = (size_t)(secs * sample_rate); processing_time = 0.; transfer_time = 0; int n_sets = 2; n_channels = n_instances*n_sub_channels; upload_thread upload_ctl; memset(&upload_ctl, 0, sizeof(upload_thread)); upload_ctl.method = method; // graal::ALG_UNI_HEAD_TAIL; // ALG_UNIFORMED; upload_ctl.n_sets = n_sets; // to make it 0 on the first step; upload_ctl.curr_set = upload_ctl.n_sets; upload_ctl.n_channels = n_channels; upload_ctl.single_threaded = single_threaded; upload_ctl.upload_id.resize(n_sets); upload_ctl.kernel_id.resize(n_sets); upload_ctl.eo_upload.resize(n_sets); upload_ctl.kernels_ptrs.resize(n_sets); upload_ctl.kernel_len.resize(n_sets); upload_ctl.kernel_mems.resize(n_sets); upload_ctl.conv_updt_guard.resize(n_sets); switch (upload_ctl.method) { case graal::ALG_UNI_HEAD_TAIL: upload_ctl.n_delays_onconv_switch = 2; break; case graal::ALG_UNIFORMED: case graal::ALG_ANY: default: upload_ctl.n_delays_onconv_switch = 0; break; } std::vector<std::vector<int>> process_upload_id(n_sets); std::vector<std::vector<int>> process_kernel_id(n_sets); std::vector<std::vector<float*>> inputs(n_sets); 
std::vector<std::vector<float*>> input_accum(n_sets); std::vector<std::vector<int>> n_input_accum_blcks(n_sets); std::vector<std::vector<float*>> outputs(n_sets); std::vector<std::vector<float*>> outputs_v(n_sets); // instantiate grral conv library if (fft) { printf("Using the clFFT based convolution\n"); upload_ctl.fhtConv = new graal::CGraalConv_clFFT; } else { printf("Using the FHT based convolution\n"); upload_ctl.fhtConv = new graal::CGraalConv; } // initialize the library upload_ctl.fhtConv->initializeConv(n_channels, (int)kern_ln, block_size, n_sets , upload_ctl.method ); // exit(0); int proc_block_sz = upload_ctl.fhtConv->getInputBlockSz(); // interface type upload_ctl.kernel_upload_buffers = kernel_upload_buffers; for( int j = 0; j < n_sets; j++) { upload_ctl.eo_upload[j] = 0; // initialize guards pthread_mutex_init (&upload_ctl.conv_updt_guard[j], NULL); upload_ctl.kernel_id[j].resize(n_channels); upload_ctl.upload_id[j].resize(n_channels); process_upload_id[j].resize(n_channels); process_kernel_id[j].resize(n_channels); upload_ctl.kernels_ptrs[j].resize(n_channels); upload_ctl.kernel_len[j].resize(n_channels); // host arbitrary pointers inputs[j].resize(n_channels); outputs[j].resize(n_channels); // OCL mems upload_ctl.kernel_mems[j].resize(n_channels); outputs_v[j].resize(n_channels); n_input_accum_blcks[j].resize(n_channels); input_accum[j].resize(n_channels); for(int i = 0; i < n_channels; i++) { upload_ctl.kernel_id[j][i] = i; upload_ctl.upload_id[j][i] = j; process_upload_id[j][i] = j; process_kernel_id[j][i] = i; upload_ctl.kernel_len[j][i] = (int)kern_ln; // host arbitrary pointers if ( !process_dev_buffers ) { inputs[j][i] = (float*)malloc(proc_block_sz* sizeof(float)); outputs[j][i] = (float*)malloc(proc_block_sz* sizeof(float)); } outputs_v[j][i] = (float*)malloc(proc_block_sz* sizeof(float)); n_input_accum_blcks[j][i] = 1 + (upload_ctl.kernel_len[j][i] + proc_block_sz - 1) / proc_block_sz; input_accum[j][i] = 
(float*)malloc(n_input_accum_blcks[j][i] * proc_block_sz * sizeof(float)); memset(input_accum[j][i], 0, n_input_accum_blcks[j][i] * proc_block_sz * sizeof(float)); } } // upload kernels if (kernel_upload_buffers == 0 || kernel_upload_buffers == 1 || kernel_upload_buffers == 3) { for( int j = 0; j < n_sets; j++) { for (int i = 0; i < upload_ctl.kernels_ptrs[j].size(); i++) { upload_ctl.kernels_ptrs[j][i] = malloc(upload_ctl.kernel_len[j][i] * sizeof(float)); } } } // process device buffers /* if ( process_dev_buffers ) { fhtConv.getDevInputPtrs(n_channels, 0, &kernel_id[0], &inputs[0][0]); } */ pthread_t t1; __int64 n_process_loops = 1; if (!upload_ctl.single_threaded) { num_conv_updates = num_offline_loops; upload_ctl.eo_thread = 0; pthread_create(&t1, NULL, convUploadRoutine, &upload_ctl); } else { upload_ctl.eo_thread = 1; num_conv_updates = num_offline_loops / 40; num_conv_updates = (0 < num_conv_updates) ? num_conv_updates : 1; n_process_loops = num_offline_loops / num_conv_updates; } int sample_mismatch = 0; __int64 r_count = 0; #if 1 if (upload_ctl.single_threaded && !ext_verify) { convUploadRoutine(&upload_ctl); } #endif for( __int64 u_count = 0; u_count < num_conv_updates + 1 && !sample_mismatch; u_count++) { #if 1 if (upload_ctl.single_threaded && ext_verify) { convUploadRoutine(&upload_ctl); } #endif int i = 0; while (true ) { if (upload_ctl.curr_set < upload_ctl.n_sets && upload_ctl.eo_upload[upload_ctl.curr_set]) { // pthread_mutex_lock(&upload_ctl.conv_updt_guard[j]); for(int k = 0; k < upload_ctl.n_channels;k++) { process_upload_id[upload_ctl.curr_set][k] = upload_ctl.upload_id[upload_ctl.curr_set][k]; } processingLoop( upload_ctl, r_count, processing_time, ext_verify, n_channels, block_size, proc_block_sz, process_dev_buffers, process_upload_id, process_kernel_id, inputs, outputs, n_input_accum_blcks, input_accum, outputs_v ); // pthread_mutex_unlock(&upload_ctl.conv_updt_guard[j]); } if (!upload_ctl.single_threaded) { Sleep(0); } if 
(upload_ctl.single_threaded) { i++; if (i >= n_process_loops) { break; } } else if (r_count >= num_offline_loops) { break; } } } if (!upload_ctl.single_threaded) { upload_ctl.eo_thread = 1; pthread_join(t1, NULL); } if (kernel_upload_buffers == 0 || kernel_upload_buffers == 1 || kernel_upload_buffers == 3) { for( int j = 0; j < n_sets; j++) { for(int i = 0; i < upload_ctl.kernels_ptrs[j].size(); i++) { free(upload_ctl.kernels_ptrs[j][i]); } } } for( int j = 0; j < n_sets; j++) { pthread_mutex_destroy (&upload_ctl.conv_updt_guard[j]); for(int i = 0; i < inputs[j].size(); i++) { if (!process_dev_buffers) { free(inputs[j][i]); free(outputs[j][i]); } free(input_accum[j][i]); free(outputs_v[j][i]); } } delete upload_ctl.fhtConv; __int64 upload_count = (upload_ctl.upload_count - 1 <= 0) ? 1 : (upload_ctl.upload_count - 1); printf( "Channels: %d upload:%6.2fms processing:%6.2fms\n", n_channels, (upload_ctl.uploading_time/upload_count), (processing_time/(double) r_count) ); return(0); } #endif
an9-stream.c
/* * Copyright (c) 2017, Cray Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its contributors * may be used to endorse or promote products derived from this software without * specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * */ /*-----------------------------------------------------------------------*/ /* Crossroads/NERSC9 STREAM Bandwidth Benchmark Adapted from the original STREAM Benchmark by John D. McCalpin Original copyright is 1991 - 2013: John D. 
McCalpin

   Modifications for C/N9 Benchmark:
   - Array allocations use the heap and are no longer static
   - Modifications to OpenMP function calls
*/
/*-----------------------------------------------------------------------*/
#include <float.h>
#include <limits.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <unistd.h>

#ifdef _OPENMP
#include <omp.h>
#endif

#ifndef STREAM_ARRAY_SIZE
#define STREAM_ARRAY_SIZE 100000000
#endif

#define NTIMES 10
#define OFFSET 0
#define HLINE "-------------------------------------------------------------\n"

#ifndef MIN
#define MIN(x, y) ((x) < (y) ? (x) : (y))
#endif
#ifndef MAX
#define MAX(x, y) ((x) > (y) ? (x) : (y))
#endif

#ifndef STREAM_TYPE
#define STREAM_TYPE double
#endif

#define STREAM_RESTRICT __restrict__

/* The three benchmark arrays; heap-allocated in an9_stream_main(). */
static STREAM_TYPE *STREAM_RESTRICT a;
static STREAM_TYPE *STREAM_RESTRICT b;
static STREAM_TYPE *STREAM_RESTRICT c;

static double avgtime[4] = {0}, maxtime[4] = {0},
              mintime[4] = {FLT_MAX, FLT_MAX, FLT_MAX, FLT_MAX};

static const char *label[4] = {"Copy: ", "Scale: ", "Add: ", "Triad: "};

/* Bytes moved per kernel iteration: copy/scale touch 2 arrays, add/triad 3. */
static double bytes[4] = {2 * sizeof(STREAM_TYPE) * STREAM_ARRAY_SIZE,
                          2 * sizeof(STREAM_TYPE) * STREAM_ARRAY_SIZE,
                          3 * sizeof(STREAM_TYPE) * STREAM_ARRAY_SIZE,
                          3 * sizeof(STREAM_TYPE) * STREAM_ARRAY_SIZE};

double mysecond();
void checkSTREAMresults();
int checktick();

/*
 * Run the four STREAM kernels (Copy, Scale, Add, Triad) NTIMES times,
 * report the best bandwidth per kernel (first iteration excluded), and
 * validate the results.
 *
 * Fixes vs. previous revision:
 *  - posix_memalign() results are now checked (a failure used to fall
 *    through to a null-pointer dereference in the init loop);
 *  - "Array size" now prints the element count, not the byte count;
 *  - the arrays are freed before returning.
 */
int an9_stream_main()
{
    int quantum;
    int BytesPerWord;
    int k;
    ssize_t j;
    STREAM_TYPE scalar;
    double t, times[4][NTIMES];

    /* --- SETUP --- determine precision and check timing --- */
    printf(HLINE);
    printf("STREAM APEX Crossroads/N9 Memory Bandwidth\n");
    printf("(Based on the original STREAM benchmark by John D. McCalpin)\n");
    printf(HLINE);
    BytesPerWord = sizeof(STREAM_TYPE);
    printf("This system uses %d bytes per array element.\n", BytesPerWord);
    printf(HLINE);

    const unsigned long long int array_size =
        (sizeof(STREAM_TYPE) * (STREAM_ARRAY_SIZE + OFFSET));
    /* FIX: this line used to print the byte count while labeling it "elements". */
    printf("Array size = %llu (elements), Offset = %d (elements)\n",
           (unsigned long long)(STREAM_ARRAY_SIZE + OFFSET), OFFSET);
    printf("Memory per array = %.1f MiB (= %.1f GiB).\n",
           BytesPerWord * ((double)STREAM_ARRAY_SIZE / 1024.0 / 1024.0),
           BytesPerWord * ((double)STREAM_ARRAY_SIZE / 1024.0 / 1024.0 / 1024.0));
    printf("Total memory required = %.1f MiB (= %.1f GiB).\n",
           (3.0 * BytesPerWord) * ((double)STREAM_ARRAY_SIZE / 1024.0 / 1024.),
           (3.0 * BytesPerWord) * ((double)STREAM_ARRAY_SIZE / 1024.0 / 1024. / 1024.));
    printf("Each kernel will be executed %d times.\n", NTIMES);
    printf(" The *best* time for each kernel (excluding the first iteration)\n");
    printf(" will be used to compute the reported bandwidth.\n");

    printf("Allocating arrays ...\n");
    /* FIX: check allocation success — array_size here is correctly in bytes. */
    if (posix_memalign((void **)&a, 64, array_size) != 0 ||
        posix_memalign((void **)&b, 64, array_size) != 0 ||
        posix_memalign((void **)&c, 64, array_size) != 0)
    {
        fprintf(stderr, "Failed to allocate the STREAM arrays.\n");
        exit(EXIT_FAILURE);
    }

#ifdef _OPENMP
    /* Count threads: each thread atomically bumps k once. */
    k = 0;
#pragma omp parallel
#pragma omp atomic
    k++;
    printf("Number of Threads counted = %i\n", k);
#endif

    printf("Populating values and performing first touch ... \n");
    /* First touch in parallel so pages land near the threads that use them. */
#pragma omp parallel for
    for (j = 0; j < STREAM_ARRAY_SIZE; j++)
    {
        a[j] = 1.0;
        b[j] = 2.0;
        c[j] = 0.0;
    }
    printf("Population of values is complete.\n");
    printf(HLINE);

    /* Get initial value for system clock. */
    if ((quantum = checktick()) >= 1)
        printf("Your clock granularity/precision appears to be %d microseconds.\n",
               quantum);
    else
    {
        printf("Your clock granularity appears to be less than one microsecond.\n");
        quantum = 1;
    }

    t = mysecond();
#pragma omp parallel for
    for (j = 0; j < STREAM_ARRAY_SIZE; j++)
        a[j] = 2.0E0 * a[j];
    t = 1.0E6 * (mysecond() - t);

    printf("Each test below will take on the order of %d microseconds.\n", (int)t);
    printf(" (= %d clock ticks)\n", (int)(t / quantum));
    printf("Increase the size of the arrays if this shows that\n");
    printf("you are not getting at least 20 clock ticks per test.\n");
    printf(HLINE);
    printf("WARNING -- The above is only a rough guideline.\n");
    printf("For best results, please be sure you know the\n");
    printf("precision of your system timer.\n");
    printf(HLINE);

    /* --- MAIN LOOP --- repeat test cases NTIMES times --- */
    scalar = 3.0;
    for (k = 0; k < NTIMES; k++)
    {
        times[0][k] = mysecond();
#pragma omp parallel for
        for (j = 0; j < STREAM_ARRAY_SIZE; j++)
            c[j] = a[j];
        times[0][k] = mysecond() - times[0][k];

        times[1][k] = mysecond();
#pragma omp parallel for
        for (j = 0; j < STREAM_ARRAY_SIZE; j++)
            b[j] = scalar * c[j];
        times[1][k] = mysecond() - times[1][k];

        times[2][k] = mysecond();
#pragma omp parallel for
        for (j = 0; j < STREAM_ARRAY_SIZE; j++)
            c[j] = a[j] + b[j];
        times[2][k] = mysecond() - times[2][k];

        times[3][k] = mysecond();
#pragma omp parallel for
        for (j = 0; j < STREAM_ARRAY_SIZE; j++)
            a[j] = b[j] + scalar * c[j];
        times[3][k] = mysecond() - times[3][k];
    }

    /* --- SUMMARY --- */
    for (k = 1; k < NTIMES; k++) /* note -- skip first iteration */
    {
        for (j = 0; j < 4; j++)
        {
            avgtime[j] = avgtime[j] + times[j][k];
            mintime[j] = MIN(mintime[j], times[j][k]);
            maxtime[j] = MAX(maxtime[j], times[j][k]);
        }
    }

    printf("Function Best Rate MB/s Avg time Min time Max time\n");
    for (j = 0; j < 4; j++)
    {
        avgtime[j] = avgtime[j] / (double)(NTIMES - 1);
        printf("%s%12.1f %11.6f %11.6f %11.6f\n", label[j],
               1.0E-06 * bytes[j] / mintime[j], avgtime[j], mintime[j], maxtime[j]);
    }
    printf(HLINE);

    /* --- Check Results --- */
    checkSTREAMresults();
    printf(HLINE);

    /* FIX: release the arrays (previously leaked). */
    free(a);
    free(b);
    free(c);
    return 0;
}

#define M 20

/*
 * Estimate the clock granularity in microseconds: collect M distinct
 * timer readings and return the minimum positive delta between them.
 */
int checktick()
{
    int i, minDelta, Delta;
    double t1, t2, timesfound[M];

    /* Collect a sequence of M unique time values from the system. */
    for (i = 0; i < M; i++)
    {
        t1 = mysecond();
        while (((t2 = mysecond()) - t1) < 1.0E-6)
            ;
        timesfound[i] = t1 = t2;
    }

    /*
     * Determine the minimum difference between these M values.
     * This result will be our estimate (in microseconds) for the
     * clock granularity.
     */
    minDelta = 1000000;
    for (i = 1; i < M; i++)
    {
        Delta = (int)(1.0E6 * (timesfound[i] - timesfound[i - 1]));
        minDelta = MIN(minDelta, MAX(Delta, 0));
    }
    return (minDelta);
}

/* Wall-clock time in seconds via gettimeofday (UNIX-like systems). */
double mysecond()
{
    struct timeval tp;
    /* FIX: the timezone argument and the ignored return value were dropped. */
    gettimeofday(&tp, NULL);
    return ((double)tp.tv_sec + (double)tp.tv_usec * 1.e-6);
}

#ifndef abs
#define abs(a) ((a) >= 0 ? (a) : -(a))
#endif

/*
 * Recompute the expected final values of a[], b[], c[] analytically and
 * compare the averaged absolute error per array against a precision-
 * dependent epsilon, reporting per-element failures when validation fails.
 */
void checkSTREAMresults()
{
    STREAM_TYPE aj, bj, cj, scalar;
    STREAM_TYPE aSumErr, bSumErr, cSumErr;
    STREAM_TYPE aAvgErr, bAvgErr, cAvgErr;
    double epsilon;
    ssize_t j;
    int k, ierr, err;

    /* reproduce initialization */
    aj = 1.0;
    bj = 2.0;
    cj = 0.0;
    /* a[] is modified during timing check */
    aj = 2.0E0 * aj;
    /* now execute timing loop */
    scalar = 3.0;
    for (k = 0; k < NTIMES; k++)
    {
        cj = aj;
        bj = scalar * cj;
        cj = aj + bj;
        aj = bj + scalar * cj;
    }

    /* accumulate deltas between observed and expected results */
    aSumErr = 0.0;
    bSumErr = 0.0;
    cSumErr = 0.0;
    for (j = 0; j < STREAM_ARRAY_SIZE; j++)
    {
        aSumErr += abs(a[j] - aj);
        bSumErr += abs(b[j] - bj);
        cSumErr += abs(c[j] - cj);
    }
    aAvgErr = aSumErr / (STREAM_TYPE)STREAM_ARRAY_SIZE;
    bAvgErr = bSumErr / (STREAM_TYPE)STREAM_ARRAY_SIZE;
    cAvgErr = cSumErr / (STREAM_TYPE)STREAM_ARRAY_SIZE;

    if (sizeof(STREAM_TYPE) == 4)
    {
        epsilon = 1.e-6;
    }
    else if (sizeof(STREAM_TYPE) == 8)
    {
        epsilon = 1.e-13;
    }
    else
    {
        printf("WEIRD: sizeof(STREAM_TYPE) = %lu\n", sizeof(STREAM_TYPE));
        epsilon = 1.e-6;
    }

    err = 0;
    if (abs(aAvgErr / aj) > epsilon)
    {
        err++;
        printf("Failed Validation on array a[], AvgRelAbsErr > epsilon (%e)\n", epsilon);
        printf(" Expected Value: %e, AvgAbsErr: %e, AvgRelAbsErr: %e\n", aj, aAvgErr,
               abs(aAvgErr) / aj);
        ierr = 0;
        for (j = 0; j < STREAM_ARRAY_SIZE; j++)
        {
            if (abs(a[j] / aj - 1.0) > epsilon)
            {
                ierr++;
#ifdef VERBOSE
                if (ierr < 10)
                {
                    printf(" array a: index: %ld, expected: %e, observed: %e, "
                           "relative error: %e\n",
                           j, aj, a[j], abs((aj - a[j]) / aAvgErr));
                }
#endif
            }
        }
        printf(" For array a[], %d errors were found.\n", ierr);
    }
    if (abs(bAvgErr / bj) > epsilon)
    {
        err++;
        printf("Failed Validation on array b[], AvgRelAbsErr > epsilon (%e)\n", epsilon);
        printf(" Expected Value: %e, AvgAbsErr: %e, AvgRelAbsErr: %e\n", bj, bAvgErr,
               abs(bAvgErr) / bj);
        printf(" AvgRelAbsErr > Epsilon (%e)\n", epsilon);
        ierr = 0;
        for (j = 0; j < STREAM_ARRAY_SIZE; j++)
        {
            if (abs(b[j] / bj - 1.0) > epsilon)
            {
                ierr++;
#ifdef VERBOSE
                if (ierr < 10)
                {
                    printf(" array b: index: %ld, expected: %e, observed: %e, "
                           "relative error: %e\n",
                           j, bj, b[j], abs((bj - b[j]) / bAvgErr));
                }
#endif
            }
        }
        printf(" For array b[], %d errors were found.\n", ierr);
    }
    if (abs(cAvgErr / cj) > epsilon)
    {
        err++;
        printf("Failed Validation on array c[], AvgRelAbsErr > epsilon (%e)\n", epsilon);
        printf(" Expected Value: %e, AvgAbsErr: %e, AvgRelAbsErr: %e\n", cj, cAvgErr,
               abs(cAvgErr) / cj);
        printf(" AvgRelAbsErr > Epsilon (%e)\n", epsilon);
        ierr = 0;
        for (j = 0; j < STREAM_ARRAY_SIZE; j++)
        {
            if (abs(c[j] / cj - 1.0) > epsilon)
            {
                ierr++;
#ifdef VERBOSE
                if (ierr < 10)
                {
                    printf(" array c: index: %ld, expected: %e, observed: %e, "
                           "relative error: %e\n",
                           j, cj, c[j], abs((cj - c[j]) / cAvgErr));
                }
#endif
            }
        }
        printf(" For array c[], %d errors were found.\n", ierr);
    }
    if (err == 0)
    {
        printf("Solution Validates: avg error less than %e on all three arrays\n",
               epsilon);
    }
#ifdef VERBOSE
    printf("Results Validation Verbose Results: \n");
    printf(" Expected a(1), b(1), c(1): %f %f %f \n", aj, bj, cj);
    printf(" Observed a(1), b(1), c(1): %f %f %f \n", a[1], b[1], c[1]);
    printf(" Rel Errors on a, b, c: %e %e %e \n", abs(aAvgErr / aj), abs(bAvgErr / bj),
           abs(cAvgErr / cj));
#endif
    return;
}
client_utils.h
// Copyright (c) 2020 - present Advanced Micro Devices, Inc. All rights reserved. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. #ifndef CLIENT_UTILS_H #define CLIENT_UTILS_H #include <algorithm> #include <complex> #include <iostream> #include <mutex> #include <numeric> #include <omp.h> #include <random> #include <tuple> #include <vector> #include "../shared/printbuffer.h" #include "rocfft.h" #include <hip/hip_runtime_api.h> static const size_t ONE_GiB = 1 << 30; // Determine the size of the data type given the precision and type. 
template <typename Tsize> inline Tsize var_size(const rocfft_precision precision, const rocfft_array_type type) { size_t var_size = 0; switch(precision) { case rocfft_precision_single: var_size = sizeof(float); break; case rocfft_precision_double: var_size = sizeof(double); break; } switch(type) { case rocfft_array_type_complex_interleaved: case rocfft_array_type_hermitian_interleaved: var_size *= 2; break; default: break; } return var_size; } // Container class for test parameters. class rocfft_params { public: // All parameters are row-major. std::vector<size_t> length; std::vector<size_t> istride; std::vector<size_t> ostride; size_t nbatch = 1; rocfft_precision precision = rocfft_precision_double; rocfft_transform_type transform_type = rocfft_transform_type_complex_forward; rocfft_result_placement placement = rocfft_placement_inplace; size_t idist = 0; size_t odist = 0; rocfft_array_type itype = rocfft_array_type_complex_interleaved; rocfft_array_type otype = rocfft_array_type_complex_interleaved; std::vector<size_t> ioffset = {0, 0}; std::vector<size_t> ooffset = {0, 0}; std::vector<size_t> isize; std::vector<size_t> osize; // run testing load/store callbacks bool run_callbacks = false; static constexpr double load_cb_scalar = 0.457813941; static constexpr double store_cb_scalar = 0.391504938; // Given an array type, return the name as a string. std::string array_type_name(const rocfft_array_type type) const { switch(type) { case rocfft_array_type_complex_interleaved: return "rocfft_array_type_complex_interleaved"; case rocfft_array_type_complex_planar: return "rocfft_array_type_complex_planar"; case rocfft_array_type_real: return "rocfft_array_type_real"; case rocfft_array_type_hermitian_interleaved: return "rocfft_array_type_hermitian_interleaved"; case rocfft_array_type_hermitian_planar: return "rocfft_array_type_hermitian_planar"; case rocfft_array_type_unset: return "rocfft_array_type_unset"; } return ""; } // Convert to string for output. 
std::string str(const std::string& separator = ", ") const { std::stringstream ss; ss << "length:"; for(auto i : length) ss << " " << i; ss << separator; ss << "istride:"; for(auto i : istride) ss << " " << i; ss << separator; ss << "idist: " << idist << separator; ss << "ostride:"; for(auto i : ostride) ss << " " << i; ss << separator; ss << "odist: " << odist << separator; ss << "batch: " << nbatch << separator; ss << "isize:"; for(auto i : isize) ss << " " << i; ss << separator; ss << "osize:"; for(auto i : osize) ss << " " << i; ss << separator; ss << "ioffset:"; for(auto i : ioffset) ss << " " << i; ss << separator; ss << "ooffset:"; for(auto i : ooffset) ss << " " << i; ss << separator; if(placement == rocfft_placement_inplace) ss << "in-place"; else ss << "out-of-place"; ss << separator; ss << array_type_name(itype) << " -> " << array_type_name(otype) << separator; if(precision == rocfft_precision_single) ss << "single-precision"; else ss << "double-precision"; ss << separator; ss << "ilength:"; for(const auto i : ilength()) ss << " " << i; ss << separator; ss << "olength:"; for(const auto i : olength()) ss << " " << i; return ss.str(); } // Stream output operator (for gtest, etc). friend std::ostream& operator<<(std::ostream& stream, const rocfft_params& params) { stream << params.str(); return stream; } // Dimension of the transform. 
size_t dim() const { return length.size(); } std::vector<size_t> ilength() const { auto ilength = length; if(transform_type == rocfft_transform_type_real_inverse) ilength[dim() - 1] = ilength[dim() - 1] / 2 + 1; return ilength; } std::vector<size_t> olength() const { auto olength = length; if(transform_type == rocfft_transform_type_real_forward) olength[dim() - 1] = olength[dim() - 1] / 2 + 1; return olength; } size_t nbuffer(const rocfft_array_type type) const { switch(type) { case rocfft_array_type_real: case rocfft_array_type_complex_interleaved: case rocfft_array_type_hermitian_interleaved: return 1; case rocfft_array_type_complex_planar: case rocfft_array_type_hermitian_planar: return 2; case rocfft_array_type_unset: return 0; } } // Number of input buffers size_t nibuffer() const { return nbuffer(itype); } // Number of output buffers size_t nobuffer() const { return nbuffer(otype); } auto compute_isize() const { auto il = ilength(); size_t val = nbatch * idist; for(int i = 0; i < il.size(); ++i) { val = std::max(val, il[i] * istride[i]); } std::vector<size_t> isize(nibuffer()); for(int i = 0; i < isize.size(); ++i) { isize[i] = val + ioffset[i]; } return isize; } auto compute_osize() const { auto ol = olength(); size_t val = nbatch * odist; for(int i = 0; i < ol.size(); ++i) { val = std::max(val, ol[i] * ostride[i]); } std::vector<size_t> osize(nobuffer()); for(int i = 0; i < osize.size(); ++i) { osize[i] = val + ooffset[i]; } return osize; } std::vector<size_t> ibuffer_sizes() const { std::vector<size_t> ibuffer_sizes; if(isize.empty()) return ibuffer_sizes; switch(itype) { case rocfft_array_type_complex_planar: case rocfft_array_type_hermitian_planar: ibuffer_sizes.resize(2); break; default: ibuffer_sizes.resize(1); } for(unsigned i = 0; i < ibuffer_sizes.size(); i++) { ibuffer_sizes[i] = isize[i] * var_size<size_t>(precision, itype); } return ibuffer_sizes; } std::vector<size_t> obuffer_sizes() const { std::vector<size_t> obuffer_sizes; if(osize.empty()) 
return obuffer_sizes; switch(otype) { case rocfft_array_type_complex_planar: case rocfft_array_type_hermitian_planar: obuffer_sizes.resize(2); break; default: obuffer_sizes.resize(1); } for(unsigned i = 0; i < obuffer_sizes.size(); i++) { obuffer_sizes[i] = osize[i] * var_size<size_t>(precision, otype); } return obuffer_sizes; } // Estimate the amount of host memory needed. size_t needed_ram(const int verbose) const { // Host input, output, and input copy: 3 buffers, all contiguous. size_t needed_ram = 3 * std::accumulate(length.begin(), length.end(), 1, std::multiplies<size_t>()); // GPU input buffer: needed_ram += std::inner_product(length.begin(), length.end(), istride.begin(), 0); // GPU output buffer: needed_ram += std::inner_product(length.begin(), length.end(), ostride.begin(), 0); // Account for precision and data type: if(transform_type != rocfft_transform_type_real_forward && transform_type != rocfft_transform_type_real_inverse) { needed_ram *= 2; } switch(precision) { case rocfft_precision_single: needed_ram *= 4; break; case rocfft_precision_double: needed_ram *= 8; break; } needed_ram *= nbatch; if(verbose > 1) { std::cout << "required host memory (GiB): " << needed_ram / ONE_GiB << std::endl; } return needed_ram; } // Column-major getters: std::vector<size_t> ilength_cm() const { auto ilength_cm = ilength(); std::reverse(std::begin(ilength_cm), std::end(ilength_cm)); return ilength_cm; } std::vector<size_t> olength_cm() const { auto olength_cm = olength(); std::reverse(std::begin(olength_cm), std::end(olength_cm)); return olength_cm; } std::vector<size_t> length_cm() const { auto length_cm = length; std::reverse(std::begin(length_cm), std::end(length_cm)); return length_cm; } std::vector<size_t> istride_cm() const { auto istride_cm = istride; std::reverse(std::begin(istride_cm), std::end(istride_cm)); return istride_cm; } std::vector<size_t> ostride_cm() const { auto ostride_cm = ostride; std::reverse(std::begin(ostride_cm), std::end(ostride_cm)); 
return ostride_cm; } // Return true if the given GPU parameters would produce a valid transform. bool valid(const int verbose) const { if(ioffset.size() < nibuffer() || ooffset.size() < nobuffer()) return false; // Check that in-place transforms have the same input and output stride: if(placement == rocfft_placement_inplace) { const auto stridesize = std::min(istride.size(), ostride.size()); bool samestride = true; for(int i = 0; i < stridesize; ++i) { if(istride[i] != ostride[i]) samestride = false; } if((transform_type == rocfft_transform_type_complex_forward || transform_type == rocfft_transform_type_complex_inverse) && !samestride) { // In-place transforms require identical input and output strides. if(verbose) { std::cout << "istride:"; for(const auto& i : istride) std::cout << " " << i; std::cout << " ostride0:"; for(const auto& i : ostride) std::cout << " " << i; std::cout << " differ; skipped for in-place transforms: skipping test" << std::endl; } return false; } if((transform_type == rocfft_transform_type_real_forward || transform_type == rocfft_transform_type_real_inverse) && (istride.back() != 1 || ostride.back() != 1)) { // In-place real/complex transforms require unit strides. 
if(verbose) { std::cout << "istride.back(): " << istride.back() << " ostride.back(): " << ostride.back() << " must be unitary for in-place real/complex transforms: skipping test" << std::endl; } return false; } if((itype == rocfft_array_type_complex_interleaved && otype == rocfft_array_type_complex_planar) || (itype == rocfft_array_type_complex_planar && otype == rocfft_array_type_complex_interleaved)) { if(verbose) { std::cout << "In-place c2c transforms require identical io types; skipped.\n"; } return false; } // Check offsets switch(transform_type) { case rocfft_transform_type_complex_forward: case rocfft_transform_type_complex_inverse: for(int i = 0; i < nibuffer(); ++i) { if(ioffset[i] != ooffset[i]) return false; } break; case rocfft_transform_type_real_forward: if(ioffset[0] != 2 * ooffset[0]) return false; break; case rocfft_transform_type_real_inverse: if(2 * ioffset[0] != ooffset[0]) return false; break; } } // The parameters are valid. return true; } }; // This is used with the program_options class so that the user can type an integer on the // command line and we store into an enum varaible template <typename _Elem, typename _Traits> std::basic_istream<_Elem, _Traits>& operator>>(std::basic_istream<_Elem, _Traits>& stream, rocfft_array_type& atype) { unsigned tmp; stream >> tmp; atype = rocfft_array_type(tmp); return stream; } // similarly for transform type template <typename _Elem, typename _Traits> std::basic_istream<_Elem, _Traits>& operator>>(std::basic_istream<_Elem, _Traits>& stream, rocfft_transform_type& ttype) { unsigned tmp; stream >> tmp; ttype = rocfft_transform_type(tmp); return stream; } // count the number of total iterations for 1-, 2-, and 3-D dimensions template <typename T1> size_t count_iters(const T1& i) { return i; } template <typename T1> size_t count_iters(const std::tuple<T1, T1>& i) { return std::get<0>(i) * std::get<1>(i); } template <typename T1> size_t count_iters(const std::tuple<T1, T1, T1>& i) { return std::get<0>(i) * 
std::get<1>(i) * std::get<2>(i); } // Work out how many partitions to break our iteration problem into template <typename T1> static size_t compute_partition_count(T1 length) { #ifdef BUILD_CLIENTS_TESTS_OPENMP // we seem to get contention from too many threads, which slows // things down. particularly noticeable with mix_3D tests static const size_t MAX_PARTITIONS = 8; size_t iters = count_iters(length); size_t hw_threads = std::min(MAX_PARTITIONS, static_cast<size_t>(omp_get_num_procs())); if(!hw_threads) return 1; // don't bother threading problem sizes that are too small. pick // an arbitrary number of iterations and ensure that each thread // has at least that many iterations to process static const size_t MIN_ITERS_PER_THREAD = 2048; // either use the whole CPU, or use ceil(iters/iters_per_thread) return std::min(hw_threads, (iters + MIN_ITERS_PER_THREAD + 1) / MIN_ITERS_PER_THREAD); #else return 1; #endif } // Break a scalar length into some number of pieces, returning // [(start0, end0), (start1, end1), ...] 
template <typename T1>
std::vector<std::pair<T1, T1>> partition_base(const T1& length, size_t num_parts)
{
    static_assert(std::is_integral<T1>::value, "Integral required.");

    // make sure we don't exceed the length.  The explicit <size_t>
    // argument lets std::min accept a T1 narrower than size_t.
    num_parts = std::min<size_t>(length, num_parts);

    // Degenerate input (length == 0): previously this fell through to a
    // division by zero (length / num_parts) and ret.back() on an empty
    // vector — both undefined behavior.  Return no partitions instead.
    if(num_parts == 0)
        return {};

    std::vector<std::pair<T1, T1>> ret(num_parts);
    auto                           partition_size = length / num_parts;
    T1                             cur_partition  = 0;
    for(size_t i = 0; i < num_parts; ++i, cur_partition += partition_size)
    {
        ret[i].first  = cur_partition;
        ret[i].second = cur_partition + partition_size;
    }
    // last partition might not divide evenly, fix it up
    ret.back().second = length;
    return ret;
}

// Returns pairs of startindex, endindex, for 1D, 2D, 3D lengths
template <typename T1>
std::vector<std::pair<T1, T1>> partition_rowmajor(const T1& length)
{
    return partition_base(length, compute_partition_count(length));
}

// Partition on the leftmost part of the tuple, for row-major indexing
template <typename T1>
std::vector<std::pair<std::tuple<T1, T1>, std::tuple<T1, T1>>>
    partition_rowmajor(const std::tuple<T1, T1>& length)
{
    auto partitions = partition_base(std::get<0>(length), compute_partition_count(length));
    std::vector<std::pair<std::tuple<T1, T1>, std::tuple<T1, T1>>> ret(partitions.size());
    for(size_t i = 0; i < partitions.size(); ++i)
    {
        // each piece spans [start, end) in dim 0 and the full extent of dim 1
        std::get<0>(ret[i].first)  = partitions[i].first;
        std::get<1>(ret[i].first)  = 0;
        std::get<0>(ret[i].second) = partitions[i].second;
        std::get<1>(ret[i].second) = std::get<1>(length);
    }
    return ret;
}

template <typename T1>
std::vector<std::pair<std::tuple<T1, T1, T1>, std::tuple<T1, T1, T1>>>
    partition_rowmajor(const std::tuple<T1, T1, T1>& length)
{
    auto partitions = partition_base(std::get<0>(length), compute_partition_count(length));
    std::vector<std::pair<std::tuple<T1, T1, T1>, std::tuple<T1, T1, T1>>> ret(partitions.size());
    for(size_t i = 0; i < partitions.size(); ++i)
    {
        // each piece spans [start, end) in dim 0 and the full extent of dims 1, 2
        std::get<0>(ret[i].first)  = partitions[i].first;
        std::get<1>(ret[i].first)  = 0;
        std::get<2>(ret[i].first)  = 0;
        std::get<0>(ret[i].second) = partitions[i].second;
        std::get<1>(ret[i].second) = std::get<1>(length);
        std::get<2>(ret[i].second) = std::get<2>(length);
    }
    return ret;
}

// Returns pairs of startindex, endindex, for 1D, 2D, 3D lengths
template <typename T1>
std::vector<std::pair<T1, T1>> partition_colmajor(const T1& length)
{
    return partition_base(length, compute_partition_count(length));
}

// Partition on the rightmost part of the tuple, for col-major indexing
template <typename T1>
std::vector<std::pair<std::tuple<T1, T1>, std::tuple<T1, T1>>>
    partition_colmajor(const std::tuple<T1, T1>& length)
{
    auto partitions = partition_base(std::get<1>(length), compute_partition_count(length));
    std::vector<std::pair<std::tuple<T1, T1>, std::tuple<T1, T1>>> ret(partitions.size());
    for(size_t i = 0; i < partitions.size(); ++i)
    {
        // each piece spans [start, end) in dim 1 and the full extent of dim 0
        std::get<1>(ret[i].first)  = partitions[i].first;
        std::get<0>(ret[i].first)  = 0;
        std::get<1>(ret[i].second) = partitions[i].second;
        std::get<0>(ret[i].second) = std::get<0>(length);
    }
    return ret;
}

template <typename T1>
std::vector<std::pair<std::tuple<T1, T1, T1>, std::tuple<T1, T1, T1>>>
    partition_colmajor(const std::tuple<T1, T1, T1>& length)
{
    auto partitions = partition_base(std::get<2>(length), compute_partition_count(length));
    std::vector<std::pair<std::tuple<T1, T1, T1>, std::tuple<T1, T1, T1>>> ret(partitions.size());
    for(size_t i = 0; i < partitions.size(); ++i)
    {
        // each piece spans [start, end) in dim 2 and the full extent of dims 0, 1
        std::get<2>(ret[i].first)  = partitions[i].first;
        std::get<1>(ret[i].first)  = 0;
        std::get<0>(ret[i].first)  = 0;
        std::get<2>(ret[i].second) = partitions[i].second;
        std::get<1>(ret[i].second) = std::get<1>(length);
        std::get<0>(ret[i].second) = std::get<0>(length);
    }
    return ret;
}

// Specialized computation of index given 1-, 2-, 3- dimension length + stride
// NOTE(review): the return type is int while base is size_t — this can
// overflow for very large transforms; confirm sizes stay within int range.
template <typename T1, typename T2>
int compute_index(T1 length, T2 stride, size_t base)
{
    static_assert(std::is_integral<T1>::value, "Integral required.");
    static_assert(std::is_integral<T2>::value, "Integral required.");
    return (length * stride) + base;
}
// 2-D index: dot product of (index, stride) plus the batch base offset.
template <typename T1, typename T2>
int compute_index(const std::tuple<T1, T1>& length,
                  const std::tuple<T2, T2>& stride,
                  size_t                    base)
{
    static_assert(std::is_integral<T1>::value, "Integral required.");
    static_assert(std::is_integral<T2>::value, "Integral required.");
    return (std::get<0>(length) * std::get<0>(stride)) + (std::get<1>(length) * std::get<1>(stride))
           + base;
}

// 3-D index: dot product of (index, stride) plus the batch base offset.
template <typename T1, typename T2>
int compute_index(const std::tuple<T1, T1, T1>& length,
                  const std::tuple<T2, T2, T2>& stride,
                  size_t                        base)
{
    static_assert(std::is_integral<T1>::value, "Integral required.");
    static_assert(std::is_integral<T2>::value, "Integral required.");
    return (std::get<0>(length) * std::get<0>(stride)) + (std::get<1>(length) * std::get<1>(stride))
           + (std::get<2>(length) * std::get<2>(stride)) + base;
}

// Given a length vector, set the rest of the strides.
// The optional argument stride0 sets the stride for the contiguous dimension.
// The optional rcpadding argument sets the stride correctly for in-place
// multi-dimensional real/complex transforms.
// Format is row-major.
template <typename T1>
inline std::vector<T1> compute_stride(const std::vector<T1>&     length,
                                      const std::vector<size_t>& stride0   = std::vector<size_t>(),
                                      const bool                 rcpadding = false)
{
    const int       dim = length.size();
    std::vector<T1> stride(dim);

    int dimoffset = 0;

    if(stride0.size() == 0)
    {
        // Set the contiguous stride:
        stride[dim - 1] = 1;
        dimoffset       = 1;
    }
    else
    {
        // Copy the input values to the end of the stride array:
        // NOTE(review): assumes stride0.size() <= length.size(); a longer
        // stride0 would index stride out of bounds — confirm at call sites.
        for(int i = 0; i < stride0.size(); ++i)
        {
            stride[dim - stride0.size() + i] = stride0[i];
        }
    }

    if(stride0.size() < dim)
    {
        // Compute any remaining values via recursion.
        // Walks from the innermost unset dimension outward; each stride is
        // the next-inner stride times the next-inner length.  When
        // rcpadding is set, the innermost length is padded to 2*(N/2+1)
        // elements for in-place real/complex layouts.
        for(int i = dim - dimoffset - stride0.size(); i-- > 0;)
        {
            auto lengthip1 = length[i + 1];
            if(rcpadding && i == dim - 2)
            {
                lengthip1 = 2 * (lengthip1 / 2 + 1);
            }
            stride[i] = stride[i + 1] * lengthip1;
        }
    }

    return stride;
}

// Copy data of dimensions length with strides istride and length idist between batches to
// a buffer with strides ostride and length odist between batches.  The input and output
// types are identical.
template <typename Tval, typename Tint1, typename Tint2, typename Tint3>
inline void copy_buffers_1to1(const Tval*                input,
                              Tval*                      output,
                              const Tint1&               whole_length,
                              const size_t               nbatch,
                              const Tint2&               istride,
                              const size_t               idist,
                              const Tint3&               ostride,
                              const size_t               odist,
                              const std::vector<size_t>& ioffset,
                              const std::vector<size_t>& ooffset)
{
    // If input and output layouts match, the output index is just the
    // input index — skip the second compute_index call per element.
    const bool idx_equals_odx = istride == ostride && idist == odist;
    size_t     idx_base       = 0;
    size_t     odx_base       = 0;
    auto       partitions     = partition_rowmajor(whole_length);
    for(size_t b = 0; b < nbatch; b++, idx_base += idist, odx_base += odist)
    {
// one OpenMP thread per partition of the index space
#pragma omp parallel for num_threads(partitions.size())
        for(size_t part = 0; part < partitions.size(); ++part)
        {
            auto       index  = partitions[part].first;
            const auto length = partitions[part].second;
            do
            {
                // NOTE(review): idx/odx are int; may overflow for very
                // large transforms — confirm sizes stay within int range.
                const int idx = compute_index(index, istride, idx_base);
                const int odx = idx_equals_odx ? idx : compute_index(index, ostride, odx_base);
                output[odx + ooffset[0]] = input[idx + ioffset[0]];
            } while(increment_rowmajor(index, length));
        }
    }
}

// Copy data of dimensions length with strides istride and length idist between batches to
// a buffer with strides ostride and length odist between batches.  The input type is
// planar and the output type is complex interleaved.
// Gather a planar pair (input0 = real parts, input1 = imaginary parts) into a
// single interleaved std::complex output buffer, element by element.
template <typename Tval, typename Tint1, typename Tint2, typename Tint3>
inline void copy_buffers_2to1(const Tval*                input0,
                              const Tval*                input1,
                              std::complex<Tval>*        output,
                              const Tint1&               whole_length,
                              const size_t               nbatch,
                              const Tint2&               istride,
                              const size_t               idist,
                              const Tint3&               ostride,
                              const size_t               odist,
                              const std::vector<size_t>& ioffset,
                              const std::vector<size_t>& ooffset)
{
    // Matching layouts let us reuse the input index as the output index.
    const bool idx_equals_odx = istride == ostride && idist == odist;
    size_t     idx_base       = 0;
    size_t     odx_base       = 0;
    auto       partitions     = partition_rowmajor(whole_length);
    for(size_t b = 0; b < nbatch; b++, idx_base += idist, odx_base += odist)
    {
// one OpenMP thread per partition of the index space
#pragma omp parallel for num_threads(partitions.size())
        for(size_t part = 0; part < partitions.size(); ++part)
        {
            auto       index  = partitions[part].first;
            const auto length = partitions[part].second;
            do
            {
                const int idx = compute_index(index, istride, idx_base);
                const int odx = idx_equals_odx ? idx : compute_index(index, ostride, odx_base);
                // ioffset[0]/ioffset[1] offset the real/imaginary planes
                // independently; ooffset[0] offsets the interleaved output.
                output[odx + ooffset[0]]
                    = std::complex<Tval>(input0[idx + ioffset[0]], input1[idx + ioffset[1]]);
            } while(increment_rowmajor(index, length));
        }
    }
}

// Copy data of dimensions length with strides istride and length idist between batches to
// a buffer with strides ostride and length odist between batches.  The input type is
// complex interleaved and the output type is planar.
// Scatter an interleaved std::complex input into a planar pair of outputs
// (output0 = real parts, output1 = imaginary parts), element by element.
template <typename Tval, typename Tint1, typename Tint2, typename Tint3>
inline void copy_buffers_1to2(const std::complex<Tval>*  input,
                              Tval*                      output0,
                              Tval*                      output1,
                              const Tint1&               whole_length,
                              const size_t               nbatch,
                              const Tint2&               istride,
                              const size_t               idist,
                              const Tint3&               ostride,
                              const size_t               odist,
                              const std::vector<size_t>& ioffset,
                              const std::vector<size_t>& ooffset)
{
    // Matching layouts let us reuse the input index as the output index.
    const bool idx_equals_odx = istride == ostride && idist == odist;
    size_t     idx_base       = 0;
    size_t     odx_base       = 0;
    auto       partitions     = partition_rowmajor(whole_length);
    for(size_t b = 0; b < nbatch; b++, idx_base += idist, odx_base += odist)
    {
// one OpenMP thread per partition of the index space
#pragma omp parallel for num_threads(partitions.size())
        for(size_t part = 0; part < partitions.size(); ++part)
        {
            auto       index  = partitions[part].first;
            const auto length = partitions[part].second;
            do
            {
                const int idx = compute_index(index, istride, idx_base);
                const int odx = idx_equals_odx ? idx : compute_index(index, ostride, odx_base);
                // ooffset[0]/ooffset[1] offset the real/imaginary planes
                // independently; ioffset[0] offsets the interleaved input.
                output0[odx + ooffset[0]] = input[idx + ioffset[0]].real();
                output1[odx + ooffset[1]] = input[idx + ioffset[0]].imag();
            } while(increment_rowmajor(index, length));
        }
    }
}

// Copy data of dimensions length with strides istride and length idist between batches to
// a buffer with strides ostride and length odist between batches.  The input type is
// given by itype, and the output type is given by otype.
// Dispatch a buffer copy to the 1to1 / 1to2 / 2to1 worker appropriate for
// the (itype, otype) pair and the precision.  Throws std::runtime_error for
// unsupported type combinations.
template <typename Tallocator1,
          typename Tallocator2,
          typename Tint1,
          typename Tint2,
          typename Tint3>
inline void copy_buffers(const std::vector<std::vector<char, Tallocator1>>& input,
                         std::vector<std::vector<char, Tallocator2>>&       output,
                         const Tint1&                                       length,
                         const size_t                                       nbatch,
                         const rocfft_precision                             precision,
                         const rocfft_array_type                            itype,
                         const Tint2&                                       istride,
                         const size_t                                       idist,
                         const rocfft_array_type                            otype,
                         const Tint3&                                       ostride,
                         const size_t                                       odist,
                         const std::vector<size_t>&                         ioffset,
                         const std::vector<size_t>&                         ooffset)
{
    if(itype == otype)
    {
        switch(itype)
        {
        case rocfft_array_type_complex_interleaved:
        case rocfft_array_type_hermitian_interleaved:
            // interleaved -> interleaved: a single element-wise copy of
            // std::complex values.
            switch(precision)
            {
            case rocfft_precision_single:
                copy_buffers_1to1(reinterpret_cast<const std::complex<float>*>(input[0].data()),
                                  reinterpret_cast<std::complex<float>*>(output[0].data()),
                                  length,
                                  nbatch,
                                  istride,
                                  idist,
                                  ostride,
                                  odist,
                                  ioffset,
                                  ooffset);
                break;
            case rocfft_precision_double:
                copy_buffers_1to1(reinterpret_cast<const std::complex<double>*>(input[0].data()),
                                  reinterpret_cast<std::complex<double>*>(output[0].data()),
                                  length,
                                  nbatch,
                                  istride,
                                  idist,
                                  ostride,
                                  odist,
                                  ioffset,
                                  ooffset);
                break;
            }
            break;
        case rocfft_array_type_real:
        case rocfft_array_type_complex_planar:
        case rocfft_array_type_hermitian_planar:
            // real or planar: copy each component buffer independently
            // (1 buffer for real, 2 for planar).
            for(int idx = 0; idx < input.size(); ++idx)
            {
                switch(precision)
                {
                case rocfft_precision_single:
                    copy_buffers_1to1(reinterpret_cast<const float*>(input[idx].data()),
                                      reinterpret_cast<float*>(output[idx].data()),
                                      length,
                                      nbatch,
                                      istride,
                                      idist,
                                      ostride,
                                      odist,
                                      ioffset,
                                      ooffset);
                    break;
                case rocfft_precision_double:
                    copy_buffers_1to1(reinterpret_cast<const double*>(input[idx].data()),
                                      reinterpret_cast<double*>(output[idx].data()),
                                      length,
                                      nbatch,
                                      istride,
                                      idist,
                                      ostride,
                                      odist,
                                      ioffset,
                                      ooffset);
                    break;
                }
            }
            break;
        default:
            throw std::runtime_error("Invalid data type");
            break;
        }
    }
    else if((itype == rocfft_array_type_complex_interleaved
             && otype == rocfft_array_type_complex_planar)
            || (itype == rocfft_array_type_hermitian_interleaved
                && otype == rocfft_array_type_hermitian_planar))
    {
        // copy 1to2: interleaved input scattered into planar output
        switch(precision)
        {
        case rocfft_precision_single:
            copy_buffers_1to2(reinterpret_cast<const std::complex<float>*>(input[0].data()),
                              reinterpret_cast<float*>(output[0].data()),
                              reinterpret_cast<float*>(output[1].data()),
                              length,
                              nbatch,
                              istride,
                              idist,
                              ostride,
                              odist,
                              ioffset,
                              ooffset);
            break;
        case rocfft_precision_double:
            copy_buffers_1to2(reinterpret_cast<const std::complex<double>*>(input[0].data()),
                              reinterpret_cast<double*>(output[0].data()),
                              reinterpret_cast<double*>(output[1].data()),
                              length,
                              nbatch,
                              istride,
                              idist,
                              ostride,
                              odist,
                              ioffset,
                              ooffset);
            break;
        }
    }
    else if((itype == rocfft_array_type_complex_planar
             && otype == rocfft_array_type_complex_interleaved)
            || (itype == rocfft_array_type_hermitian_planar
                && otype == rocfft_array_type_hermitian_interleaved))
    {
        // copy 2 to 1: planar input gathered into interleaved output
        switch(precision)
        {
        case rocfft_precision_single:
            copy_buffers_2to1(reinterpret_cast<const float*>(input[0].data()),
                              reinterpret_cast<const float*>(input[1].data()),
                              reinterpret_cast<std::complex<float>*>(output[0].data()),
                              length,
                              nbatch,
                              istride,
                              idist,
                              ostride,
                              odist,
                              ioffset,
                              ooffset);
            break;
        case rocfft_precision_double:
            copy_buffers_2to1(reinterpret_cast<const double*>(input[0].data()),
                              reinterpret_cast<const double*>(input[1].data()),
                              reinterpret_cast<std::complex<double>*>(output[0].data()),
                              length,
                              nbatch,
                              istride,
                              idist,
                              ostride,
                              odist,
                              ioffset,
                              ooffset);
            break;
        }
    }
    else
    {
        throw std::runtime_error("Invalid input and output types.");
    }
}

// unroll arbitrary-dimension copy_buffers into specializations for 1-, 2-, 3-dimensions
template <typename Tallocator1,
          typename Tallocator2,
          typename Tint1,
          typename Tint2,
          typename Tint3>
inline void copy_buffers(const std::vector<std::vector<char, Tallocator1>>& input,
                         std::vector<std::vector<char, Tallocator2>>&       output,
                         const std::vector<Tint1>&                          length,
                         const size_t                                       nbatch,
                         const rocfft_precision                             precision,
                         const rocfft_array_type                            itype,
                         const std::vector<Tint2>&                          istride,
                         const size_t                                       idist,
                         const rocfft_array_type                            otype,
                         const std::vector<Tint3>&                          ostride,
                         const size_t                                       odist,
                         const std::vector<size_t>&                         ioffset,
                         const std::vector<size_t>&                         ooffset)
{
    // Convert runtime dimension count into the scalar/tuple overloads above;
    // dimensions other than 1-3 are not supported.
    switch(length.size())
    {
    case 1:
        return copy_buffers(input,
                            output,
                            length[0],
                            nbatch,
                            precision,
                            itype,
                            istride[0],
                            idist,
                            otype,
                            ostride[0],
                            odist,
                            ioffset,
                            ooffset);
    case 2:
        return copy_buffers(input,
                            output,
                            std::make_tuple(length[0], length[1]),
                            nbatch,
                            precision,
                            itype,
                            std::make_tuple(istride[0], istride[1]),
                            idist,
                            otype,
                            std::make_tuple(ostride[0], ostride[1]),
                            odist,
                            ioffset,
                            ooffset);
    case 3:
        return copy_buffers(input,
                            output,
                            std::make_tuple(length[0], length[1], length[2]),
                            nbatch,
                            precision,
                            itype,
                            std::make_tuple(istride[0], istride[1], istride[2]),
                            idist,
                            otype,
                            std::make_tuple(ostride[0], ostride[1], ostride[2]),
                            odist,
                            ioffset,
                            ooffset);
    default:
        abort();
    }
}

// Compute the L-infinity and L-2 distance between two buffers with strides istride and
// length idist between batches to a buffer with strides ostride and length odist between
// batches.  Both buffers are of complex type.
// Holds an L-2 and an L-infinity norm (or distance) value.
struct VectorNorms
{
    double l_2 = 0.0, l_inf = 0.0;
};

// Element-wise comparison of two interleaved complex buffers.  Real and
// imaginary parts are compared separately; every element whose running
// max-difference exceeds linf_cutoff is recorded in linf_failures as a
// (batch, input-index) pair.
template <typename Tcomplex, typename Tint1, typename Tint2, typename Tint3>
inline VectorNorms distance_1to1_complex(const Tcomplex*                         input,
                                         const Tcomplex*                         output,
                                         const Tint1&                            whole_length,
                                         const size_t                            nbatch,
                                         const Tint2&                            istride,
                                         const size_t                            idist,
                                         const Tint3&                            ostride,
                                         const size_t                            odist,
                                         std::vector<std::pair<size_t, size_t>>& linf_failures,
                                         const double                            linf_cutoff,
                                         const std::vector<size_t>&              ioffset,
                                         const std::vector<size_t>&              ooffset)
{
    double     linf = 0.0;
    double     l2   = 0.0;
    // serializes concurrent push_back into linf_failures from OMP threads
    std::mutex linf_failure_lock;

    const bool idx_equals_odx = istride == ostride && idist == odist;
    size_t     idx_base       = 0;
    size_t     odx_base       = 0;
    // NOTE(review): this function partitions col-major but iterates with
    // increment_rowmajor (the other distance/norm routines use
    // partition_rowmajor) — confirm this asymmetry is intentional.
    auto       partitions     = partition_colmajor(whole_length);
    for(size_t b = 0; b < nbatch; b++, idx_base += idist, odx_base += odist)
    {
// per-partition maxima/sums are reduced into linf/l2 by OpenMP
#pragma omp parallel for reduction(max : linf) reduction(+ : l2) num_threads(partitions.size())
        for(size_t part = 0; part < partitions.size(); ++part)
        {
            double     cur_linf = 0.0;
            double     cur_l2   = 0.0;
            auto       index    = partitions[part].first;
            const auto length   = partitions[part].second;
            do
            {
                const int idx = compute_index(index, istride, idx_base);
                const int odx = idx_equals_odx ? idx : compute_index(index, ostride, odx_base);
                const double rdiff
                    = std::abs(output[odx + ooffset[0]].real() - input[idx + ioffset[0]].real());
                cur_linf = std::max(rdiff, cur_linf);
                if(cur_linf > linf_cutoff)
                {
                    std::pair<size_t, size_t> fval(b, idx);
                    linf_failure_lock.lock();
                    linf_failures.push_back(fval);
                    linf_failure_lock.unlock();
                }
                cur_l2 += rdiff * rdiff;

                const double idiff
                    = std::abs(output[odx + ooffset[0]].imag() - input[idx + ioffset[0]].imag());
                cur_linf = std::max(idiff, cur_linf);
                if(cur_linf > linf_cutoff)
                {
                    std::pair<size_t, size_t> fval(b, idx);
                    linf_failure_lock.lock();
                    linf_failures.push_back(fval);
                    linf_failure_lock.unlock();
                }
                cur_l2 += idiff * idiff;
            } while(increment_rowmajor(index, length));
            linf = std::max(linf, cur_linf);
            l2 += cur_l2;
        }
    }
    return {.l_2 = sqrt(l2), .l_inf = linf};
}

// Compute the L-infinity and L-2 distance between two buffers with strides istride and
// length idist between batches to a buffer with strides ostride and length odist between
// batches.  Both buffers are of real type.
// Element-wise comparison of two real-valued buffers.  Every element whose
// running max-difference exceeds linf_cutoff is recorded in linf_failures
// as a (batch, input-index) pair.
template <typename Tfloat, typename Tint1, typename Tint2, typename Tint3>
inline VectorNorms distance_1to1_real(const Tfloat*                           input,
                                      const Tfloat*                           output,
                                      const Tint1&                            whole_length,
                                      const size_t                            nbatch,
                                      const Tint2&                            istride,
                                      const size_t                            idist,
                                      const Tint3&                            ostride,
                                      const size_t                            odist,
                                      std::vector<std::pair<size_t, size_t>>& linf_failures,
                                      const double                            linf_cutoff,
                                      const std::vector<size_t>&              ioffset,
                                      const std::vector<size_t>&              ooffset)
{
    double     linf = 0.0;
    double     l2   = 0.0;
    // serializes concurrent push_back into linf_failures from OMP threads
    std::mutex linf_failure_lock;

    const bool idx_equals_odx = istride == ostride && idist == odist;
    size_t     idx_base       = 0;
    size_t     odx_base       = 0;
    auto       partitions     = partition_rowmajor(whole_length);
    for(size_t b = 0; b < nbatch; b++, idx_base += idist, odx_base += odist)
    {
// per-partition maxima/sums are reduced into linf/l2 by OpenMP
#pragma omp parallel for reduction(max : linf) reduction(+ : l2) num_threads(partitions.size())
        for(size_t part = 0; part < partitions.size(); ++part)
        {
            double     cur_linf = 0.0;
            double     cur_l2   = 0.0;
            auto       index    = partitions[part].first;
            const auto length   = partitions[part].second;
            do
            {
                const int idx = compute_index(index, istride, idx_base);
                const int odx = idx_equals_odx ? idx : compute_index(index, ostride, odx_base);
                const double diff = std::abs(output[odx + ooffset[0]] - input[idx + ioffset[0]]);
                cur_linf = std::max(diff, cur_linf);
                if(cur_linf > linf_cutoff)
                {
                    std::pair<size_t, size_t> fval(b, idx);
                    linf_failure_lock.lock();
                    linf_failures.push_back(fval);
                    linf_failure_lock.unlock();
                }
                cur_l2 += diff * diff;
            } while(increment_rowmajor(index, length));
            linf = std::max(linf, cur_linf);
            l2 += cur_l2;
        }
    }
    return {.l_2 = sqrt(l2), .l_inf = linf};
}

// Compute the L-infinity and L-2 distance between two buffers with strides istride and
// length idist between batches to a buffer with strides ostride and length odist between
// batches.  input is complex-interleaved, output is complex-planar.
// Element-wise comparison of an interleaved complex buffer against a planar
// pair (output0 = real parts, output1 = imaginary parts).  Failures beyond
// linf_cutoff are recorded as (batch, input-index) pairs.
template <typename Tval, typename Tint1, typename T2, typename T3>
inline VectorNorms distance_1to2(const std::complex<Tval>*               input,
                                 const Tval*                             output0,
                                 const Tval*                             output1,
                                 const Tint1&                            whole_length,
                                 const size_t                            nbatch,
                                 const T2&                               istride,
                                 const size_t                            idist,
                                 const T3&                               ostride,
                                 const size_t                            odist,
                                 std::vector<std::pair<size_t, size_t>>& linf_failures,
                                 const double                            linf_cutoff,
                                 const std::vector<size_t>&              ioffset,
                                 const std::vector<size_t>&              ooffset)
{
    double     linf = 0.0;
    double     l2   = 0.0;
    // serializes concurrent push_back into linf_failures from OMP threads
    std::mutex linf_failure_lock;

    const bool idx_equals_odx = istride == ostride && idist == odist;
    size_t     idx_base       = 0;
    size_t     odx_base       = 0;
    auto       partitions     = partition_rowmajor(whole_length);
    for(size_t b = 0; b < nbatch; b++, idx_base += idist, odx_base += odist)
    {
// per-partition maxima/sums are reduced into linf/l2 by OpenMP
#pragma omp parallel for reduction(max : linf) reduction(+ : l2) num_threads(partitions.size())
        for(size_t part = 0; part < partitions.size(); ++part)
        {
            double     cur_linf = 0.0;
            double     cur_l2   = 0.0;
            auto       index    = partitions[part].first;
            const auto length   = partitions[part].second;
            do
            {
                const int idx = compute_index(index, istride, idx_base);
                const int odx = idx_equals_odx ? idx : compute_index(index, ostride, odx_base);
                const double rdiff
                    = std::abs(output0[odx + ooffset[0]] - input[idx + ioffset[0]].real());
                cur_linf = std::max(rdiff, cur_linf);
                if(cur_linf > linf_cutoff)
                {
                    std::pair<size_t, size_t> fval(b, idx);
                    linf_failure_lock.lock();
                    linf_failures.push_back(fval);
                    linf_failure_lock.unlock();
                }
                cur_l2 += rdiff * rdiff;

                const double idiff
                    = std::abs(output1[odx + ooffset[1]] - input[idx + ioffset[0]].imag());
                cur_linf = std::max(idiff, cur_linf);
                if(cur_linf > linf_cutoff)
                {
                    std::pair<size_t, size_t> fval(b, idx);
                    linf_failure_lock.lock();
                    linf_failures.push_back(fval);
                    linf_failure_lock.unlock();
                }
                cur_l2 += idiff * idiff;
            } while(increment_rowmajor(index, length));
            linf = std::max(linf, cur_linf);
            l2 += cur_l2;
        }
    }
    return {.l_2 = sqrt(l2), .l_inf = linf};
}

// Compute the L-infinity and L-2 distance between two buffers of dimension length and
// with types given by itype, otype, and precision.
template <typename Tallocator1,
          typename Tallocator2,
          typename Tint1,
          typename Tint2,
          typename Tint3>
inline VectorNorms distance(const std::vector<std::vector<char, Tallocator1>>& input,
                            const std::vector<std::vector<char, Tallocator2>>& output,
                            const Tint1&                                       length,
                            const size_t                                       nbatch,
                            const rocfft_precision                             precision,
                            const rocfft_array_type                            itype,
                            const Tint2&                                       istride,
                            const size_t                                       idist,
                            const rocfft_array_type                            otype,
                            const Tint3&                                       ostride,
                            const size_t                                       odist,
                            std::vector<std::pair<size_t, size_t>>&            linf_failures,
                            const double                                       linf_cutoff,
                            const std::vector<size_t>&                         ioffset,
                            const std::vector<size_t>&                         ooffset)
{
    // l_2 values are accumulated as squares and square-rooted once at the
    // end, so each sub-distance's l_2 is squared before accumulation.
    VectorNorms dist;
    if(itype == otype)
    {
        switch(itype)
        {
        case rocfft_array_type_complex_interleaved:
        case rocfft_array_type_hermitian_interleaved:
            switch(precision)
            {
            case rocfft_precision_single:
                dist = distance_1to1_complex(
                    reinterpret_cast<const std::complex<float>*>(input[0].data()),
                    reinterpret_cast<const std::complex<float>*>(output[0].data()),
                    length,
                    nbatch,
                    istride,
                    idist,
                    ostride,
                    odist,
                    linf_failures,
                    linf_cutoff,
                    ioffset,
                    ooffset);
                break;
            case rocfft_precision_double:
                dist = distance_1to1_complex(
                    reinterpret_cast<const std::complex<double>*>(input[0].data()),
                    reinterpret_cast<const std::complex<double>*>(output[0].data()),
                    length,
                    nbatch,
                    istride,
                    idist,
                    ostride,
                    odist,
                    linf_failures,
                    linf_cutoff,
                    ioffset,
                    ooffset);
                break;
            }
            dist.l_2 *= dist.l_2;
            break;
        case rocfft_array_type_real:
        case rocfft_array_type_complex_planar:
        case rocfft_array_type_hermitian_planar:
            // compare each component buffer independently and combine:
            // max of the L-infs, sum of the squared L-2s.
            for(int idx = 0; idx < input.size(); ++idx)
            {
                VectorNorms d;
                switch(precision)
                {
                case rocfft_precision_single:
                    d = distance_1to1_real(reinterpret_cast<const float*>(input[idx].data()),
                                           reinterpret_cast<const float*>(output[idx].data()),
                                           length,
                                           nbatch,
                                           istride,
                                           idist,
                                           ostride,
                                           odist,
                                           linf_failures,
                                           linf_cutoff,
                                           ioffset,
                                           ooffset);
                    break;
                case rocfft_precision_double:
                    d = distance_1to1_real(reinterpret_cast<const double*>(input[idx].data()),
                                           reinterpret_cast<const double*>(output[idx].data()),
                                           length,
                                           nbatch,
                                           istride,
                                           idist,
                                           ostride,
                                           odist,
                                           linf_failures,
                                           linf_cutoff,
                                           ioffset,
                                           ooffset);
                    break;
                }
                dist.l_inf = std::max(d.l_inf, dist.l_inf);
                dist.l_2 += d.l_2 * d.l_2;
            }
            break;
        default:
            throw std::runtime_error("Invalid input and output types.");
            break;
        }
    }
    else if((itype == rocfft_array_type_complex_interleaved
             && otype == rocfft_array_type_complex_planar)
            || (itype == rocfft_array_type_hermitian_interleaved
                && otype == rocfft_array_type_hermitian_planar))
    {
        switch(precision)
        {
        case rocfft_precision_single:
            dist = distance_1to2(reinterpret_cast<const std::complex<float>*>(input[0].data()),
                                 reinterpret_cast<const float*>(output[0].data()),
                                 reinterpret_cast<const float*>(output[1].data()),
                                 length,
                                 nbatch,
                                 istride,
                                 idist,
                                 ostride,
                                 odist,
                                 linf_failures,
                                 linf_cutoff,
                                 ioffset,
                                 ooffset);
            break;
        case rocfft_precision_double:
            dist = distance_1to2(reinterpret_cast<const std::complex<double>*>(input[0].data()),
                                 reinterpret_cast<const double*>(output[0].data()),
                                 reinterpret_cast<const double*>(output[1].data()),
                                 length,
                                 nbatch,
                                 istride,
                                 idist,
                                 ostride,
                                 odist,
                                 linf_failures,
                                 linf_cutoff,
                                 ioffset,
                                 ooffset);
            break;
        }
        dist.l_2 *= dist.l_2;
    }
    else if((itype == rocfft_array_type_complex_planar
             && otype == rocfft_array_type_complex_interleaved)
            || (itype == rocfft_array_type_hermitian_planar
                && otype == rocfft_array_type_hermitian_interleaved))
    {
        // planar input vs interleaved output: reuse distance_1to2 with the
        // roles (and strides/dists) of input and output swapped.
        // NOTE(review): ioffset/ooffset are NOT swapped here although the
        // stride/dist arguments are — confirm whether the offsets should
        // also be exchanged for asymmetric offsets.
        switch(precision)
        {
        case rocfft_precision_single:
            dist = distance_1to2(reinterpret_cast<const std::complex<float>*>(output[0].data()),
                                 reinterpret_cast<const float*>(input[0].data()),
                                 reinterpret_cast<const float*>(input[1].data()),
                                 length,
                                 nbatch,
                                 ostride,
                                 odist,
                                 istride,
                                 idist,
                                 linf_failures,
                                 linf_cutoff,
                                 ioffset,
                                 ooffset);
            break;
        case rocfft_precision_double:
            dist = distance_1to2(reinterpret_cast<const std::complex<double>*>(output[0].data()),
                                 reinterpret_cast<const double*>(input[0].data()),
                                 reinterpret_cast<const double*>(input[1].data()),
                                 length,
                                 nbatch,
                                 ostride,
                                 odist,
                                 istride,
                                 idist,
                                 linf_failures,
                                 linf_cutoff,
                                 ioffset,
                                 ooffset);
            break;
        }
        dist.l_2 *= dist.l_2;
    }
    else
    {
        throw std::runtime_error("Invalid input and output types.");
    }
    dist.l_2 = sqrt(dist.l_2);
    return dist;
}

// Unroll arbitrary-dimension distance into specializations for 1-, 2-, 3-dimensions
template <typename Tallocator1,
          typename Tallocator2,
          typename Tint1,
          typename Tint2,
          typename Tint3>
inline VectorNorms distance(const std::vector<std::vector<char, Tallocator1>>& input,
                            const std::vector<std::vector<char, Tallocator2>>& output,
                            const std::vector<Tint1>&                          length,
                            const size_t                                       nbatch,
                            const rocfft_precision                             precision,
                            const rocfft_array_type                            itype,
                            const std::vector<Tint2>&                          istride,
                            const size_t                                       idist,
                            const rocfft_array_type                            otype,
                            const std::vector<Tint3>&                          ostride,
                            const size_t                                       odist,
                            std::vector<std::pair<size_t, size_t>>&            linf_failures,
                            const double                                       linf_cutoff,
                            const std::vector<size_t>&                         ioffset,
                            const std::vector<size_t>&                         ooffset)
{
    // Convert runtime dimension count into the scalar/tuple overloads above;
    // dimensions other than 1-3 are not supported.
    switch(length.size())
    {
    case 1:
        return distance(input,
                        output,
                        length[0],
                        nbatch,
                        precision,
                        itype,
                        istride[0],
                        idist,
                        otype,
                        ostride[0],
                        odist,
                        linf_failures,
                        linf_cutoff,
                        ioffset,
                        ooffset);
    case 2:
        return distance(input,
                        output,
                        std::make_tuple(length[0], length[1]),
                        nbatch,
                        precision,
                        itype,
                        std::make_tuple(istride[0], istride[1]),
                        idist,
                        otype,
                        std::make_tuple(ostride[0], ostride[1]),
                        odist,
                        linf_failures,
                        linf_cutoff,
                        ioffset,
                        ooffset);
    case 3:
        return distance(input,
                        output,
                        std::make_tuple(length[0], length[1], length[2]),
                        nbatch,
                        precision,
                        itype,
                        std::make_tuple(istride[0], istride[1], istride[2]),
                        idist,
                        otype,
                        std::make_tuple(ostride[0], ostride[1], ostride[2]),
                        odist,
                        linf_failures,
                        linf_cutoff,
                        ioffset,
                        ooffset);
    default:
        abort();
    }
}

// Compute the L-infinity and L-2 norm of a buffer with strides istride and
// length idist.  Data is std::complex.
template <typename Tcomplex, typename T1, typename T2>
inline VectorNorms norm_complex(const Tcomplex*            input,
                                const T1&                  whole_length,
                                const size_t               nbatch,
                                const T2&                  istride,
                                const size_t               idist,
                                const std::vector<size_t>& offset)
{
    double linf = 0.0;
    double l2   = 0.0;

    size_t idx_base   = 0;
    auto   partitions = partition_rowmajor(whole_length);
    for(size_t b = 0; b < nbatch; b++, idx_base += idist)
    {
// per-partition maxima/sums are reduced into linf/l2 by OpenMP
#pragma omp parallel for reduction(max : linf) reduction(+ : l2) num_threads(partitions.size())
        for(size_t part = 0; part < partitions.size(); ++part)
        {
            double     cur_linf = 0.0;
            double     cur_l2   = 0.0;
            auto       index    = partitions[part].first;
            const auto length   = partitions[part].second;
            do
            {
                const int idx = compute_index(index, istride, idx_base);
                // real and imaginary parts contribute independently to both
                // the max-abs and the sum of squares
                const double rval = std::abs(input[idx + offset[0]].real());
                cur_linf          = std::max(rval, cur_linf);
                cur_l2 += rval * rval;

                const double ival = std::abs(input[idx + offset[0]].imag());
                cur_linf          = std::max(ival, cur_linf);
                cur_l2 += ival * ival;
            } while(increment_rowmajor(index, length));
            linf = std::max(linf, cur_linf);
            l2 += cur_l2;
        }
    }
    return {.l_2 = sqrt(l2), .l_inf = linf};
}

// Compute the L-infinity and L-2 norm of a buffer with strides istride and
// length idist.  Data is real-valued.
// Accumulate the max-absolute-value (L-infinity) and the root of the sum of
// squares (L-2) over every element of a real-valued strided, batched buffer.
template <typename Tfloat, typename T1, typename T2>
inline VectorNorms norm_real(const Tfloat*              input,
                             const T1&                  whole_length,
                             const size_t               nbatch,
                             const T2&                  istride,
                             const size_t               idist,
                             const std::vector<size_t>& offset)
{
    // Running maximum and sum of squares across all batches/partitions;
    // these are the OpenMP reduction targets below.
    double linf = 0.0;
    double l2   = 0.0;

    const auto partitions = partition_rowmajor(whole_length);

    size_t batch_base = 0;
    for(size_t b = 0; b < nbatch; ++b, batch_base += idist)
    {
#pragma omp parallel for reduction(max : linf) reduction(+ : l2) num_threads(partitions.size())
        for(size_t part = 0; part < partitions.size(); ++part)
        {
            // Per-partition accumulators, folded into linf/l2 once per chunk.
            double part_linf = 0.0;
            double part_l2   = 0.0;

            auto       pos  = partitions[part].first;
            const auto stop = partitions[part].second;
            do
            {
                const int    idx = compute_index(pos, istride, batch_base);
                const double mag = std::abs(input[idx + offset[0]]);
                part_linf        = std::max(mag, part_linf);
                part_l2 += mag * mag;
            } while(increment_rowmajor(pos, stop));

            linf = std::max(linf, part_linf);
            l2 += part_l2;
        }
    }

    VectorNorms result;
    result.l_2   = sqrt(l2);
    result.l_inf = linf;
    return result;
}

// Compute the L-infinity and L-2 norm of a buffer with strides istride and
// length idist.  Data format is given by precision and itype.
// Compute the L-infinity and L-2 norms of a host buffer containing real or
// complex data (interleaved or planar), dispatching on precision.
// For interleaved complex data the whole buffer is one vector; for planar
// (and real) data each component vector is normed separately and combined.
template <typename Tallocator1, typename T1, typename T2>
inline VectorNorms norm(const std::vector<std::vector<char, Tallocator1>>& input,
                        const T1&                                          length,
                        const size_t                                       nbatch,
                        const rocfft_precision                             precision,
                        const rocfft_array_type                            itype,
                        const T2&                                          istride,
                        const size_t                                       idist,
                        const std::vector<size_t>&                         offset)
{
    VectorNorms norm;
    switch(itype)
    {
    case rocfft_array_type_complex_interleaved:
    case rocfft_array_type_hermitian_interleaved:
        switch(precision)
        {
        case rocfft_precision_single:
            norm = norm_complex(reinterpret_cast<const std::complex<float>*>(input[0].data()),
                                length,
                                nbatch,
                                istride,
                                idist,
                                offset);
            break;
        case rocfft_precision_double:
            norm = norm_complex(reinterpret_cast<const std::complex<double>*>(input[0].data()),
                                length,
                                nbatch,
                                istride,
                                idist,
                                offset);
            break;
        }
        // norm_complex returns an L2 norm; square it so both branches hold a
        // sum of squares before the final sqrt below.
        norm.l_2 *= norm.l_2;
        break;
    case rocfft_array_type_real:
    case rocfft_array_type_complex_planar:
    case rocfft_array_type_hermitian_planar:
        // One component vector for real data, two for planar complex data.
        // (size_t index avoids the signed/unsigned comparison of the original.)
        for(size_t idx = 0; idx < input.size(); ++idx)
        {
            VectorNorms n;
            switch(precision)
            {
            case rocfft_precision_single:
                n = norm_real(reinterpret_cast<const float*>(input[idx].data()),
                              length,
                              nbatch,
                              istride,
                              idist,
                              offset);
                break;
            case rocfft_precision_double:
                n = norm_real(reinterpret_cast<const double*>(input[idx].data()),
                              length,
                              nbatch,
                              istride,
                              idist,
                              offset);
                break;
            }
            norm.l_inf = std::max(n.l_inf, norm.l_inf);
            norm.l_2 += n.l_2 * n.l_2;
        }
        break;
    default:
        throw std::runtime_error("Invalid data type");
    }
    norm.l_2 = sqrt(norm.l_2);
    return norm;
}

// Unroll arbitrary-dimension norm into specializations for 1-, 2-, 3-dimensions
template <typename Tallocator1, typename T1, typename T2>
inline VectorNorms norm(const std::vector<std::vector<char, Tallocator1>>& input,
                        const std::vector<T1>&                             length,
                        const size_t                                       nbatch,
                        const rocfft_precision                             precision,
                        const rocfft_array_type                            type,
                        const std::vector<T2>&                             stride,
                        const size_t                                       dist,
                        const std::vector<size_t>&                         offset)
{
    switch(length.size())
    {
    case 1:
        return norm(input, length[0], nbatch, precision, type, stride[0], dist, offset);
    case 2:
        return norm(input,
                    std::make_tuple(length[0], length[1]),
                    nbatch,
                    precision,
                    type,
                    std::make_tuple(stride[0], stride[1]),
                    dist,
                    offset);
    case 3:
        return norm(input,
                    std::make_tuple(length[0], length[1], length[2]),
                    nbatch,
                    precision,
                    type,
                    std::make_tuple(stride[0], stride[1], stride[2]),
                    dist,
                    offset);
    default:
        // Only 1-, 2- and 3-D transforms are supported by the test harness.
        abort();
    }
}

// Given a buffer of complex values stored in a vector of chars (or two vectors in the
// case of planar format), impose Hermitian symmetry.
// NB: length is the dimensions of the FFT, not the data layout dimensions.
template <typename Tfloat, typename Tallocator, typename Tsize>
inline void impose_hermitian_symmetry(std::vector<std::vector<char, Tallocator>>& vals,
                                      const std::vector<Tsize>&                   length,
                                      const std::vector<Tsize>&                   istride,
                                      const Tsize                                 idist,
                                      const Tsize                                 nbatch)
{
    switch(vals.size())
    {
    case 1:
    {
        // Complex interleaved data
        for(Tsize ibatch = 0; ibatch < nbatch; ++ibatch)
        {
            auto data = reinterpret_cast<std::complex<Tfloat>*>(vals[0].data()) + ibatch * idist;
            switch(length.size())
            {
            case 3:
                // Nyquist points must be real-valued.
                if(length[2] % 2 == 0)
                {
                    data[istride[2] * (length[2] / 2)].imag(0.0);
                }
                if(length[0] % 2 == 0 && length[2] % 2 == 0)
                {
                    data[istride[0] * (length[0] / 2) + istride[2] * (length[2] / 2)].imag(0.0);
                }
                if(length[1] % 2 == 0 && length[2] % 2 == 0)
                {
                    data[istride[1] * (length[1] / 2) + istride[2] * (length[2] / 2)].imag(0.0);
                }
                if(length[0] % 2 == 0 && length[1] % 2 == 0 && length[2] % 2 == 0)
                {
                    // clang-format off
                    data[istride[0] * (length[0] / 2) + istride[1] * (length[1] / 2)
                         + istride[2] * (length[2] / 2)].imag(0.0);
                    // clang-format on
                }

                // y-axis:
                for(Tsize j = 1; j < (length[1] + 1) / 2; ++j)
                {
                    data[istride[1] * (length[1] - j)] = std::conj(data[istride[1] * j]);
                }

                if(length[0] % 2 == 0)
                {
                    // y-axis at x-nyquist
                    for(Tsize j = 1; j < (length[1] + 1) / 2; ++j)
                    {
                        // clang-format off
                        data[istride[0] * (length[0] / 2) + istride[1] * (length[1] - j)]
                            = std::conj(data[istride[0] * (length[0] / 2) + istride[1] * j]);
                        // clang-format on
                    }
                }

                // x-axis:
                for(Tsize i = 1; i < (length[0] + 1) / 2; ++i)
                {
                    data[istride[0] * (length[0] - i)] = std::conj(data[istride[0] * i]);
                }

                if(length[1] % 2 == 0)
                {
                    // x-axis at y-nyquist
                    for(Tsize i = 1; i < (length[0] + 1) / 2; ++i)
                    {
                        // clang-format off
                        data[istride[0] * (length[0] - i) + istride[1] * (length[1] / 2)]
                            = std::conj(data[istride[0] * i + istride[1] * (length[1] / 2)]);
                        // clang-format on
                    }
                }

                // x-y plane:
                for(Tsize i = 1; i < (length[0] + 1) / 2; ++i)
                {
                    for(Tsize j = 1; j < length[1]; ++j)
                    {
                        // clang-format off
                        data[istride[0] * (length[0] - i) + istride[1] * (length[1] - j)]
                            = std::conj(data[istride[0] * i + istride[1] * j]);
                        // clang-format on
                    }
                }

                if(length[2] % 2 == 0)
                {
                    // x-axis at z-nyquist
                    for(Tsize i = 1; i < (length[0] + 1) / 2; ++i)
                    {
                        data[istride[0] * (length[0] - i) + istride[2] * (length[2] / 2)]
                            = std::conj(data[istride[0] * i + istride[2] * (length[2] / 2)]);
                    }
                    if(length[1] % 2 == 0)
                    {
                        // x-axis at yz-nyquist
                        // BUGFIX: the original loop here duplicated the
                        // z-nyquist loop above and omitted the
                        // istride[1] * (length[1] / 2) offset on both sides.
                        for(Tsize i = 1; i < (length[0] + 1) / 2; ++i)
                        {
                            // clang-format off
                            data[istride[0] * (length[0] - i) + istride[1] * (length[1] / 2)
                                 + istride[2] * (length[2] / 2)]
                                = std::conj(data[istride[0] * i + istride[1] * (length[1] / 2)
                                                 + istride[2] * (length[2] / 2)]);
                            // clang-format on
                        }
                    }

                    // y-axis: at z-nyquist
                    for(Tsize j = 1; j < (length[1] + 1) / 2; ++j)
                    {
                        data[istride[1] * (length[1] - j) + istride[2] * (length[2] / 2)]
                            = std::conj(data[istride[1] * j + istride[2] * (length[2] / 2)]);
                    }

                    if(length[0] % 2 == 0)
                    {
                        // y-axis: at xz-nyquist
                        for(Tsize j = 1; j < (length[1] + 1) / 2; ++j)
                        {
                            // clang-format off
                            data[istride[0] * (length[0] / 2) + istride[1] * (length[1] - j)
                                 + istride[2] * (length[2] / 2)]
                                = std::conj(data[istride[0] * (length[0] / 2) + istride[1] * j
                                                 + istride[2] * (length[2] / 2)]);
                            // clang-format on
                        }
                    }

                    // x-y plane: at z-nyquist
                    for(Tsize i = 1; i < (length[0] + 1) / 2; ++i)
                    {
                        for(Tsize j = 1; j < length[1]; ++j)
                        {
                            // clang-format off
                            data[istride[0] * (length[0] - i) + istride[1] * (length[1] - j)
                                 + istride[2] * (length[2] / 2)]
                                = std::conj(data[istride[0] * i + istride[1] * j
                                                 + istride[2] * (length[2] / 2)]);
                            // clang-format on
                        }
                    }
                }

                // fall-through
            case 2:
                // Nyquist points must be real-valued.
                if(length[1] % 2 == 0)
                {
                    data[istride[1] * (length[1] / 2)].imag(0.0);
                }
                if(length[0] % 2 == 0 && length[1] % 2 == 0)
                {
                    data[istride[0] * (length[0] / 2) + istride[1] * (length[1] / 2)].imag(0.0);
                }

                for(Tsize i = 1; i < (length[0] + 1) / 2; ++i)
                {
                    data[istride[0] * (length[0] - i)] = std::conj(data[istride[0] * i]);
                }

                if(length[1] % 2 == 0)
                {
                    for(Tsize i = 1; i < (length[0] + 1) / 2; ++i)
                    {
                        data[istride[0] * (length[0] - i) + istride[1] * (length[1] / 2)]
                            = std::conj(data[istride[0] * i + istride[1] * (length[1] / 2)]);
                    }
                }

                // fall-through
            case 1:
                // DC (and x-nyquist, if present) must be real-valued.
                data[0].imag(0.0);
                if(length[0] % 2 == 0)
                {
                    data[istride[0] * (length[0] / 2)].imag(0.0);
                }
                break;

            default:
                throw std::runtime_error("Invalid dimension for imposeHermitianSymmetry");
            }
        }
        break;
    }
    case 2:
    {
        // Complex planar data: only the imaginary plane (vals[1]) needs
        // zeroing at the DC/Nyquist points; the real plane is unconstrained.
        for(Tsize ibatch = 0; ibatch < nbatch; ++ibatch)
        {
            auto idata = reinterpret_cast<Tfloat*>(vals[1].data()) + ibatch * idist;
            switch(length.size())
            {
            case 3:
                throw std::runtime_error("Not implemented"); // FIXME: implement
            case 2:
                throw std::runtime_error("Not implemented"); // FIXME: implement
            case 1:
                idata[0] = 0.0;
                if(length[0] % 2 == 0)
                {
                    idata[istride[0] * (length[0] / 2)] = 0.0;
                }
                break;
            default:
                throw std::runtime_error("Invalid dimension for imposeHermitianSymmetry");
            }
        }
        break;
    }
    default:
        throw std::runtime_error("Invalid data type");
    }
}

// Given an array type and transform length, strides, etc, load random floats in [0,1]
// into the input array of floats/doubles or complex floats/doubles, which is stored in a
// vector of chars (or two vectors in the case of planar format).
// lengths are the memory lengths (ie not the transform parameters) template <typename Tfloat, typename Tallocator, typename Tint1> inline void set_input(std::vector<std::vector<char, Tallocator>>& input, const rocfft_array_type itype, const Tint1& whole_length, const Tint1& istride, const size_t idist, const size_t nbatch) { switch(itype) { case rocfft_array_type_complex_interleaved: case rocfft_array_type_hermitian_interleaved: { auto idata = (std::complex<Tfloat>*)input[0].data(); size_t i_base = 0; auto partitions = partition_rowmajor(whole_length); for(auto b = 0; b < nbatch; b++, i_base += idist) { #pragma omp parallel for num_threads(partitions.size()) for(size_t part = 0; part < partitions.size(); ++part) { auto index = partitions[part].first; const auto length = partitions[part].second; std::mt19937 gen(compute_index(index, istride, i_base)); do { const int i = compute_index(index, istride, i_base); const Tfloat x = (Tfloat)gen() / (Tfloat)gen.max(); const Tfloat y = (Tfloat)gen() / (Tfloat)gen.max(); const std::complex<Tfloat> val(x, y); idata[i] = val; } while(increment_rowmajor(index, length)); } } break; } case rocfft_array_type_complex_planar: case rocfft_array_type_hermitian_planar: { auto ireal = (Tfloat*)input[0].data(); auto iimag = (Tfloat*)input[1].data(); size_t i_base = 0; auto partitions = partition_rowmajor(whole_length); for(auto b = 0; b < nbatch; b++, i_base += idist) { #pragma omp parallel for num_threads(partitions.size()) for(size_t part = 0; part < partitions.size(); ++part) { auto index = partitions[part].first; const auto length = partitions[part].second; std::mt19937 gen(compute_index(index, istride, i_base)); do { const int i = compute_index(index, istride, i_base); const std::complex<Tfloat> val((Tfloat)gen() / (Tfloat)gen.max(), (Tfloat)gen() / (Tfloat)gen.max()); ireal[i] = val.real(); iimag[i] = val.imag(); } while(increment_rowmajor(index, length)); } } break; } case rocfft_array_type_real: { auto idata = 
(Tfloat*)input[0].data(); size_t i_base = 0; auto partitions = partition_rowmajor(whole_length); for(auto b = 0; b < nbatch; b++, i_base += idist) { #pragma omp parallel for num_threads(partitions.size()) for(size_t part = 0; part < partitions.size(); ++part) { auto index = partitions[part].first; const auto length = partitions[part].second; std::mt19937 gen(compute_index(index, istride, i_base)); do { const int i = compute_index(index, istride, i_base); const Tfloat val = (Tfloat)gen() / (Tfloat)gen.max(); idata[i] = val; } while(increment_rowmajor(index, length)); } } break; } default: throw std::runtime_error("Input layout format not yet supported"); break; } } // unroll set_input for dimension 1, 2, 3 template <typename Tfloat, typename Tallocator> inline void set_input(std::vector<std::vector<char, Tallocator>>& input, const rocfft_array_type itype, const std::vector<size_t>& length, const std::vector<size_t>& istride, const size_t idist, const size_t nbatch) { switch(length.size()) { case 1: set_input<Tfloat>(input, itype, length[0], istride[0], idist, nbatch); break; case 2: set_input<Tfloat>(input, itype, std::make_tuple(length[0], length[1]), std::make_tuple(istride[0], istride[1]), idist, nbatch); break; case 3: set_input<Tfloat>(input, itype, std::make_tuple(length[0], length[1], length[2]), std::make_tuple(istride[0], istride[1], istride[2]), idist, nbatch); break; default: abort(); } } // Compute the idist for a given transform based on the placeness, transform type, and // data layout. template <typename Tsize> inline size_t set_idist(const rocfft_result_placement place, const rocfft_transform_type transformType, const std::vector<Tsize>& length, const std::vector<Tsize>& istride) { const Tsize dim = length.size(); // In-place 1D transforms need extra dist. 
if(transformType == rocfft_transform_type_real_forward && dim == 1 && place == rocfft_placement_inplace) { return 2 * (length[0] / 2 + 1) * istride[0]; } if(transformType == rocfft_transform_type_real_inverse && dim == 1) { return (length[0] / 2 + 1) * istride[0]; } Tsize idist = (transformType == rocfft_transform_type_real_inverse) ? (length[dim - 1] / 2 + 1) * istride[dim - 1] : length[dim - 1] * istride[dim - 1]; for(int i = 0; i < dim - 1; ++i) { idist = std::max(length[i] * istride[i], idist); } return idist; } // Compute the odist for a given transform based on the placeness, transform type, and // data layout. Row-major. template <typename Tsize> inline size_t set_odist(const rocfft_result_placement place, const rocfft_transform_type transformType, const std::vector<Tsize>& length, const std::vector<Tsize>& ostride) { const Tsize dim = length.size(); // In-place 1D transforms need extra dist. if(transformType == rocfft_transform_type_real_inverse && dim == 1 && place == rocfft_placement_inplace) { return 2 * (length[0] / 2 + 1) * ostride[0]; } if(transformType == rocfft_transform_type_real_forward && dim == 1) { return (length[0] / 2 + 1) * ostride[0]; } Tsize odist = (transformType == rocfft_transform_type_real_forward) ? (length[dim - 1] / 2 + 1) * ostride[dim - 1] : length[dim - 1] * ostride[dim - 1]; for(int i = 0; i < dim - 1; ++i) { odist = std::max(length[i] * ostride[i], odist); } return odist; } // Given a data type and precision, the distance between batches, and the batch size, // allocate the required host buffer(s). 
template <typename Allocator = std::allocator<char>> inline std::vector<std::vector<char, Allocator>> allocate_host_buffer( const rocfft_precision precision, const rocfft_array_type type, const std::vector<size_t>& size) { std::vector<std::vector<char, Allocator>> buffers(size.size()); for(int i = 0; i < size.size(); ++i) { buffers[i].resize(size[i] * var_size<size_t>(precision, type)); } return buffers; } // Given a data type and dimensions, fill the buffer, imposing Hermitian symmetry if // necessary. // NB: length is the logical size of the FFT, and not necessarily the data dimensions template <typename Allocator = std::allocator<char>> inline std::vector<std::vector<char, Allocator>> compute_input(const rocfft_params& params) { auto input = allocate_host_buffer<Allocator>(params.precision, params.itype, params.isize); for(auto& i : input) { std::fill(i.begin(), i.end(), 0.0); } switch(params.precision) { case rocfft_precision_double: set_input<double>( input, params.itype, params.ilength(), params.istride, params.idist, params.nbatch); break; case rocfft_precision_single: set_input<float>( input, params.itype, params.ilength(), params.istride, params.idist, params.nbatch); break; } if(params.itype == rocfft_array_type_hermitian_interleaved || params.itype == rocfft_array_type_hermitian_planar) { switch(params.precision) { case rocfft_precision_double: impose_hermitian_symmetry<double>( input, params.length, params.istride, params.idist, params.nbatch); break; case rocfft_precision_single: impose_hermitian_symmetry<float>( input, params.length, params.istride, params.idist, params.nbatch); break; } } return input; } // Check that the input and output types are consistent. 
inline void check_iotypes(const rocfft_result_placement place, const rocfft_transform_type transformType, const rocfft_array_type itype, const rocfft_array_type otype) { switch(itype) { case rocfft_array_type_complex_interleaved: case rocfft_array_type_complex_planar: case rocfft_array_type_hermitian_interleaved: case rocfft_array_type_hermitian_planar: case rocfft_array_type_real: break; default: throw std::runtime_error("Invalid Input array type format"); } switch(otype) { case rocfft_array_type_complex_interleaved: case rocfft_array_type_complex_planar: case rocfft_array_type_hermitian_interleaved: case rocfft_array_type_hermitian_planar: case rocfft_array_type_real: break; default: throw std::runtime_error("Invalid Input array type format"); } // Check that format choices are supported if(transformType != rocfft_transform_type_real_forward && transformType != rocfft_transform_type_real_inverse) { if(place == rocfft_placement_inplace && itype != otype) { throw std::runtime_error( "In-place transforms must have identical input and output types"); } } bool okformat = true; switch(itype) { case rocfft_array_type_complex_interleaved: case rocfft_array_type_complex_planar: okformat = (otype == rocfft_array_type_complex_interleaved || otype == rocfft_array_type_complex_planar); break; case rocfft_array_type_hermitian_interleaved: case rocfft_array_type_hermitian_planar: okformat = otype == rocfft_array_type_real; break; case rocfft_array_type_real: okformat = (otype == rocfft_array_type_hermitian_interleaved || otype == rocfft_array_type_hermitian_planar); break; default: throw std::runtime_error("Invalid Input array type format"); } switch(otype) { case rocfft_array_type_complex_interleaved: case rocfft_array_type_complex_planar: case rocfft_array_type_hermitian_interleaved: case rocfft_array_type_hermitian_planar: case rocfft_array_type_real: break; default: okformat = false; } if(!okformat) { throw std::runtime_error("Invalid combination of Input/Output array type 
formats"); } } // Check that the input and output types are consistent. If they are unset, assign // default values based on the transform type. inline void check_set_iotypes(const rocfft_result_placement place, const rocfft_transform_type transformType, rocfft_array_type& itype, rocfft_array_type& otype) { if(itype == rocfft_array_type_unset) { switch(transformType) { case rocfft_transform_type_complex_forward: case rocfft_transform_type_complex_inverse: itype = rocfft_array_type_complex_interleaved; break; case rocfft_transform_type_real_forward: itype = rocfft_array_type_real; break; case rocfft_transform_type_real_inverse: itype = rocfft_array_type_hermitian_interleaved; break; default: throw std::runtime_error("Invalid transform type"); } } if(otype == rocfft_array_type_unset) { switch(transformType) { case rocfft_transform_type_complex_forward: case rocfft_transform_type_complex_inverse: otype = rocfft_array_type_complex_interleaved; break; case rocfft_transform_type_real_forward: otype = rocfft_array_type_hermitian_interleaved; break; case rocfft_transform_type_real_inverse: otype = rocfft_array_type_real; break; default: throw std::runtime_error("Invalid transform type"); } } check_iotypes(place, transformType, itype, otype); } #endif
assign_1.c
// // Created by nick on 1/30/18. // #include "limits.h" #include "stdio.h" #include "stdlib.h" #include "memory.h" #include "omp.h" #include <sys/time.h> // ======== Defines for the entire project ========= // Getting the value of the macro as a string #define STR(name) #name #define MACRO_VALUE(name) STR(name) // Define the number of histogram bins here #define HIST_BINS 10 // Define the number of threads to use #define THREADS 12 // Define the chunk size #define CHUNK 8 // Define the scheduling type #define SCHED_TYPE dynamic // Define the scheduling to use #define SCHEDULE schedule(SCHED_TYPE, CHUNK) // Define the scheduling value as a string #define SCHED_VALUE MACRO_VALUE(SCHEDULE) // Const defines for when no parameters are given const int N_global = 10000; const int M_global = 1000; // gets the current time in seconds with microsecond precision double get_time() { struct timeval t; struct timezone tzp; gettimeofday(&t, &tzp); return t.tv_sec + t.tv_usec * 1e-6; } // Initializes a matrix of size NxN with values of [0, M) void init(int N, int M, int A[N][N]) { int i, j; for (i = 0; i < N; i++) { for (j = 0; j < N; j++) { A[i][j] = rand() % M; } } } // Prints every value inside of a matrix void print_matrix(int N, int matrix[N][N]) { for (int j = 0; j < N; j++) { for (int i = 0; i < N; i++) { printf("%d\t", matrix[i][j]); } printf("\n"); } } // Prints every value inside of a histogram void print_histogram(int hist[]) { int sum = 0; for (int i = 0; i < HIST_BINS; i++) { sum += hist[i]; printf("%d\t", hist[i]); } printf("Sum: %d\n", sum); } // Testing macros used so that the main void doesn't have so much clutter #define PERFORM_TEST_MAX(function) \ initialClock = get_time(); \ max = function(N, matrix); \ executionTime = (get_time() - initialClock) * 1000.f; \ printf("Found the max for %s: %d in %.2f ms\n", #function, max, executionTime); #define PERFORM_TEST_BINS(function) \ initialClock = get_time(); \ function(N, M, hist, matrix); \ executionTime = 
(get_time() - initialClock) * 1000.0f; \ printf("Created the histogram with %s in %.2f ms\n", #function, executionTime); \ print_histogram(hist); \ memset(hist, 0, sizeof(hist)); // Serial: No parallel at all, just brute force it on one thread int find_matrix_max_s(int N, int matrix[N][N]); void fill_bins_s(int N, int M, int hist[], int matrix[N][N]); // Parallel 1: Manual decomposition int find_matrix_max_p1(int N, int matrix[N][N]); void fill_bins_p1(int N, int M, int hist[], int matrix[N][N]); // Parallel 2: Use "for" construct without "reduction" clause int find_matrix_max_p2(int N, int matrix[N][N]); void fill_bins_p2(int N, int M, int hist[], int matrix[N][N]); // Parallel 3: Use "for" construct with "reduction" clause int find_matrix_max_p3(int N, int matrix[N][N]); void fill_bins_p3(int N, int M, int hist[], int matrix[N][N]); int main(int argc, char *argv[]) { int N, M; // Gather the size of matrix (N) and max possible value (M) if (argc < 3) { // Use the globals when the arguments weren't passed right N = N_global; M = M_global; } else { N = atoi(argv[1]); M = atoi(argv[2]); } // Setup random generator srand(1 << 12); // Generate the matrix int (*matrix)[N] = malloc(sizeof(int[N][N])); init(N, M, matrix); // Create the histogram array int hist[HIST_BINS]; memset(hist, 0, sizeof(hist)); //print_matrix(N, matrix); // Things used by the macro double initialClock, executionTime; // used for timing int max; // used for storing the max printf("Running the tests with %d thread(s) and %s\n", THREADS, SCHED_VALUE); // Perform the "find the max" tests PERFORM_TEST_MAX(find_matrix_max_s); PERFORM_TEST_MAX(find_matrix_max_p1); PERFORM_TEST_MAX(find_matrix_max_p2); PERFORM_TEST_MAX(find_matrix_max_p3); // Perform the histogram tests PERFORM_TEST_BINS(fill_bins_s); PERFORM_TEST_BINS(fill_bins_p1); PERFORM_TEST_BINS(fill_bins_p2); PERFORM_TEST_BINS(fill_bins_p3); return 0; } // ============= Implementations ================= // Serial int find_matrix_max_s(int N, int 
matrix[N][N]) { int toReturn = INT_MIN; for (int i = 0; i < N; i++) for (int j = 0; j < N; j++) if (matrix[i][j] > toReturn) toReturn = matrix[i][j]; return toReturn; } void fill_bins_s(int N, int M, int hist[], int matrix[N][N]) { for (int i = 0; i < N; i++) { for (int j = 0; j < N; j++) { // Loop over bin indices for (int k = 0; k < HIST_BINS; k++) { int candidate = matrix[i][j]; if (k * M / HIST_BINS <= candidate && candidate < (k + 1) * M / HIST_BINS) { hist[k]++; break; } } } } } // Parallel-1 int find_matrix_max_p1(int N, int matrix[N][N]) { int toReturn = INT_MIN; int num_threads; #pragma omp parallel shared(matrix) num_threads(THREADS) { int i, j, start, end; #pragma omp single { num_threads = omp_get_num_threads(); } int pID = omp_get_thread_num(); start = pID * N / num_threads; end = ((pID + 1) * N) / num_threads; for (i = start; i < end; i++) for (j = 0; j < N; j++) if (matrix[i][j] > toReturn) #pragma omp critical toReturn = matrix[i][j]; }; return toReturn; } void fill_bins_p1(int N, int M, int hist[], int matrix[N][N]) { int num_threads; #pragma omp parallel num_threads(THREADS) { int i, j, start, end; int local_hist[HIST_BINS]; memset(local_hist, 0, sizeof(local_hist)); #pragma omp single { num_threads = omp_get_num_threads(); } int pID = omp_get_thread_num(); start = pID * N / num_threads; end = ((pID + 1) * N) / num_threads; for (i = start; i < end; i++) { for (j = 0; j < N; j++) { // Loop over bin indices for (int k = 0; k < HIST_BINS; k++) { int candidate = matrix[i][j]; if (k * M / HIST_BINS <= candidate && candidate < (k + 1) * M / HIST_BINS) { local_hist[k]++; break; } } } } #pragma omp critical for (i = 0; i < HIST_BINS; i++) { hist[i] += local_hist[i]; } }; } // Parallel-2 int find_matrix_max_p2(int N, int matrix[N][N]) { int toReturn = INT_MIN; #pragma omp parallel for num_threads(THREADS) SCHEDULE for (int i = 0; i < N; i++) for (int j = 0; j < N; j++) if (matrix[i][j] > toReturn) toReturn = matrix[i][j]; return toReturn; } void 
fill_bins_p2(int N, int M, int hist[], int matrix[N][N]) { #pragma omp parallel num_threads(THREADS) { int local_bins[HIST_BINS]; memset(local_bins, 0, sizeof(local_bins)); #pragma omp for nowait SCHEDULE for (int i = 0; i < N; i++) { for (int j = 0; j < N; j++) { // Loop over bin indices for (int k = 0; k < HIST_BINS; k++) { int candidate = matrix[i][j]; if (k * M / HIST_BINS <= candidate && candidate < (k + 1) * M / HIST_BINS) { local_bins[k]++; break; } } } } #pragma omp critical for (int i = 0; i < HIST_BINS; i++) { hist[i] += local_bins[i]; } }; } // Parallel-3 int find_matrix_max_p3(int N, int matrix[N][N]) { int toReturn = INT_MIN; #pragma omp parallel for reduction(max:toReturn) num_threads(THREADS) SCHEDULE for (int i = 0; i < N; i++) for (int j = 0; j < N; j++) if (matrix[i][j] > toReturn) toReturn = matrix[i][j]; return toReturn; } void fill_bins_p3(int N, int M, int hist[], int matrix[N][N]) { #pragma omp parallel for reduction(+:hist[:HIST_BINS]) num_threads(THREADS) SCHEDULE for (int i = 0; i < N; i++) { for (int j = 0; j < N; j++) { // Loop over bin indices for (int k = 0; k < HIST_BINS; k++) { int candidate = matrix[i][j]; if (k * M / HIST_BINS <= candidate && candidate < (k + 1) * M / HIST_BINS) { hist[k]++; break; } } } } }
GB_unaryop__minv_uint16_int8.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function: GB_unop__minv_uint16_int8
// op(A') function: GB_tran__minv_uint16_int8

// C type:   uint16_t
// A type:   int8_t
// cast:     uint16_t cij = (uint16_t) aij
// unaryop:  cij = GB_IMINV_UNSIGNED (aij, 16)

// entry type of the input matrix A
#define GB_ATYPE \
    int8_t

// entry type of the output matrix C
#define GB_CTYPE \
    uint16_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int8_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator: z = 1/x in 16-bit unsigned-integer arithmetic
#define GB_OP(z, x) \
    z = GB_IMINV_UNSIGNED (x, 16) ;

// casting from the A type to the C type
#define GB_CASTING(z, x) \
    uint16_t z = (uint16_t) x ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GB_GETA (aij, Ax, pA) ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ; \
    GB_OP (GB_CX (pC), x) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_MINV || GxB_NO_UINT16 || GxB_NO_INT8)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Cx: output array of anz entries; Ax: input array of anz entries.
// Parallelized with a static schedule over nthreads OpenMP threads.
GrB_Info GB_unop__minv_uint16_int8
(
    uint16_t *restrict Cx,
    const int8_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose kernel body is supplied by GB_unaryop_transpose.c, which
// expands using the GB_* macros defined above (phase 2 of 2).
GrB_Info GB_tran__minv_uint16_int8
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t **Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
ZQ_FaceDatabaseCompact.h
#ifndef _ZQ_FACE_DATABASE_COMPACT_H_ #define _ZQ_FACE_DATABASE_COMPACT_H_ #pragma once #include <malloc.h> #include <stdio.h> #include <vector> #include <omp.h> #include "ZQ_FaceRecognizerSphereFace.h" #include "ZQ_MathBase.h" #include "ZQ_MergeSort.h" namespace ZQ { class ZQ_FaceDatabaseCompact { enum CONST_VAL { FEAT_ALIGNED_SIZE = 32 }; public: ZQ_FaceDatabaseCompact() { dim = 0; person_num = 0; person_face_num = 0; total_face_num = 0; person_face_offset = 0; all_face_feats = 0; } ~ZQ_FaceDatabaseCompact() {} bool LoadFromFile(const char* feats_file, const char* names_file) { _clear(); if (!_load_feats(feats_file)) { _clear(); return false; } if (!_load_names(names_file)) { _clear(); return false; } if (person_num != names.size()) { _clear(); return false; } return true; } bool GenerateRandomDatabase(int num_person, int num_feat_per_person, int dim) { if (dim != 128 && dim != 256 && dim != 512) { printf("dim must be 128 or 256 or 512\n"); return false; } _clear(); std::vector<std::string> tmp_names; for (__int64 i = 0; i < num_person; i++) { char buf[200]; sprintf(buf, "%d", i); tmp_names.push_back(std::string(buf)); } int* tmp_person_face_num = (int*)malloc(sizeof(int)*num_person); for (int i = 0; i < num_person; i++) tmp_person_face_num[i] = num_feat_per_person; __int64* tmp_person_face_offset = (__int64*)malloc(sizeof(__int64)*num_person); for (__int64 i = 0; i < num_person; i++) { tmp_person_face_offset[i] = i * num_feat_per_person; } __int64 num_all_feats = num_person * num_feat_per_person; __int64 needed_bytes = num_all_feats * dim * sizeof(float); float* tmp_all_feats = (float*)_aligned_malloc(needed_bytes, FEAT_ALIGNED_SIZE); printf("need %d MB \n", needed_bytes / 1024 / 1024); if (tmp_all_feats == 0) { printf("failed to alloc memory, need %ld bytes\n", needed_bytes); return false; } for (__int64 i = 0; i < num_all_feats*dim; i++) { tmp_all_feats[i] = rand() % 1001 / 1000.0; } for (__int64 i = 0; i < num_all_feats; i++) { ZQ_MathBase::Normalize(dim, 
tmp_all_feats + i*dim); } printf("done\n"); this->dim = dim; this->person_num = num_person; this->person_face_num = tmp_person_face_num; this->total_face_num = num_all_feats; this->person_face_offset = tmp_person_face_offset; this->all_face_feats = tmp_all_feats; this->names.swap(tmp_names); return true; } bool Search(int feat_dim, int feat_num, const float* feat, std::vector<int>& out_ids, std::vector<float>& out_scores, std::vector<std::string>& out_names, int max_num, int max_thread_num) const { return _find_the_best_matches(feat_dim, feat_num, feat, out_ids, out_scores, out_names, max_num, max_thread_num); } bool ExportSimilarityForAllPairs(const std::string& out_score_file, const std::string& out_flag_file, __int64& all_pair_num, __int64& same_pair_num, __int64& notsame_pair_num, int max_thread_num, bool quantization) const { return _export_similarity_for_all_pairs(out_score_file,out_flag_file, all_pair_num, same_pair_num, notsame_pair_num, max_thread_num, quantization); } bool DetectRepeatPerson(const std::string& out_file, int max_thread_num, float similarity_thresh = 0.5, bool only_pivot = true) const { return _detect_repeat_person(out_file, max_thread_num, similarity_thresh, only_pivot); } private: int dim; int person_num; int* person_face_num; __int64 total_face_num; __int64* person_face_offset; float* all_face_feats; std::vector<std::string> names; private: void _clear() { dim = 0; person_num = 0; total_face_num = 0; if (person_face_num) { free(person_face_num); person_face_num = 0; } if (person_face_offset) { free(person_face_offset); person_face_offset = 0; } if (all_face_feats) { _aligned_free(all_face_feats); all_face_feats = 0; } names.clear(); } bool _load_feats(const char* file) { FILE* in = 0; #if defined(_WIN32) if (0 != fopen_s(&in, file, "rb")) { return false; } #else in = fopen(file, "rb"); if (in == NULL) return false; #endif if (1 != fread(&dim, sizeof(int), 1, in) || dim <= 0) { fclose(in); return false; } if (1 != fread(&person_num, 
sizeof(int), 1, in) || person_num <= 0) { fclose(in); return false; } person_face_num = (int*)malloc(sizeof(int)*person_num); person_face_offset = (__int64*)malloc(sizeof(__int64)*person_num); if (person_face_num == 0 || person_face_offset == 0) { fclose(in); return false; } if (person_num != fread(person_face_num, sizeof(int), person_num, in)) { fclose(in); return false; } total_face_num = 0; for (int i = 0; i < person_num; i++) { if (person_face_num[i] <= 0) { fclose(in); return false; } person_face_offset[i] = total_face_num; total_face_num += person_face_num[i]; } all_face_feats = (float*)_aligned_malloc(sizeof(float)*total_face_num*dim, FEAT_ALIGNED_SIZE); if (all_face_feats == 0) { fclose(in); return false; } if (total_face_num*dim != fread(all_face_feats, sizeof(float), total_face_num*dim, in)) { fclose(in); return false; } fclose(in); return true; } bool _load_names(const char* file) { FILE* in = 0; #if defined(_WIN32) if (0 != fopen_s(&in, file, "r")) return false; #else in = fopen(file, "r"); if (in == NULL) return false; #endif char line[200] = { 0 }; while (true) { line[0] = '\0'; fgets(line, 199, in); if (line[0] == '\0') break; int len = strlen(line); if (line[len - 1] == '\n') line[--len] = '\0'; names.push_back(std::string(line)); } fclose(in); return true; } bool _find_the_best_matches(int feat_dim, int feat_num, const float* feat, std::vector<int>& out_ids, std::vector<float>& out_scores, std::vector<std::string>& out_names, int max_num, int max_thread_num) const { if (person_num <= 0 || feat_dim != dim || feat_num <= 0) return false; __int64 widthStep = (sizeof(float)*dim + FEAT_ALIGNED_SIZE-1) / FEAT_ALIGNED_SIZE * FEAT_ALIGNED_SIZE; float* feat_aligned = (float*)_aligned_malloc(widthStep*feat_num, FEAT_ALIGNED_SIZE); if (feat_aligned == 0) return false; for(__int64 i = 0;i < feat_num;i++) memcpy(((char*)feat_aligned)+widthStep*i, feat+feat_dim*i, sizeof(float)*dim); float* scores = (float*)malloc(sizeof(float)*total_face_num); if (scores == 0) 
{ _aligned_free(feat_aligned); return false; } for (__int64 i = 0; i < total_face_num; i++) scores[i] = -FLT_MAX; int num_procs = omp_get_num_procs(); int real_threads = __max(1, __min(max_thread_num, num_procs - 1)); if (real_threads == 1) { for (__int64 j = 0; j < feat_num; j++) { float* tmp_feat = (float*)(((char*)feat_aligned) + widthStep*j); __int64 chunk_size = (total_face_num + real_threads - 1) / real_threads; if (dim == 128) { for (__int64 i = 0; i < total_face_num; i++) { scores[i] = __max(scores[i],ZQ_FaceRecognizerSphereFace::_cal_similarity_avx_dim128(tmp_feat, all_face_feats + i*dim)); } } else if (dim == 256) { for (__int64 i = 0; i < total_face_num; i++) { scores[i] = __max(scores[i], ZQ_FaceRecognizerSphereFace::_cal_similarity_avx_dim256(tmp_feat, all_face_feats + i*dim)); } } else if (dim == 512) { for (__int64 i = 0; i < total_face_num; i++) { scores[i] = __max(scores[i], ZQ_FaceRecognizerSphereFace::_cal_similarity_avx_dim512(tmp_feat, all_face_feats + i*dim)); } } else { for (__int64 i = 0; i < total_face_num; i++) { scores[i] = __max(scores[i], ZQ_MathBase::DotProduct(dim, tmp_feat, all_face_feats + i*dim)); } } } } else { for (__int64 j = 0; j < feat_num; j++) { float* tmp_feat = (float*)(((char*)feat_aligned) + widthStep*j); __int64 chunk_size = (total_face_num + real_threads - 1) / real_threads; if (dim == 128) { #pragma omp parallel for schedule(static, chunk_size) num_threads(real_threads) for (__int64 i = 0; i < total_face_num; i++) { scores[i] = __max(scores[i], ZQ_FaceRecognizerSphereFace::_cal_similarity_avx_dim128(tmp_feat, all_face_feats + i*dim)); } } else if (dim == 256) { #pragma omp parallel for schedule(static, chunk_size) num_threads(real_threads) for (__int64 i = 0; i < total_face_num; i++) { scores[i] = __max(scores[i], ZQ_FaceRecognizerSphereFace::_cal_similarity_avx_dim256(tmp_feat, all_face_feats + i*dim)); } } else if (dim == 512) { #pragma omp parallel for schedule(static, chunk_size) num_threads(real_threads) for 
(__int64 i = 0; i < total_face_num; i++) { scores[i] = __max(scores[i], ZQ_FaceRecognizerSphereFace::_cal_similarity_avx_dim512(tmp_feat, all_face_feats + i*dim)); } } else { #pragma omp parallel for schedule(static, chunk_size) num_threads(real_threads) for (__int64 i = 0; i < total_face_num; i++) { scores[i] = __max(scores[i], ZQ_MathBase::DotProduct(dim, tmp_feat, all_face_feats + i*dim)); } } } } float* max_scores = (float*)malloc(sizeof(float)*person_num); if (max_scores == 0) { _aligned_free(feat_aligned); free(scores); return false; } if (real_threads == 1) { for (__int64 i = 0; i < person_num; i++) { float tmp = -FLT_MAX; for (__int64 j = person_face_offset[i]; j < person_face_offset[i]+person_face_num[i]; j++) { tmp = __max(tmp, scores[j]); } max_scores[i] = tmp; } } else { int chunk_size = (person_num + real_threads - 1) / real_threads; #pragma omp parallel for schedule(static, chunk_size) num_threads(real_threads) for (__int64 i = 0; i < person_num; i++) { float tmp = -FLT_MAX; for (__int64 j = person_face_offset[i]; j < person_face_offset[i] + person_face_num[i]; j++) { tmp = __max(tmp, scores[j]); } max_scores[i] = tmp; } } _aligned_free(feat_aligned); free(scores); int* ids = (int*)malloc(sizeof(int)*person_num); if (ids == 0) { free(max_scores); return false; } for (__int64 i = 0; i < person_num; i++) { ids[i] = i; } out_ids.clear(); out_scores.clear(); out_names.clear(); for (__int64 i = 0; i < __min(max_num, person_num); i++) { float cur_max_score = max_scores[i]; int max_id = i; for (__int64 j = i + 1; j < person_num; j++) { if (cur_max_score < max_scores[j]) { max_id = j; cur_max_score = max_scores[j]; } } int tmp_id = ids[i]; ids[i] = ids[max_id]; ids[max_id] = tmp_id; float tmp_score = max_scores[i]; max_scores[i] = max_scores[max_id]; max_scores[max_id] = tmp_score; out_ids.push_back(ids[i]); out_scores.push_back(max_scores[i]); out_names.push_back(names[ids[i]]); } free(max_scores); free(ids); return true; } //must be aligned static float 
_compute_similarity(int dim, const float* v1, const float* v2) { if (dim == 128) return ZQ_FaceRecognizerSphereFace::_cal_similarity_avx_dim128(v1, v2); else if (dim == 256) return ZQ_FaceRecognizerSphereFace::_cal_similarity_avx_dim256(v1, v2); else if (dim == 512) return ZQ_FaceRecognizerSphereFace::_cal_similarity_avx_dim512(v1, v2); else return ZQ_MathBase::DotProduct(dim, v1, v2); } bool _export_similarity_for_all_pairs(const std::string& out_score_file, const std::string& out_flag_file, __int64& all_pair_num, __int64& same_pair_num, __int64& notsame_pair_num, int max_thread_num, bool quantization) const { FILE* out1 = 0; #if defined(_WIN32) if (0 != fopen_s(&out1, out_score_file.c_str(), "wb")) { printf("failed to create file %s\n", out_score_file.c_str()); return false; } #else out1 = fopen(out_score_file.c_str(), "wb"); if (out1 == NULL) { printf("failed to create file %s\n", out_score_file.c_str()); return false; } #endif FILE* out2 = 0; #if defined(_WIN32) if (0 != fopen_s(&out2, out_flag_file.c_str(), "wb")) { printf("failed to create file %s\n", out_flag_file.c_str()); fclose(out1); return false; } #else out2 = fopen(out_flag_file.c_str(), "wb"); if (out2 == NULL) { printf("failed to create file %s\n", out_flag_file.c_str()); fclose(out1); return false; } #endif all_pair_num = total_face_num *(total_face_num - 1) / 2; same_pair_num = 0; notsame_pair_num = 0; //fprintf(out, "%lld\n", total_pair_num); int real_thread_num = __max(1, __min(max_thread_num, omp_get_num_procs() - 1)); if (real_thread_num == 1) { for (int pp = 0; pp < person_num; pp++) { __int64 cur_face_offset = person_face_offset[pp]; __int64 cur_face_num = person_face_num[pp]; __int64 max_pair_num = (total_face_num - cur_face_offset - 1); std::vector<float> scores(max_pair_num); std::vector<char> flags(max_pair_num); for (__int64 i = 0; i < cur_face_num; i++) { float* cur_i_feat = all_face_feats + (cur_face_offset + i)*dim; float* cur_j_feat; int idx = 0; for (__int64 j = i + 1; j < 
cur_face_num; j++) { cur_j_feat = all_face_feats + (cur_face_offset + j)*dim; scores[idx] = _compute_similarity(dim, cur_i_feat, cur_j_feat); flags[idx] = 1; same_pair_num++; idx++; } if (pp + 1 < person_num) { for (__int64 j = person_face_offset[pp + 1]; j < total_face_num; j++) { cur_j_feat = all_face_feats + j*dim; scores[idx] = _compute_similarity(dim, cur_i_feat, cur_j_feat); flags[idx] = 0; notsame_pair_num++; idx++; } } if (idx > 0) { if (quantization) { std::vector<short> short_scores(idx); for (int j = 0; j < idx; j++) short_scores[j] = __min(SHRT_MAX, __max(-SHRT_MAX, scores[j] * SHRT_MAX)); fwrite(&short_scores[0], sizeof(short), idx, out1); } else { fwrite(&scores[0], sizeof(float), idx, out1); } fwrite(&flags[0], 1, idx, out2); } } printf("%d/%d handled\n", pp + 1, person_num); } } else { int chunk_size = 100; int handled[1] = { 0 }; __int64 tmp_same_pair_num[1] = { 0 }; printf("real_thread_num = %d\n", real_thread_num); #pragma omp parallel for schedule(dynamic,chunk_size) num_threads(real_thread_num) shared(handled) for (int pp = 0; pp < person_num; pp++) { __int64 cur_face_offset = person_face_offset[pp]; __int64 cur_face_num = person_face_num[pp]; __int64 max_pair_num = (total_face_num - cur_face_offset-1); std::vector<float> scores(max_pair_num); std::vector<char> flags(max_pair_num); for (__int64 i = 0; i < cur_face_num; i++) { float* cur_i_feat = all_face_feats + (cur_face_offset + i)*dim; float* cur_j_feat; int idx = 0; for (__int64 j = i+1; j < cur_face_num; j++) { cur_j_feat = all_face_feats + (cur_face_offset + j)*dim; scores[idx] = _compute_similarity(dim, cur_i_feat, cur_j_feat); flags[idx] = 1; idx++; } if (pp + 1 < person_num) { for (__int64 j = person_face_offset[pp + 1]; j < total_face_num; j++) { cur_j_feat = all_face_feats + j*dim; scores[idx] = _compute_similarity(dim, cur_i_feat, cur_j_feat); flags[idx] = 0; idx++; } } #pragma omp critical { if (idx > 0) { for (int kk = 0; kk < idx; kk++) { (*tmp_same_pair_num) += flags[kk]; } if 
(quantization) { std::vector<short> short_scores(idx); for (int j = 0; j < idx; j++) short_scores[j] = __min(SHRT_MAX, __max(-SHRT_MAX, scores[j] * SHRT_MAX)); fwrite(&short_scores[0], sizeof(short), idx, out1); } else { fwrite(&scores[0], sizeof(float), idx, out1); } fwrite(&flags[0], 1, idx, out2); } } } #pragma omp critical { (*handled) ++; printf("%d/%d\n", *handled, person_num); } } same_pair_num = tmp_same_pair_num[0]; notsame_pair_num = all_pair_num - same_pair_num; } fclose(out1); fclose(out2); return true; } bool _detect_repeat_person(const std::string& out_file, int max_thread_num, float similarity_thresh, bool only_pivot) const { std::vector<std::pair<int, int> > repeat_pairs; std::vector<float> scores; if (!_detect_repeat_person(repeat_pairs, scores, max_thread_num, similarity_thresh, only_pivot)) { return false; } __int64 num = scores.size(); printf("num = %lld\n", num); if (num > 0) { ZQ_MergeSort::MergeSortWithData(&scores[0], &repeat_pairs[0], sizeof(std::pair<int, int>), num, false); } FILE* out = 0; #if defined(_WIN32) if (0 != fopen_s(&out, out_file.c_str(), "w")) { return false; } #else out = fopen(out_file.c_str(), "w"); if (out == NULL) { return false; } #endif for (__int64 i = 0; i < num; i++) { fprintf(out, "%.3f %s %s\n", scores[i], names[repeat_pairs[i].first].c_str(), names[repeat_pairs[i].second].c_str()); } fclose(out); return true; } bool _detect_repeat_person(std::vector<std::pair<int, int> >& repeat_pairs, std::vector<float>& repeat_scores, int max_thread_num, float similarity_thresh, bool only_pivot) const { repeat_pairs.clear(); repeat_scores.clear(); if (only_pivot) { std::vector<int> pivot_ids(person_num); if (max_thread_num <= 1) { for (int p = 0; p < person_num; p++) { __int64 cur_offset = person_face_offset[p]; __int64 cur_num = person_face_num[p]; std::vector<float> scores(cur_num*cur_num); int idx = 0; for (__int64 i = 0; i < cur_num; i++) { float* cur_i_feat = all_face_feats + (cur_offset + i)*dim; float* cur_j_feat; 
scores[i*cur_num + i] = 1; for (__int64 j = i + 1; j < cur_num; j++) { cur_j_feat = all_face_feats + (cur_offset + j)*dim; float tmp_score = _compute_similarity(dim, cur_i_feat, cur_j_feat); scores[i*cur_num + j] = tmp_score; scores[j*cur_num + i] = tmp_score; } } int pivot_id = -1; float sum_score = -FLT_MAX; for (int i = 0; i < cur_num; i++) { float tmp_sum = 0; for (int j = 0; j < cur_num; j++) tmp_sum += scores[i*cur_num + j]; if (sum_score < tmp_sum) { pivot_id = i; sum_score = tmp_sum; } } pivot_ids[p] = pivot_id; } // for (int i = 0; i < person_num; i++) { for (int j = i + 1; j < person_num; j++) { const float* cur_i_feat = all_face_feats + (person_face_offset[i] + pivot_ids[i])*dim; const float* cur_j_feat = all_face_feats + (person_face_offset[j] + pivot_ids[j])*dim; float tmp_score = _compute_similarity(dim, cur_i_feat, cur_j_feat); if (tmp_score >= similarity_thresh) { repeat_pairs.push_back(std::make_pair(i, j)); repeat_scores.push_back(tmp_score); } } } } else { int chunk_size = (person_num + max_thread_num - 1) / max_thread_num; #pragma omp parallel for schedule(static,chunk_size) num_threads(max_thread_num) for (int p = 0; p < person_num; p++) { __int64 cur_offset = person_face_offset[p]; __int64 cur_num = person_face_num[p]; std::vector<float> scores(cur_num*cur_num); int idx = 0; for (__int64 i = 0; i < cur_num; i++) { float* cur_i_feat = all_face_feats + (cur_offset + i)*dim; float* cur_j_feat; scores[i*cur_num + i] = 1; for (__int64 j = i + 1; j < cur_num; j++) { cur_j_feat = all_face_feats + (cur_offset + j)*dim; float tmp_score = _compute_similarity(dim, cur_i_feat, cur_j_feat); scores[i*cur_num + j] = tmp_score; scores[j*cur_num + i] = tmp_score; } } int pivot_id = -1; float sum_score = -FLT_MAX; for (int i = 0; i < cur_num; i++) { float tmp_sum = 0; for (int j = 0; j < cur_num; j++) tmp_sum += scores[i*cur_num + j]; if (sum_score < tmp_sum) { pivot_id = i; sum_score = tmp_sum; } } pivot_ids[p] = pivot_id; } #pragma omp parallel for 
schedule(static,chunk_size) num_threads(max_thread_num) for (int i = 0; i < person_num; i++) { for (int j = i + 1; j < person_num; j++) { const float* cur_i_feat = all_face_feats + (person_face_offset[i] + pivot_ids[i])*dim; const float* cur_j_feat = all_face_feats + (person_face_offset[j] + pivot_ids[j])*dim; float tmp_score = _compute_similarity(dim, cur_i_feat, cur_j_feat); if (tmp_score >= similarity_thresh) { #pragma omp critical { repeat_pairs.push_back(std::make_pair(i, j)); repeat_scores.push_back(tmp_score); } } } } } } else { if (max_thread_num <= 1) { int handled[1] = { 0 }; for (int i = 0; i < person_num; i++) { for (int j = i + 1; j < person_num; j++) { float max_score = -FLT_MAX; for (int s = 0; s < person_face_num[i]; s++) { for (int t = 0; t < person_face_num[j]; t++) { const float* cur_i_feat = all_face_feats + (person_face_offset[i] + s)*dim; const float* cur_j_feat = all_face_feats + (person_face_offset[j] + t)*dim; float tmp_score = _compute_similarity(dim, cur_i_feat, cur_j_feat); max_score = __max(max_score, tmp_score); } } if (max_score >= similarity_thresh) { repeat_pairs.push_back(std::make_pair(i, j)); repeat_scores.push_back(max_score); } } handled[0]++; if (handled[0] % 10 == 0) { printf("%d/%d handled\n", handled[0], person_num); } } } else { int handled[1] = { 0 }; int chunk_size = (person_num + max_thread_num - 1) / max_thread_num; #pragma omp parallel for schedule(static,chunk_size) num_threads(max_thread_num) for (int i = 0; i < person_num; i++) { for (int j = i + 1; j < person_num; j++) { float max_score = -FLT_MAX; for (int s = 0; s < person_face_num[i]; s++) { for (int t = 0; t < person_face_num[j]; t++) { const float* cur_i_feat = all_face_feats + (person_face_offset[i] + s)*dim; const float* cur_j_feat = all_face_feats + (person_face_offset[j] + t)*dim; float tmp_score = _compute_similarity(dim, cur_i_feat, cur_j_feat); max_score = __max(max_score, tmp_score); } } if (max_score >= similarity_thresh) { #pragma omp critical { 
repeat_pairs.push_back(std::make_pair(i, j)); repeat_scores.push_back(max_score); } } } #pragma omp critical { handled[0] ++; if (handled[0] % 10 == 0) { printf("%d/%d handled\n", handled[0], person_num); } } } } } return true; } }; } #endif
// ---- phostone.c ----
// Normal compile // Intel: // mpiicc -qopenmp phostone.c -o phostone // gcc: // mpicc -qopenmp phostone.c -o phostone // // To compile without openmp // Intel: // mpiicc -qopenmp-stubs phostone.c -o purempi // gcc: // mpicc -DSTUBS phostone.c -o purempi // // #include <ctype.h> #include <math.h> #include <mpi.h> #include <omp.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <strings.h> #include <time.h> #include <utmpx.h> // which processor on a node will // print env if requested #ifndef PID #define PID 0 #endif void dothreads(int full, char *myname, int myid, int mycolor, int new_id); char *trim(char *s); void slowit(int nints, int val); int node_color(); int sched_getcpu(); void ptime() { time_t rawtime; struct tm *timeinfo; char buffer[80]; time(&rawtime); timeinfo = localtime(&rawtime); strftime(buffer, 80, "%c", timeinfo); // puts (buffer); printf("%s\n", buffer); } int findcore() { int cpu; #ifdef __APPLE__ cpu = -1; #else cpu = sched_getcpu(); #endif return cpu; } int str_upr(char *cstr) { char *str = cstr; for (; *str; str++) { if (isalpha(*str)) if (*str >= 'a') { *str += 'A' - 'a'; } } return 0; } int str_low(char *cstr) { char *str = cstr; for (; *str; str++) { if (isalpha(*str)) if (*str < 'a') { *str += 'a' - 'A'; } } return 0; } void dohelp(); void dohelp() { /************************************************************ * This is a glorified hello world program. Each processor * prints name, rank, and other information as described below. 
* ************************************************************/ printf("phostname arguments:\n"); printf(" -h : Print this help message\n"); printf("\n"); printf("no arguments : Print a list of the nodes on which the command is " "run.\n"); printf("\n"); printf(" -f or -1 : Same as no argument but print MPI task id and Thread " "id\n"); printf(" If run with OpenMP threading enabled OMP_NUM_THREADS " "> 1\n"); printf(" there will be a line per MPI task and Thread.\n"); printf("\n"); printf(" -F or -2 : Add columns to tell first MPI task on a node and and " "the\n"); printf(" numbering of tasks on a node. (Hint: pipe this output " "in\n"); printf(" to sort -r\n"); printf("\n"); printf(" -E or -B : Print thread info at 'E'nd of the run or 'B'oth the " "start and end\n"); printf("\n"); printf(" -a : Print a listing of the environmental variables passed " "to\n"); printf(" MPI task. (Hint: use the -l option with SLURM to " "prepend MPI\n"); printf(" task #.)\n"); printf("\n"); printf(" -s ######## : Where ######## is an integer. Sum a bunch on " "integers to slow\n"); printf(" down the program. Should run faster with multiple " "threads.\n"); printf("\n"); printf(" -t ######## : Where is a time in seconds. Sum a bunch on integers " "to slow\n"); printf(" down the program and run for at least the given " "seconds.\n"); printf("\n"); printf(" -T : Print time/date at the beginning/end of the run.\n"); printf("\n"); } /* valid is used to get around an issue in some versions of * MPI that screw up the environmnet passed to programs. Its * usage is not recommended. See: * https://wiki.sei.cmu.edu/confluence/display/c/MEM10-C.+Define+and+use+a+pointer+validation+function * * "The valid() function does not guarantee validity; it only * identifies null pointers and pointers to functions as invalid. * However, it can be used to catch a substantial number of * problems that might otherwise go undetected." 
*/ int valid(void *p) { extern char _etext; return (p != NULL) && ((char *)p > &_etext); } char f1234[128], f1235[128], f1236[128]; int main(int argc, char **argv, char *envp[]) { char *eql; int myid, numprocs, resultlen; int mycolor, new_id, new_nodes; int i, k; MPI_Comm node_comm; char lname[MPI_MAX_PROCESSOR_NAME]; //#ifdef MPI_MAX_LIBRARY_VERSION_STRING char version[MPI_MAX_LIBRARY_VERSION_STRING]; //#else // char version[40]; //#endif char *myname, *cutit; int full, envs, iarg, tn, nt, help, slow, vlan, wait, dotime, when; int nints; double t1, t2, dt; /* Format statements */ // char *f1234="%4.4d %4.4d %18s %4.4d %4.4d // %4.4d\n"; char *f1235="%s %4.4d %4.4d\n"; char *f1236="%s\n"; strcpy(f1234, "%4.4d %4.4d %18s %4.4d %4.4d %4.4d\n"); strcpy(f1235, "%s %4.4d %4.4d\n"); strcpy(f1236, "%s\n"); MPI_Init(&argc, &argv); //#ifdef MPI_MAX_LIBRARY_VERSION_STRING MPI_Get_library_version(version, &vlan); //#else // sprintf(version,"%s","UNDEFINED - consider upgrading"); //#endif MPI_Comm_size(MPI_COMM_WORLD, &numprocs); MPI_Comm_rank(MPI_COMM_WORLD, &myid); MPI_Get_processor_name(lname, &resultlen); /* Get rid of "stuff" from the processor name. */ myname = trim(lname); /* The next line is required for BGQ because the MPI task ID is encoded in the processor name and we don't want it. 
*/ if (strrchr(myname, 32)) myname = strrchr(myname, 32); /* Here we cut off the tail of node name, Summit in this case */ cutit = strstr(myname, ".rc.int.colorado.edu"); if (cutit) cutit[0] = (char)0; slow = 0; wait = 0; /* read in command line args from task 0 */ if (myid == 0) { full = 0; envs = 0; help = 0; dotime = 0; when = 1; if (argc > 1) { for (iarg = 1; iarg < argc; iarg++) { if ((strcmp(argv[iarg], "-h") == 0) || (strcmp(argv[iarg], "--h") == 0) || (strcmp(argv[iarg], "-help") == 0)) help = 1; /**/ if ((strcmp(argv[iarg], "-f") == 0) || (strcmp(argv[iarg], "-1") == 0)) full = 1; /**/ if ((strcmp(argv[iarg], "-F") == 0) || (strcmp(argv[iarg], "-2") == 0)) full = 2; /**/ if (strcmp(argv[iarg], "-s") == 0) slow = 1; /**/ if (strcmp(argv[iarg], "-t") == 0) wait = 1; /**/ if (strcmp(argv[iarg], "-a") == 0) envs = 1; /**/ if (strcmp(argv[iarg], "-T") == 0) dotime = 1; if (strcmp(argv[iarg], "-B") == 0) when = 3; if (strcmp(argv[iarg], "-E") == 0) when = 2; } } } /* send info to all tasks, if doing help doit and quit */ MPI_Bcast(&help, 1, MPI_INT, 0, MPI_COMM_WORLD); if (help == 1) { if (myid == 0) dohelp(); MPI_Finalize(); exit(0); } MPI_Bcast(&full, 1, MPI_INT, 0, MPI_COMM_WORLD); MPI_Bcast(&envs, 1, MPI_INT, 0, MPI_COMM_WORLD); MPI_Bcast(&when, 1, MPI_INT, 0, MPI_COMM_WORLD); if (myid == 0 && dotime == 1) ptime(); if (myid == 0 && full == 2) { printf("MPI VERSION %s\n", version); printf("task thread node name first task # on node " "core\n"); } /*********/ /* The routine NODE_COLOR will return the same value for all mpi tasks that are running on the same node. We use this to create a new communicator from which we get the numbering of tasks on a node. */ // NODE_COLOR(&mycolor); mycolor = node_color(); MPI_Comm_split(MPI_COMM_WORLD, mycolor, myid, &node_comm); MPI_Comm_rank(node_comm, &new_id); MPI_Comm_size(node_comm, &new_nodes); tn = -1; nt = -1; /* Here we print out the information with the format and verbosity determined by the value of full. 
We do this a task at a time to "hopefully" get a bit better formatting. */ for (i = 0; i < numprocs; i++) { MPI_Barrier(MPI_COMM_WORLD); if (i != myid) continue; if (when == 3) str_low(myname); if (when != 2) dothreads(full, myname, myid, mycolor, new_id); /* here we print out the environment in which a MPI task is running */ /* We try to determine if the passed environment is valid but sometimes * it just does not work and this can crash. Try taking out myid==0 * and setting PID to a nonzero value. */ // if (envs == 1 && new_id==1) { if (envs == 1 && (myid == PID || myid == 0)) { k = 0; if (valid(envp) == 1) { // while(envp[k]) { while (valid(envp[k]) == 1) { if (strlen(envp[k]) > 3) { eql = strchr(envp[k], '='); if (eql == NULL) break; printf("? %d %s\n", myid, envp[k]); } else { break; } // printf("? %d %d\n",myid,k); k++; } } else { printf("? %d %s\n", myid, "Environmnet not set"); } } } if (myid == 0) { dt = 0; if (wait) { slow = 0; for (iarg = 1; iarg < argc; iarg++) { // printf("%s\n",argv[iarg]); if (atof(argv[iarg]) > 0) dt = atof(argv[iarg]); } } } MPI_Bcast(&dt, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD); if (dt > 0) { nints = 100000; t1 = MPI_Wtime(); t2 = t1; while (dt > t2 - t1) { for (i = 1; i <= 1000; i++) { slowit(nints, i); } t2 = MPI_Wtime(); } if (myid == 0) printf("total time %10.3f\n", t2 - t1); nints = 0; } if (myid == 0) { nints = 0; if (slow == 1) { for (iarg = 1; iarg < argc; iarg++) { if (atol(argv[iarg]) > 0) nints = atoi(argv[iarg]); } } } MPI_Bcast(&nints, 1, MPI_INT, 0, MPI_COMM_WORLD); if (nints > 0) { t1 = MPI_Wtime(); for (i = 1; i <= 1000; i++) { slowit(nints, i); } t2 = MPI_Wtime(); if (myid == 0) printf("total time %10.3f\n", t2 - t1); } if (myid == 0 && dotime == 1) ptime(); if (when > 1) { for (i = 0; i < numprocs; i++) { MPI_Barrier(MPI_COMM_WORLD); if (i != myid) continue; if (when == 3) str_upr(myname); dothreads(full, myname, myid, mycolor, new_id); } } MPI_Finalize(); return 0; } char *trim(char *s) { int i = 0; int j = strlen(s) 
- 1; int k = 0; while (isspace(s[i]) && s[i] != '\0') i++; while (isspace(s[j]) && j >= 0) j--; while (i <= j) s[k++] = s[i++]; s[k] = '\0'; return s; } /* ! return a integer which is unique to all mpi ! tasks running on a particular node. It is ! equal to the id of the first MPI task running ! on a node. This can be used to create ! MPI communicators which only contain tasks on ! a node. */ #include <mpi.h> #include <string.h> int node_color() { int mycol; MPI_Status status; int xchng, i, n2, myid, numprocs; int nlen; int ie; char *pch; char name[MPI_MAX_PROCESSOR_NAME + 1]; char nlist[MPI_MAX_PROCESSOR_NAME + 1]; MPI_Comm_size(MPI_COMM_WORLD, &numprocs); MPI_Comm_rank(MPI_COMM_WORLD, &myid); MPI_Get_processor_name(name, &nlen); pch = strrchr(name, ' '); if (pch) { ie = strlen(pch + 1); memmove(&name[0], pch + 1, ie + 1); memmove(&nlist[0], pch + 1, ie + 1); } else { strcpy(nlist, name); } mycol = myid; n2 = 1; while (n2 < numprocs) { n2 = n2 * 2; } for (i = 1; i <= n2 - 1; i++) { xchng = i ^ myid; if (xchng <= (numprocs - 1)) { if (myid < xchng) { MPI_Send(name, MPI_MAX_PROCESSOR_NAME, MPI_CHAR, xchng, 12345, MPI_COMM_WORLD); MPI_Recv(nlist, MPI_MAX_PROCESSOR_NAME, MPI_CHAR, xchng, 12345, MPI_COMM_WORLD, &status); } else { MPI_Recv(nlist, MPI_MAX_PROCESSOR_NAME, MPI_CHAR, xchng, 12345, MPI_COMM_WORLD, &status); MPI_Send(name, MPI_MAX_PROCESSOR_NAME, MPI_CHAR, xchng, 12345, MPI_COMM_WORLD); } if (strcmp(nlist, name) == 0 && xchng < mycol) mycol = xchng; } else { /* skip this stage */ } } return mycol; } void slowit(int nints, int val) { int *block; long i, sum; #ifdef VERBOSET double t2, t1; t1 = MPI_Wtime(); #endif block = (int *)malloc(nints * sizeof(int)); #pragma omp parallel for for (i = 0; i < nints; i++) { block[i] = val; } sum = 0; #pragma omp parallel for reduction(+ : sum) for (i = 0; i < nints; i++) { sum = sum + block[i]; } #ifdef VERBOSET t2 = MPI_Wtime(); printf("sum of integers %ld %10.3f\n", sum, t2 - t1); #endif free(block); } #ifdef STUBS int 
omp_get_thread_num(void) { return 0; } int omp_get_num_threads(void) { return 1; } #endif void dothreads(int full, char *myname, int myid, int mycolor, int new_id) { int nt, tn; #pragma omp parallel { nt = omp_get_num_threads(); if (nt == 0) nt = 1; #pragma omp critical { if (nt < 2) { nt = 1; tn = 0; } else { tn = omp_get_thread_num(); } if (full == 0) { if (tn == 0) printf(f1236, trim(myname)); } if (full == 1) { printf(f1235, trim(myname), myid, tn); } if (full == 2) { printf(f1234, myid, tn, trim(myname), mycolor, new_id, findcore()); } } } }
/* ---- par_interp.c ---- */
/****************************************************************************** * Copyright 1998-2019 Lawrence Livermore National Security, LLC and other * HYPRE Project Developers. See the top-level COPYRIGHT file for details. * * SPDX-License-Identifier: (Apache-2.0 OR MIT) ******************************************************************************/ #include "_hypre_parcsr_ls.h" /*--------------------------------------------------------------------------- * hypre_BoomerAMGBuildInterp *--------------------------------------------------------------------------*/ HYPRE_Int hypre_BoomerAMGBuildInterp( hypre_ParCSRMatrix *A, HYPRE_Int *CF_marker, hypre_ParCSRMatrix *S, HYPRE_BigInt *num_cpts_global, HYPRE_Int num_functions, HYPRE_Int *dof_func, HYPRE_Int debug_flag, HYPRE_Real trunc_factor, HYPRE_Int max_elmts, HYPRE_Int *col_offd_S_to_A, hypre_ParCSRMatrix **P_ptr) { MPI_Comm comm = hypre_ParCSRMatrixComm(A); hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A); hypre_ParCSRCommHandle *comm_handle; hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd); HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd); HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd); HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd); HYPRE_BigInt *col_map_offd = hypre_ParCSRMatrixColMapOffd(A); hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S); HYPRE_Int *S_diag_i = hypre_CSRMatrixI(S_diag); HYPRE_Int *S_diag_j = hypre_CSRMatrixJ(S_diag); hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S); HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd); HYPRE_Int *S_offd_j = hypre_CSRMatrixJ(S_offd); hypre_ParCSRMatrix *P; HYPRE_BigInt *col_map_offd_P; HYPRE_Int *tmp_map_offd = NULL; HYPRE_Int *CF_marker_offd = NULL; HYPRE_Int *dof_func_offd = 
NULL; hypre_CSRMatrix *A_ext; HYPRE_Real *A_ext_data = NULL; HYPRE_Int *A_ext_i = NULL; HYPRE_BigInt *A_ext_j = NULL; hypre_CSRMatrix *P_diag; hypre_CSRMatrix *P_offd; HYPRE_Real *P_diag_data; HYPRE_Int *P_diag_i; HYPRE_Int *P_diag_j; HYPRE_Real *P_offd_data; HYPRE_Int *P_offd_i; HYPRE_Int *P_offd_j; HYPRE_Int P_diag_size, P_offd_size; HYPRE_Int *P_marker, *P_marker_offd; HYPRE_Int jj_counter,jj_counter_offd; HYPRE_Int *jj_count, *jj_count_offd; HYPRE_Int jj_begin_row,jj_begin_row_offd; HYPRE_Int jj_end_row,jj_end_row_offd; HYPRE_Int start_indexing = 0; /* start indexing for P_data at 0 */ HYPRE_Int n_fine = hypre_CSRMatrixNumRows(A_diag); HYPRE_Int strong_f_marker; HYPRE_Int *fine_to_coarse; //HYPRE_Int *fine_to_coarse_offd; HYPRE_Int *coarse_counter; HYPRE_Int coarse_shift; HYPRE_BigInt total_global_cpts; //HYPRE_BigInt my_first_cpt; HYPRE_Int num_cols_P_offd; HYPRE_Int i,i1,i2; HYPRE_Int j,jl,jj,jj1; HYPRE_Int kc; HYPRE_BigInt big_k; HYPRE_Int start; HYPRE_Int sgn; HYPRE_Int c_num; HYPRE_Real diagonal; HYPRE_Real sum; HYPRE_Real distribute; HYPRE_Real zero = 0.0; HYPRE_Real one = 1.0; HYPRE_Int my_id; HYPRE_Int num_procs; HYPRE_Int num_threads; HYPRE_Int num_sends; HYPRE_Int index; HYPRE_Int ns, ne, size, rest; HYPRE_Int print_level = 0; HYPRE_Int *int_buf_data; HYPRE_BigInt col_1 = hypre_ParCSRMatrixFirstRowIndex(A); HYPRE_Int local_numrows = hypre_CSRMatrixNumRows(A_diag); HYPRE_BigInt col_n = col_1 + (HYPRE_BigInt)local_numrows; HYPRE_Real wall_time; /* for debugging instrumentation */ hypre_MPI_Comm_size(comm, &num_procs); hypre_MPI_Comm_rank(comm,&my_id); num_threads = hypre_NumThreads(); #ifdef HYPRE_NO_GLOBAL_PARTITION //my_first_cpt = num_cpts_global[0]; if (my_id == (num_procs -1)) total_global_cpts = num_cpts_global[1]; hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs-1, comm); #else //my_first_cpt = num_cpts_global[my_id]; total_global_cpts = num_cpts_global[num_procs]; #endif 
/*------------------------------------------------------------------- * Get the CF_marker data for the off-processor columns *-------------------------------------------------------------------*/ if (debug_flag < 0) { debug_flag = -debug_flag; print_level = 1; } if (debug_flag==4) wall_time = time_getWallclockSeconds(); if (num_cols_A_offd) CF_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST); if (num_functions > 1 && num_cols_A_offd) { dof_func_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST); } if (!comm_pkg) { hypre_MatvecCommPkgCreate(A); comm_pkg = hypre_ParCSRMatrixCommPkg(A); } num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST); index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++) { int_buf_data[index++] = CF_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)]; } } comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data, CF_marker_offd); hypre_ParCSRCommHandleDestroy(comm_handle); if (num_functions > 1) { index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j=start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++) int_buf_data[index++] = dof_func[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)]; } comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data, dof_func_offd); hypre_ParCSRCommHandleDestroy(comm_handle); } if (debug_flag==4) { wall_time = time_getWallclockSeconds() - wall_time; hypre_printf("Proc = %d Interp: Comm 1 CF_marker = %f\n", my_id, wall_time); fflush(NULL); } /*---------------------------------------------------------------------- * Get the ghost rows of A *---------------------------------------------------------------------*/ if (debug_flag==4) wall_time = time_getWallclockSeconds(); if 
(num_procs > 1) { A_ext = hypre_ParCSRMatrixExtractBExt(A,A,1); A_ext_i = hypre_CSRMatrixI(A_ext); A_ext_j = hypre_CSRMatrixBigJ(A_ext); A_ext_data = hypre_CSRMatrixData(A_ext); } index = 0; for (i=0; i < num_cols_A_offd; i++) { for (j=A_ext_i[i]; j < A_ext_i[i+1]; j++) { big_k = A_ext_j[j]; if (big_k >= col_1 && big_k < col_n) { A_ext_j[index] = big_k - col_1; A_ext_data[index++] = A_ext_data[j]; } else { kc = hypre_BigBinarySearch(col_map_offd,big_k,num_cols_A_offd); if (kc > -1) { A_ext_j[index] = (HYPRE_BigInt)(-kc-1); A_ext_data[index++] = A_ext_data[j]; } } } A_ext_i[i] = index; } for (i = num_cols_A_offd; i > 0; i--) A_ext_i[i] = A_ext_i[i-1]; if (num_procs > 1) A_ext_i[0] = 0; if (debug_flag==4) { wall_time = time_getWallclockSeconds() - wall_time; hypre_printf("Proc = %d Interp: Comm 2 Get A_ext = %f\n", my_id, wall_time); fflush(NULL); } /*----------------------------------------------------------------------- * First Pass: Determine size of P and fill in fine_to_coarse mapping. *-----------------------------------------------------------------------*/ /*----------------------------------------------------------------------- * Intialize counters and allocate mapping vector. *-----------------------------------------------------------------------*/ coarse_counter = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST); jj_count = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST); jj_count_offd = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST); fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < n_fine; i++) fine_to_coarse[i] = -1; jj_counter = start_indexing; jj_counter_offd = start_indexing; /*----------------------------------------------------------------------- * Loop over fine grid. 
*-----------------------------------------------------------------------*/ /* RDF: this looks a little tricky, but doable */ #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,j,i1,jj,ns,ne,size,rest) HYPRE_SMP_SCHEDULE #endif for (j = 0; j < num_threads; j++) { size = n_fine/num_threads; rest = n_fine - size*num_threads; if (j < rest) { ns = j*size+j; ne = (j+1)*size+j+1; } else { ns = j*size+rest; ne = (j+1)*size+rest; } for (i = ns; i < ne; i++) { /*-------------------------------------------------------------------- * If i is a C-point, interpolation is the identity. Also set up * mapping vector. *--------------------------------------------------------------------*/ if (CF_marker[i] >= 0) { jj_count[j]++; fine_to_coarse[i] = coarse_counter[j]; coarse_counter[j]++; } /*-------------------------------------------------------------------- * If i is an F-point, interpolation is from the C-points that * strongly influence i. *--------------------------------------------------------------------*/ else { for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++) { i1 = S_diag_j[jj]; if (CF_marker[i1] >= 0) { jj_count[j]++; } } if (num_procs > 1) { if (col_offd_S_to_A) { for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++) { i1 = col_offd_S_to_A[S_offd_j[jj]]; if (CF_marker_offd[i1] >= 0) { jj_count_offd[j]++; } } } else { for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++) { i1 = S_offd_j[jj]; if (CF_marker_offd[i1] >= 0) { jj_count_offd[j]++; } } } } } } } /*----------------------------------------------------------------------- * Allocate arrays. 
*-----------------------------------------------------------------------*/ for (i=0; i < num_threads-1; i++) { coarse_counter[i+1] += coarse_counter[i]; jj_count[i+1] += jj_count[i]; jj_count_offd[i+1] += jj_count_offd[i]; } i = num_threads-1; jj_counter = jj_count[i]; jj_counter_offd = jj_count_offd[i]; P_diag_size = jj_counter; P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, HYPRE_MEMORY_DEVICE); P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size, HYPRE_MEMORY_DEVICE); P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size, HYPRE_MEMORY_DEVICE); P_diag_i[n_fine] = jj_counter; P_offd_size = jj_counter_offd; P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, HYPRE_MEMORY_DEVICE); P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size, HYPRE_MEMORY_DEVICE); P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size, HYPRE_MEMORY_DEVICE); /*----------------------------------------------------------------------- * Intialize some stuff. *-----------------------------------------------------------------------*/ jj_counter = start_indexing; jj_counter_offd = start_indexing; if (debug_flag==4) { wall_time = time_getWallclockSeconds() - wall_time; hypre_printf("Proc = %d Interp: Internal work 1 = %f\n", my_id, wall_time); fflush(NULL); } /*----------------------------------------------------------------------- * Send and receive fine_to_coarse info. 
*-----------------------------------------------------------------------*/ if (debug_flag==4) wall_time = time_getWallclockSeconds(); //fine_to_coarse_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST); #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,j,ns,ne,size,rest,coarse_shift) HYPRE_SMP_SCHEDULE #endif for (j = 0; j < num_threads; j++) { coarse_shift = 0; if (j > 0) coarse_shift = coarse_counter[j-1]; size = n_fine/num_threads; rest = n_fine - size*num_threads; if (j < rest) { ns = j*size+j; ne = (j+1)*size+j+1; } else { ns = j*size+rest; ne = (j+1)*size+rest; } for (i = ns; i < ne; i++) { fine_to_coarse[i] += coarse_shift; } //fine_to_coarse[i] += my_first_cpt+coarse_shift; } /*index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++) int_buf_data[index++] = fine_to_coarse[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)]; } comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data, fine_to_coarse_offd); hypre_ParCSRCommHandleDestroy(comm_handle); if (debug_flag==4) { wall_time = time_getWallclockSeconds() - wall_time; hypre_printf("Proc = %d Interp: Comm 4 FineToCoarse = %f\n", my_id, wall_time); fflush(NULL); }*/ if (debug_flag==4) wall_time = time_getWallclockSeconds(); /*#ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < n_fine; i++) fine_to_coarse[i] -= my_first_cpt; */ /*----------------------------------------------------------------------- * Loop over fine grid points. 
*-----------------------------------------------------------------------*/ #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,j,jl,i1,i2,jj,jj1,ns,ne,size,rest,sum,diagonal,distribute,P_marker,P_marker_offd,strong_f_marker,jj_counter,jj_counter_offd,sgn,c_num,jj_begin_row,jj_end_row,jj_begin_row_offd,jj_end_row_offd) HYPRE_SMP_SCHEDULE #endif for (jl = 0; jl < num_threads; jl++) { size = n_fine/num_threads; rest = n_fine - size*num_threads; if (jl < rest) { ns = jl*size+jl; ne = (jl+1)*size+jl+1; } else { ns = jl*size+rest; ne = (jl+1)*size+rest; } jj_counter = 0; if (jl > 0) jj_counter = jj_count[jl-1]; jj_counter_offd = 0; if (jl > 0) jj_counter_offd = jj_count_offd[jl-1]; P_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); if (num_cols_A_offd) P_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST); else P_marker_offd = NULL; for (i = 0; i < n_fine; i++) { P_marker[i] = -1; } for (i = 0; i < num_cols_A_offd; i++) { P_marker_offd[i] = -1; } strong_f_marker = -2; for (i = ns; i < ne; i++) { /*-------------------------------------------------------------------- * If i is a c-point, interpolation is the identity. *--------------------------------------------------------------------*/ if (CF_marker[i] >= 0) { P_diag_i[i] = jj_counter; P_diag_j[jj_counter] = fine_to_coarse[i]; P_diag_data[jj_counter] = one; jj_counter++; } /*-------------------------------------------------------------------- * If i is an F-point, build interpolation. *--------------------------------------------------------------------*/ else { /* Diagonal part of P */ P_diag_i[i] = jj_counter; jj_begin_row = jj_counter; for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++) { i1 = S_diag_j[jj]; /*-------------------------------------------------------------- * If neighbor i1 is a C-point, set column number in P_diag_j * and initialize interpolation weight to zero. 
*--------------------------------------------------------------*/ if (CF_marker[i1] >= 0) { P_marker[i1] = jj_counter; P_diag_j[jj_counter] = fine_to_coarse[i1]; P_diag_data[jj_counter] = zero; jj_counter++; } /*-------------------------------------------------------------- * If neighbor i1 is an F-point, mark it as a strong F-point * whose connection needs to be distributed. *--------------------------------------------------------------*/ else if (CF_marker[i1] != -3) { P_marker[i1] = strong_f_marker; } } jj_end_row = jj_counter; /* Off-Diagonal part of P */ P_offd_i[i] = jj_counter_offd; jj_begin_row_offd = jj_counter_offd; if (num_procs > 1) { if (col_offd_S_to_A) { for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++) { i1 = col_offd_S_to_A[S_offd_j[jj]]; /*----------------------------------------------------------- * If neighbor i1 is a C-point, set column number in P_offd_j * and initialize interpolation weight to zero. *-----------------------------------------------------------*/ if (CF_marker_offd[i1] >= 0) { P_marker_offd[i1] = jj_counter_offd; /*P_offd_j[jj_counter_offd] = fine_to_coarse_offd[i1];*/ P_offd_j[jj_counter_offd] = i1; P_offd_data[jj_counter_offd] = zero; jj_counter_offd++; } /*----------------------------------------------------------- * If neighbor i1 is an F-point, mark it as a strong F-point * whose connection needs to be distributed. *-----------------------------------------------------------*/ else if (CF_marker_offd[i1] != -3) { P_marker_offd[i1] = strong_f_marker; } } } else { for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++) { i1 = S_offd_j[jj]; /*----------------------------------------------------------- * If neighbor i1 is a C-point, set column number in P_offd_j * and initialize interpolation weight to zero. 
*-----------------------------------------------------------*/ if (CF_marker_offd[i1] >= 0) { P_marker_offd[i1] = jj_counter_offd; /*P_offd_j[jj_counter_offd] = fine_to_coarse_offd[i1];*/ P_offd_j[jj_counter_offd] = i1; P_offd_data[jj_counter_offd] = zero; jj_counter_offd++; } /*----------------------------------------------------------- * If neighbor i1 is an F-point, mark it as a strong F-point * whose connection needs to be distributed. *-----------------------------------------------------------*/ else if (CF_marker_offd[i1] != -3) { P_marker_offd[i1] = strong_f_marker; } } } } jj_end_row_offd = jj_counter_offd; diagonal = A_diag_data[A_diag_i[i]]; /* Loop over ith row of A. First, the diagonal part of A */ for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { i1 = A_diag_j[jj]; /*-------------------------------------------------------------- * Case 1: neighbor i1 is a C-point and strongly influences i, * accumulate a_{i,i1} into the interpolation weight. *--------------------------------------------------------------*/ if (P_marker[i1] >= jj_begin_row) { P_diag_data[P_marker[i1]] += A_diag_data[jj]; } /*-------------------------------------------------------------- * Case 2: neighbor i1 is an F-point and strongly influences i, * distribute a_{i,i1} to C-points that strongly infuence i. * Note: currently no distribution to the diagonal in this case. *--------------------------------------------------------------*/ else if (P_marker[i1] == strong_f_marker) { sum = zero; /*----------------------------------------------------------- * Loop over row of A for point i1 and calculate the sum * of the connections to c-points that strongly influence i. 
*-----------------------------------------------------------*/ sgn = 1; if (A_diag_data[A_diag_i[i1]] < 0) sgn = -1; /* Diagonal block part of row i1 */ for (jj1 = A_diag_i[i1]; jj1 < A_diag_i[i1+1]; jj1++) { i2 = A_diag_j[jj1]; if (P_marker[i2] >= jj_begin_row && (sgn*A_diag_data[jj1]) < 0) { sum += A_diag_data[jj1]; } } /* Off-Diagonal block part of row i1 */ if (num_procs > 1) { for (jj1 = A_offd_i[i1]; jj1 < A_offd_i[i1+1]; jj1++) { i2 = A_offd_j[jj1]; if (P_marker_offd[i2] >= jj_begin_row_offd && (sgn*A_offd_data[jj1]) < 0) { sum += A_offd_data[jj1]; } } } if (sum != 0) { distribute = A_diag_data[jj] / sum; /*----------------------------------------------------------- * Loop over row of A for point i1 and do the distribution. *-----------------------------------------------------------*/ /* Diagonal block part of row i1 */ for (jj1 = A_diag_i[i1]; jj1 < A_diag_i[i1+1]; jj1++) { i2 = A_diag_j[jj1]; if (P_marker[i2] >= jj_begin_row && (sgn*A_diag_data[jj1]) < 0) { P_diag_data[P_marker[i2]] += distribute * A_diag_data[jj1]; } } /* Off-Diagonal block part of row i1 */ if (num_procs > 1) { for (jj1 = A_offd_i[i1]; jj1 < A_offd_i[i1+1]; jj1++) { i2 = A_offd_j[jj1]; if (P_marker_offd[i2] >= jj_begin_row_offd && (sgn*A_offd_data[jj1]) < 0) { P_offd_data[P_marker_offd[i2]] += distribute * A_offd_data[jj1]; } } } } else { if (num_functions == 1 || dof_func[i] == dof_func[i1]) { diagonal += A_diag_data[jj]; } } } /*-------------------------------------------------------------- * Case 3: neighbor i1 weakly influences i, accumulate a_{i,i1} * into the diagonal. *--------------------------------------------------------------*/ else if (CF_marker[i1] != -3) { if (num_functions == 1 || dof_func[i] == dof_func[i1]) { diagonal += A_diag_data[jj]; } } } /*---------------------------------------------------------------- * Still looping over ith row of A. 
Next, loop over the * off-diagonal part of A *---------------------------------------------------------------*/ if (num_procs > 1) { for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { i1 = A_offd_j[jj]; /*-------------------------------------------------------------- * Case 1: neighbor i1 is a C-point and strongly influences i, * accumulate a_{i,i1} into the interpolation weight. *--------------------------------------------------------------*/ if (P_marker_offd[i1] >= jj_begin_row_offd) { P_offd_data[P_marker_offd[i1]] += A_offd_data[jj]; } /*------------------------------------------------------------ * Case 2: neighbor i1 is an F-point and strongly influences i, * distribute a_{i,i1} to C-points that strongly infuence i. * Note: currently no distribution to the diagonal in this case. *-----------------------------------------------------------*/ else if (P_marker_offd[i1] == strong_f_marker) { sum = zero; /*--------------------------------------------------------- * Loop over row of A_ext for point i1 and calculate the sum * of the connections to c-points that strongly influence i. *---------------------------------------------------------*/ /* find row number */ c_num = A_offd_j[jj]; sgn = 1; if (A_ext_data[A_ext_i[c_num]] < 0) sgn = -1; for (jj1 = A_ext_i[c_num]; jj1 < A_ext_i[c_num+1]; jj1++) { i2 = (HYPRE_Int)A_ext_j[jj1]; if (i2 > -1) { /* in the diagonal block */ if (P_marker[i2] >= jj_begin_row && (sgn*A_ext_data[jj1]) < 0) { sum += A_ext_data[jj1]; } } else { /* in the off_diagonal block */ if (P_marker_offd[-i2-1] >= jj_begin_row_offd && (sgn*A_ext_data[jj1]) < 0) { sum += A_ext_data[jj1]; } } } if (sum != 0) { distribute = A_offd_data[jj] / sum; /*--------------------------------------------------------- * Loop over row of A_ext for point i1 and do * the distribution. 
*--------------------------------------------------------*/ /* Diagonal block part of row i1 */ for (jj1 = A_ext_i[c_num]; jj1 < A_ext_i[c_num+1]; jj1++) { i2 = (HYPRE_Int)A_ext_j[jj1]; if (i2 > -1) /* in the diagonal block */ { if (P_marker[i2] >= jj_begin_row && (sgn*A_ext_data[jj1]) < 0) { P_diag_data[P_marker[i2]] += distribute * A_ext_data[jj1]; } } else { /* in the off_diagonal block */ if (P_marker_offd[-i2-1] >= jj_begin_row_offd && (sgn*A_ext_data[jj1]) < 0) P_offd_data[P_marker_offd[-i2-1]] += distribute * A_ext_data[jj1]; } } } else { if (num_functions == 1 || dof_func[i] == dof_func_offd[i1]) { diagonal += A_offd_data[jj]; } } } /*----------------------------------------------------------- * Case 3: neighbor i1 weakly influences i, accumulate a_{i,i1} * into the diagonal. *-----------------------------------------------------------*/ else if (CF_marker_offd[i1] != -3) { if (num_functions == 1 || dof_func[i] == dof_func_offd[i1]) { diagonal += A_offd_data[jj]; } } } } /*----------------------------------------------------------------- * Set interpolation weight by dividing by the diagonal. *-----------------------------------------------------------------*/ if (diagonal == 0.0) { if (print_level) { hypre_printf(" Warning! zero diagonal! 
Proc id %d row %d\n", my_id,i); } for (jj = jj_begin_row; jj < jj_end_row; jj++) { P_diag_data[jj] = 0.0; } for (jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++) { P_offd_data[jj] = 0.0; } } else { for (jj = jj_begin_row; jj < jj_end_row; jj++) { P_diag_data[jj] /= -diagonal; } for (jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++) { P_offd_data[jj] /= -diagonal; } } } strong_f_marker--; P_offd_i[i+1] = jj_counter_offd; } hypre_TFree(P_marker, HYPRE_MEMORY_HOST); hypre_TFree(P_marker_offd, HYPRE_MEMORY_HOST); } P = hypre_ParCSRMatrixCreate(comm, hypre_ParCSRMatrixGlobalNumRows(A), total_global_cpts, hypre_ParCSRMatrixColStarts(A), num_cpts_global, 0, P_diag_i[n_fine], P_offd_i[n_fine]); P_diag = hypre_ParCSRMatrixDiag(P); hypre_CSRMatrixData(P_diag) = P_diag_data; hypre_CSRMatrixI(P_diag) = P_diag_i; hypre_CSRMatrixJ(P_diag) = P_diag_j; P_offd = hypre_ParCSRMatrixOffd(P); hypre_CSRMatrixData(P_offd) = P_offd_data; hypre_CSRMatrixI(P_offd) = P_offd_i; hypre_CSRMatrixJ(P_offd) = P_offd_j; hypre_ParCSRMatrixOwnsRowStarts(P) = 0; /* Compress P, removing coefficients smaller than trunc_factor * Max */ if (trunc_factor != 0.0 || max_elmts > 0) { hypre_BoomerAMGInterpTruncation(P, trunc_factor, max_elmts); P_diag_data = hypre_CSRMatrixData(P_diag); P_diag_i = hypre_CSRMatrixI(P_diag); P_diag_j = hypre_CSRMatrixJ(P_diag); P_offd_data = hypre_CSRMatrixData(P_offd); P_offd_i = hypre_CSRMatrixI(P_offd); P_offd_j = hypre_CSRMatrixJ(P_offd); P_diag_size = P_diag_i[n_fine]; P_offd_size = P_offd_i[n_fine]; } num_cols_P_offd = 0; if (P_offd_size) { P_marker = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST); #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i=0; i < num_cols_A_offd; i++) { P_marker[i] = 0; } num_cols_P_offd = 0; for (i=0; i < P_offd_size; i++) { index = P_offd_j[i]; if (!P_marker[index]) { num_cols_P_offd++; P_marker[index] = 1; } } col_map_offd_P = hypre_CTAlloc(HYPRE_BigInt, num_cols_P_offd, 
HYPRE_MEMORY_HOST); tmp_map_offd = hypre_CTAlloc(HYPRE_Int, num_cols_P_offd, HYPRE_MEMORY_HOST); index = 0; for (i=0; i < num_cols_P_offd; i++) { while (P_marker[index]==0) index++; tmp_map_offd[i] = index++; } #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i=0; i < P_offd_size; i++) P_offd_j[i] = hypre_BinarySearch(tmp_map_offd, P_offd_j[i], num_cols_P_offd); hypre_TFree(P_marker, HYPRE_MEMORY_HOST); } for (i=0; i < n_fine; i++) { if (CF_marker[i] == -3) CF_marker[i] = -1; } if (num_cols_P_offd) { hypre_ParCSRMatrixColMapOffd(P) = col_map_offd_P; hypre_CSRMatrixNumCols(P_offd) = num_cols_P_offd; } hypre_GetCommPkgRTFromCommPkgA(P,A, fine_to_coarse, tmp_map_offd); *P_ptr = P; hypre_TFree(tmp_map_offd, HYPRE_MEMORY_HOST); hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST); hypre_TFree(dof_func_offd, HYPRE_MEMORY_HOST); hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST); hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST); //hypre_TFree(fine_to_coarse_offd, HYPRE_MEMORY_HOST); hypre_TFree(coarse_counter, HYPRE_MEMORY_HOST); hypre_TFree(jj_count, HYPRE_MEMORY_HOST); hypre_TFree(jj_count_offd, HYPRE_MEMORY_HOST); if (num_procs > 1) hypre_CSRMatrixDestroy(A_ext); return hypre_error_flag; } /*--------------------------------------------------------------------------- * hypre_BoomerAMGBuildInterpHE * interpolation routine for hyperbolic PDEs * treats weak fine connections like strong fine connections *--------------------------------------------------------------------------*/ HYPRE_Int hypre_BoomerAMGBuildInterpHE( hypre_ParCSRMatrix *A, HYPRE_Int *CF_marker, hypre_ParCSRMatrix *S, HYPRE_BigInt *num_cpts_global, HYPRE_Int num_functions, HYPRE_Int *dof_func, HYPRE_Int debug_flag, HYPRE_Real trunc_factor, HYPRE_Int max_elmts, HYPRE_Int *col_offd_S_to_A, hypre_ParCSRMatrix **P_ptr) { MPI_Comm comm = hypre_ParCSRMatrixComm(A); hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A); hypre_ParCSRCommHandle *comm_handle; hypre_CSRMatrix 
*A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd); HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd); HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd); HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd); HYPRE_BigInt *col_map_offd = hypre_ParCSRMatrixColMapOffd(A); hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S); HYPRE_Int *S_diag_i = hypre_CSRMatrixI(S_diag); HYPRE_Int *S_diag_j = hypre_CSRMatrixJ(S_diag); hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S); HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd); HYPRE_Int *S_offd_j = hypre_CSRMatrixJ(S_offd); hypre_ParCSRMatrix *P; HYPRE_BigInt *col_map_offd_P; HYPRE_Int *tmp_map_offd = NULL; HYPRE_Int *CF_marker_offd = NULL; HYPRE_Int *dof_func_offd = NULL; hypre_CSRMatrix *A_ext; HYPRE_Real *A_ext_data = NULL; HYPRE_Int *A_ext_i = NULL; HYPRE_BigInt *A_ext_j = NULL; hypre_CSRMatrix *P_diag; hypre_CSRMatrix *P_offd; HYPRE_Real *P_diag_data; HYPRE_Int *P_diag_i; HYPRE_Int *P_diag_j; HYPRE_Real *P_offd_data; HYPRE_Int *P_offd_i; HYPRE_Int *P_offd_j; HYPRE_Int P_diag_size, P_offd_size; HYPRE_Int *P_marker, *P_marker_offd; HYPRE_Int jj_counter,jj_counter_offd; HYPRE_Int *jj_count, *jj_count_offd; HYPRE_Int jj_begin_row,jj_begin_row_offd; HYPRE_Int jj_end_row,jj_end_row_offd; HYPRE_Int start_indexing = 0; /* start indexing for P_data at 0 */ HYPRE_Int n_fine = hypre_CSRMatrixNumRows(A_diag); HYPRE_Int *fine_to_coarse; //HYPRE_Int *fine_to_coarse_offd; HYPRE_Int *coarse_counter; HYPRE_Int coarse_shift; HYPRE_BigInt total_global_cpts; //HYPRE_BigInt my_first_cpt; HYPRE_Int num_cols_P_offd; HYPRE_Int i,i1,i2; HYPRE_Int j,jl,jj,jj1; HYPRE_Int kc; HYPRE_BigInt big_k; HYPRE_Int start; HYPRE_Int sgn; HYPRE_Int c_num; HYPRE_Real diagonal; HYPRE_Real sum; HYPRE_Real distribute; 
HYPRE_Real zero = 0.0; HYPRE_Real one = 1.0; HYPRE_Int my_id; HYPRE_Int num_procs; HYPRE_Int num_threads; HYPRE_Int num_sends; HYPRE_Int index; HYPRE_Int ns, ne, size, rest; HYPRE_Int *int_buf_data; HYPRE_BigInt col_1 = hypre_ParCSRMatrixFirstRowIndex(A); HYPRE_Int local_numrows = hypre_CSRMatrixNumRows(A_diag); HYPRE_BigInt col_n = col_1 + local_numrows; HYPRE_Real wall_time; /* for debugging instrumentation */ hypre_MPI_Comm_size(comm, &num_procs); hypre_MPI_Comm_rank(comm,&my_id); num_threads = hypre_NumThreads(); #ifdef HYPRE_NO_GLOBAL_PARTITION //my_first_cpt = num_cpts_global[0]; if (my_id == (num_procs -1)) total_global_cpts = num_cpts_global[1]; hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs-1, comm); #else //my_first_cpt = num_cpts_global[my_id]; total_global_cpts = num_cpts_global[num_procs]; #endif /*------------------------------------------------------------------- * Get the CF_marker data for the off-processor columns *-------------------------------------------------------------------*/ if (debug_flag==4) wall_time = time_getWallclockSeconds(); if (num_cols_A_offd) CF_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST); if (num_functions > 1 && num_cols_A_offd) dof_func_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST); if (!comm_pkg) { hypre_MatvecCommPkgCreate(A); comm_pkg = hypre_ParCSRMatrixCommPkg(A); } num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST); index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++) int_buf_data[index++] = CF_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)]; } comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data, CF_marker_offd); hypre_ParCSRCommHandleDestroy(comm_handle); if (num_functions > 1) { index = 0; for 
(i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j=start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++) int_buf_data[index++] = dof_func[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)]; } comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data, dof_func_offd); hypre_ParCSRCommHandleDestroy(comm_handle); } if (debug_flag==4) { wall_time = time_getWallclockSeconds() - wall_time; hypre_printf("Proc = %d Interp: Comm 1 CF_marker = %f\n", my_id, wall_time); fflush(NULL); } /*---------------------------------------------------------------------- * Get the ghost rows of A *---------------------------------------------------------------------*/ if (debug_flag==4) wall_time = time_getWallclockSeconds(); if (num_procs > 1) { A_ext = hypre_ParCSRMatrixExtractBExt(A,A,1); A_ext_i = hypre_CSRMatrixI(A_ext); A_ext_j = hypre_CSRMatrixBigJ(A_ext); A_ext_data = hypre_CSRMatrixData(A_ext); } index = 0; for (i=0; i < num_cols_A_offd; i++) { for (j=A_ext_i[i]; j < A_ext_i[i+1]; j++) { big_k = A_ext_j[j]; if (big_k >= col_1 && big_k < col_n) { A_ext_j[index] = big_k - col_1; A_ext_data[index++] = A_ext_data[j]; } else { kc = hypre_BigBinarySearch(col_map_offd,big_k,num_cols_A_offd); if (kc > -1) { A_ext_j[index] = (HYPRE_BigInt)(-kc-1); A_ext_data[index++] = A_ext_data[j]; } } } A_ext_i[i] = index; } for (i = num_cols_A_offd; i > 0; i--) A_ext_i[i] = A_ext_i[i-1]; if (num_procs > 1) A_ext_i[0] = 0; if (debug_flag==4) { wall_time = time_getWallclockSeconds() - wall_time; hypre_printf("Proc = %d Interp: Comm 2 Get A_ext = %f\n", my_id, wall_time); fflush(NULL); } /*----------------------------------------------------------------------- * First Pass: Determine size of P and fill in fine_to_coarse mapping. *-----------------------------------------------------------------------*/ /*----------------------------------------------------------------------- * Intialize counters and allocate mapping vector. 
*-----------------------------------------------------------------------*/ coarse_counter = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST); jj_count = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST); jj_count_offd = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST); fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < n_fine; i++) fine_to_coarse[i] = -1; jj_counter = start_indexing; jj_counter_offd = start_indexing; /*----------------------------------------------------------------------- * Loop over fine grid. *-----------------------------------------------------------------------*/ /* RDF: this looks a little tricky, but doable */ #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,j,i1,jj,ns,ne,size,rest) HYPRE_SMP_SCHEDULE #endif for (j = 0; j < num_threads; j++) { size = n_fine/num_threads; rest = n_fine - size*num_threads; if (j < rest) { ns = j*size+j; ne = (j+1)*size+j+1; } else { ns = j*size+rest; ne = (j+1)*size+rest; } for (i = ns; i < ne; i++) { /*-------------------------------------------------------------------- * If i is a C-point, interpolation is the identity. Also set up * mapping vector. *--------------------------------------------------------------------*/ if (CF_marker[i] >= 0) { jj_count[j]++; fine_to_coarse[i] = coarse_counter[j]; coarse_counter[j]++; } /*-------------------------------------------------------------------- * If i is an F-point, interpolation is from the C-points that * strongly influence i. 
*--------------------------------------------------------------------*/ else { for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++) { i1 = S_diag_j[jj]; if (CF_marker[i1] >= 0) { jj_count[j]++; } } if (num_procs > 1) { if (col_offd_S_to_A) { for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++) { i1 = col_offd_S_to_A[S_offd_j[jj]]; if (CF_marker_offd[i1] >= 0) { jj_count_offd[j]++; } } } else { for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++) { i1 = S_offd_j[jj]; if (CF_marker_offd[i1] >= 0) { jj_count_offd[j]++; } } } } } } } /*----------------------------------------------------------------------- * Allocate arrays. *-----------------------------------------------------------------------*/ for (i=0; i < num_threads-1; i++) { coarse_counter[i+1] += coarse_counter[i]; jj_count[i+1] += jj_count[i]; jj_count_offd[i+1] += jj_count_offd[i]; } i = num_threads-1; jj_counter = jj_count[i]; jj_counter_offd = jj_count_offd[i]; P_diag_size = jj_counter; P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, HYPRE_MEMORY_HOST); P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size, HYPRE_MEMORY_HOST); P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size, HYPRE_MEMORY_HOST); P_diag_i[n_fine] = jj_counter; P_offd_size = jj_counter_offd; P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, HYPRE_MEMORY_HOST); P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size, HYPRE_MEMORY_HOST); P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size, HYPRE_MEMORY_HOST); /*----------------------------------------------------------------------- * Intialize some stuff. *-----------------------------------------------------------------------*/ jj_counter = start_indexing; jj_counter_offd = start_indexing; if (debug_flag==4) { wall_time = time_getWallclockSeconds() - wall_time; hypre_printf("Proc = %d Interp: Internal work 1 = %f\n", my_id, wall_time); fflush(NULL); } /*----------------------------------------------------------------------- * Send and receive fine_to_coarse info. 
*-----------------------------------------------------------------------*/ if (debug_flag==4) wall_time = time_getWallclockSeconds(); //fine_to_coarse_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST); #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,j,ns,ne,size,rest,coarse_shift) HYPRE_SMP_SCHEDULE #endif for (j = 0; j < num_threads; j++) { coarse_shift = 0; if (j > 0) coarse_shift = coarse_counter[j-1]; size = n_fine/num_threads; rest = n_fine - size*num_threads; if (j < rest) { ns = j*size+j; ne = (j+1)*size+j+1; } else { ns = j*size+rest; ne = (j+1)*size+rest; } for (i = ns; i < ne; i++) fine_to_coarse[i] += coarse_shift; } /*index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++) int_buf_data[index++] = fine_to_coarse[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)]; } comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data, fine_to_coarse_offd); hypre_ParCSRCommHandleDestroy(comm_handle); if (debug_flag==4) { wall_time = time_getWallclockSeconds() - wall_time; hypre_printf("Proc = %d Interp: Comm 4 FineToCoarse = %f\n", my_id, wall_time); fflush(NULL); }*/ if (debug_flag==4) wall_time = time_getWallclockSeconds(); /*#ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < n_fine; i++) fine_to_coarse[i] -= my_first_cpt;*/ /*----------------------------------------------------------------------- * Loop over fine grid points. 
*-----------------------------------------------------------------------*/ #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,j,jl,i1,i2,jj,jj1,ns,ne,size,rest,sum,diagonal,distribute,P_marker,P_marker_offd,jj_counter,jj_counter_offd,sgn,c_num,jj_begin_row,jj_end_row,jj_begin_row_offd,jj_end_row_offd) HYPRE_SMP_SCHEDULE #endif for (jl = 0; jl < num_threads; jl++) { size = n_fine/num_threads; rest = n_fine - size*num_threads; if (jl < rest) { ns = jl*size+jl; ne = (jl+1)*size+jl+1; } else { ns = jl*size+rest; ne = (jl+1)*size+rest; } jj_counter = 0; if (jl > 0) jj_counter = jj_count[jl-1]; jj_counter_offd = 0; if (jl > 0) jj_counter_offd = jj_count_offd[jl-1]; P_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); if (num_cols_A_offd) P_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST); else P_marker_offd = NULL; for (i = 0; i < n_fine; i++) { P_marker[i] = -1; } for (i = 0; i < num_cols_A_offd; i++) { P_marker_offd[i] = -1; } for (i = ns; i < ne; i++) { /*-------------------------------------------------------------------- * If i is a c-point, interpolation is the identity. *--------------------------------------------------------------------*/ if (CF_marker[i] >= 0) { P_diag_i[i] = jj_counter; P_diag_j[jj_counter] = fine_to_coarse[i]; P_diag_data[jj_counter] = one; jj_counter++; } /*-------------------------------------------------------------------- * If i is an F-point, build interpolation. *--------------------------------------------------------------------*/ else { /* Diagonal part of P */ P_diag_i[i] = jj_counter; jj_begin_row = jj_counter; for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++) { i1 = S_diag_j[jj]; /*-------------------------------------------------------------- * If neighbor i1 is a C-point, set column number in P_diag_j * and initialize interpolation weight to zero. 
*--------------------------------------------------------------*/ if (CF_marker[i1] >= 0) { P_marker[i1] = jj_counter; P_diag_j[jj_counter] = fine_to_coarse[i1]; P_diag_data[jj_counter] = zero; jj_counter++; } } jj_end_row = jj_counter; /* Off-Diagonal part of P */ P_offd_i[i] = jj_counter_offd; jj_begin_row_offd = jj_counter_offd; if (num_procs > 1) { if (col_offd_S_to_A) { for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++) { i1 = col_offd_S_to_A[S_offd_j[jj]]; /*----------------------------------------------------------- * If neighbor i1 is a C-point, set column number in P_offd_j * and initialize interpolation weight to zero. *-----------------------------------------------------------*/ if (CF_marker_offd[i1] >= 0) { P_marker_offd[i1] = jj_counter_offd; P_offd_j[jj_counter_offd] = i1; P_offd_data[jj_counter_offd] = zero; jj_counter_offd++; } } } else { for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++) { i1 = S_offd_j[jj]; /*----------------------------------------------------------- * If neighbor i1 is a C-point, set column number in P_offd_j * and initialize interpolation weight to zero. *-----------------------------------------------------------*/ if (CF_marker_offd[i1] >= 0) { P_marker_offd[i1] = jj_counter_offd; P_offd_j[jj_counter_offd] = i1; P_offd_data[jj_counter_offd] = zero; jj_counter_offd++; } } } } jj_end_row_offd = jj_counter_offd; diagonal = A_diag_data[A_diag_i[i]]; /* Loop over ith row of A. First, the diagonal part of A */ for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { i1 = A_diag_j[jj]; /*-------------------------------------------------------------- * Case 1: neighbor i1 is a C-point and strongly influences i, * accumulate a_{i,i1} into the interpolation weight. 
*--------------------------------------------------------------*/ if (P_marker[i1] >= jj_begin_row) { P_diag_data[P_marker[i1]] += A_diag_data[jj]; } /*-------------------------------------------------------------- * Case 2: neighbor i1 is an F-point and influences i, * distribute a_{i,i1} to C-points that strongly influence i. * Note: currently no distribution to the diagonal in this case. *--------------------------------------------------------------*/ else { sum = zero; /*----------------------------------------------------------- * Loop over row of A for point i1 and calculate the sum * of the connections to c-points that strongly influence i. *-----------------------------------------------------------*/ sgn = 1; if (A_diag_data[A_diag_i[i1]] < 0) sgn = -1; /* Diagonal block part of row i1 */ for (jj1 = A_diag_i[i1]; jj1 < A_diag_i[i1+1]; jj1++) { i2 = A_diag_j[jj1]; if (P_marker[i2] >= jj_begin_row && (sgn*A_diag_data[jj1]) < 0) { sum += A_diag_data[jj1]; } } /* Off-Diagonal block part of row i1 */ if (num_procs > 1) { for (jj1 = A_offd_i[i1]; jj1 < A_offd_i[i1+1]; jj1++) { i2 = A_offd_j[jj1]; if (P_marker_offd[i2] >= jj_begin_row_offd && (sgn*A_offd_data[jj1]) < 0) { sum += A_offd_data[jj1]; } } } if (sum != 0) { distribute = A_diag_data[jj] / sum; /*----------------------------------------------------------- * Loop over row of A for point i1 and do the distribution. 
*-----------------------------------------------------------*/ /* Diagonal block part of row i1 */ for (jj1 = A_diag_i[i1]; jj1 < A_diag_i[i1+1]; jj1++) { i2 = A_diag_j[jj1]; if (P_marker[i2] >= jj_begin_row && (sgn*A_diag_data[jj1]) < 0) { P_diag_data[P_marker[i2]] += distribute * A_diag_data[jj1]; } } /* Off-Diagonal block part of row i1 */ if (num_procs > 1) { for (jj1 = A_offd_i[i1]; jj1 < A_offd_i[i1+1]; jj1++) { i2 = A_offd_j[jj1]; if (P_marker_offd[i2] >= jj_begin_row_offd && (sgn*A_offd_data[jj1]) < 0) { P_offd_data[P_marker_offd[i2]] += distribute * A_offd_data[jj1]; } } } } else { if (num_functions == 1 || dof_func[i] == dof_func[i1]) diagonal += A_diag_data[jj]; } } } /*---------------------------------------------------------------- * Still looping over ith row of A. Next, loop over the * off-diagonal part of A *---------------------------------------------------------------*/ if (num_procs > 1) { for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { i1 = A_offd_j[jj]; /*-------------------------------------------------------------- * Case 1: neighbor i1 is a C-point and strongly influences i, * accumulate a_{i,i1} into the interpolation weight. *--------------------------------------------------------------*/ if (P_marker_offd[i1] >= jj_begin_row_offd) { P_offd_data[P_marker_offd[i1]] += A_offd_data[jj]; } /*------------------------------------------------------------ * Case 2: neighbor i1 is an F-point and influences i, * distribute a_{i,i1} to C-points that strongly infuence i. * Note: currently no distribution to the diagonal in this case. *-----------------------------------------------------------*/ else { sum = zero; /*--------------------------------------------------------- * Loop over row of A_ext for point i1 and calculate the sum * of the connections to c-points that strongly influence i. 
*---------------------------------------------------------*/ /* find row number */ c_num = A_offd_j[jj]; sgn = 1; if (A_ext_data[A_ext_i[c_num]] < 0) sgn = -1; for (jj1 = A_ext_i[c_num]; jj1 < A_ext_i[c_num+1]; jj1++) { i2 = (HYPRE_Int)A_ext_j[jj1]; if (i2 > -1) { /* in the diagonal block */ if (P_marker[i2] >= jj_begin_row && (sgn*A_ext_data[jj1]) < 0) { sum += A_ext_data[jj1]; } } else { /* in the off_diagonal block */ if (P_marker_offd[-i2-1] >= jj_begin_row_offd && (sgn*A_ext_data[jj1]) < 0) { sum += A_ext_data[jj1]; } } } if (sum != 0) { distribute = A_offd_data[jj] / sum; /*--------------------------------------------------------- * Loop over row of A_ext for point i1 and do * the distribution. *--------------------------------------------------------*/ /* Diagonal block part of row i1 */ for (jj1 = A_ext_i[c_num]; jj1 < A_ext_i[c_num+1]; jj1++) { i2 = (HYPRE_Int)A_ext_j[jj1]; if (i2 > -1) /* in the diagonal block */ { if (P_marker[i2] >= jj_begin_row && (sgn*A_ext_data[jj1]) < 0) { P_diag_data[P_marker[i2]] += distribute * A_ext_data[jj1]; } } else { /* in the off_diagonal block */ if (P_marker_offd[-i2-1] >= jj_begin_row_offd && (sgn*A_ext_data[jj1]) < 0) P_offd_data[P_marker_offd[-i2-1]] += distribute * A_ext_data[jj1]; } } } else { if (num_functions == 1 || dof_func[i] == dof_func_offd[i1]) diagonal += A_offd_data[jj]; } } } } /*----------------------------------------------------------------- * Set interpolation weight by dividing by the diagonal. 
*-----------------------------------------------------------------*/ for (jj = jj_begin_row; jj < jj_end_row; jj++) { P_diag_data[jj] /= -diagonal; } for (jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++) { P_offd_data[jj] /= -diagonal; } } P_offd_i[i+1] = jj_counter_offd; } hypre_TFree(P_marker, HYPRE_MEMORY_HOST); hypre_TFree(P_marker_offd, HYPRE_MEMORY_HOST); } P = hypre_ParCSRMatrixCreate(comm, hypre_ParCSRMatrixGlobalNumRows(A), total_global_cpts, hypre_ParCSRMatrixColStarts(A), num_cpts_global, 0, P_diag_i[n_fine], P_offd_i[n_fine]); P_diag = hypre_ParCSRMatrixDiag(P); hypre_CSRMatrixData(P_diag) = P_diag_data; hypre_CSRMatrixI(P_diag) = P_diag_i; hypre_CSRMatrixJ(P_diag) = P_diag_j; P_offd = hypre_ParCSRMatrixOffd(P); hypre_CSRMatrixData(P_offd) = P_offd_data; hypre_CSRMatrixI(P_offd) = P_offd_i; hypre_CSRMatrixJ(P_offd) = P_offd_j; hypre_ParCSRMatrixOwnsRowStarts(P) = 0; /* Compress P, removing coefficients smaller than trunc_factor * Max */ if (trunc_factor != 0.0 || max_elmts > 0) { hypre_BoomerAMGInterpTruncation(P, trunc_factor, max_elmts); P_diag_data = hypre_CSRMatrixData(P_diag); P_diag_i = hypre_CSRMatrixI(P_diag); P_diag_j = hypre_CSRMatrixJ(P_diag); P_offd_data = hypre_CSRMatrixData(P_offd); P_offd_i = hypre_CSRMatrixI(P_offd); P_offd_j = hypre_CSRMatrixJ(P_offd); P_diag_size = P_diag_i[n_fine]; P_offd_size = P_offd_i[n_fine]; } num_cols_P_offd = 0; if (P_offd_size) { P_marker = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST); #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i=0; i < num_cols_A_offd; i++) P_marker[i] = 0; num_cols_P_offd = 0; for (i=0; i < P_offd_size; i++) { index = P_offd_j[i]; if (!P_marker[index]) { num_cols_P_offd++; P_marker[index] = 1; } } col_map_offd_P = hypre_CTAlloc(HYPRE_BigInt, num_cols_P_offd, HYPRE_MEMORY_HOST); tmp_map_offd = hypre_CTAlloc(HYPRE_Int, num_cols_P_offd, HYPRE_MEMORY_HOST); index = 0; for (i=0; i < num_cols_P_offd; i++) { while 
(P_marker[index]==0) index++;
         tmp_map_offd[i] = index++;
      }

#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
      /* Remap P's off-diagonal column indices from A's off-diag numbering
         to P's compressed off-diag numbering (tmp_map_offd is sorted). */
      for (i=0; i < P_offd_size; i++)
         P_offd_j[i] = hypre_BinarySearch(tmp_map_offd,
                                          P_offd_j[i],
                                          num_cols_P_offd);
      hypre_TFree(P_marker, HYPRE_MEMORY_HOST);
   }

   /* -3 was used above as a temporary CF flag; restore the standard
      F-point value (-1). */
   for (i=0; i < n_fine; i++)
      if (CF_marker[i] == -3) CF_marker[i] = -1;

   if (num_cols_P_offd)
   {
      hypre_ParCSRMatrixColMapOffd(P) = col_map_offd_P;
      hypre_CSRMatrixNumCols(P_offd) = num_cols_P_offd;
   }

   hypre_GetCommPkgRTFromCommPkgA(P,A,fine_to_coarse, tmp_map_offd);

   *P_ptr = P;

   /* Release all work storage. */
   hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST);
   hypre_TFree(dof_func_offd, HYPRE_MEMORY_HOST);
   hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST);
   hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST);
   hypre_TFree(tmp_map_offd, HYPRE_MEMORY_HOST);
   hypre_TFree(coarse_counter, HYPRE_MEMORY_HOST);
   hypre_TFree(jj_count, HYPRE_MEMORY_HOST);
   hypre_TFree(jj_count_offd, HYPRE_MEMORY_HOST);

   if (num_procs > 1)
   {
      hypre_CSRMatrixDestroy(A_ext);
   }

   return hypre_error_flag;
}

/*---------------------------------------------------------------------------
 * hypre_BoomerAMGBuildDirInterpHost
 *
 * Host (CPU) implementation of direct interpolation.  Builds the
 * interpolation operator P from A, the strength matrix S, and the C/F
 * splitting in CF_marker:
 *   - C-points interpolate by the identity;
 *   - each F-point i interpolates from the C-points that strongly
 *     influence i, with weights formed from a_{i,i1} and rescaled so that
 *     positive and negative row sums are preserved separately (alfa/beta).
 * On return, *P_ptr holds the new hypre_ParCSRMatrix; P is optionally
 * truncated by trunc_factor / max_elmts.
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_BoomerAMGBuildDirInterpHost( hypre_ParCSRMatrix   *A,
                                   HYPRE_Int            *CF_marker,
                                   hypre_ParCSRMatrix   *S,
                                   HYPRE_BigInt         *num_cpts_global,
                                   HYPRE_Int             num_functions,
                                   HYPRE_Int            *dof_func,
                                   HYPRE_Int             debug_flag,
                                   HYPRE_Real            trunc_factor,
                                   HYPRE_Int             max_elmts,
                                   HYPRE_Int            *col_offd_S_to_A,
                                   hypre_ParCSRMatrix  **P_ptr)
{
   MPI_Comm                comm = hypre_ParCSRMatrixComm(A);
   hypre_ParCSRCommPkg    *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   hypre_ParCSRCommHandle *comm_handle;

   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   HYPRE_Real      *A_diag_data = hypre_CSRMatrixData(A_diag);
   HYPRE_Int       *A_diag_i = hypre_CSRMatrixI(A_diag);
   HYPRE_Int       *A_diag_j = hypre_CSRMatrixJ(A_diag);

   hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
   HYPRE_Real      *A_offd_data = hypre_CSRMatrixData(A_offd);
   HYPRE_Int       *A_offd_i = hypre_CSRMatrixI(A_offd);
   HYPRE_Int       *A_offd_j = hypre_CSRMatrixJ(A_offd);
   HYPRE_Int        num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd);

   hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S);
   HYPRE_Int       *S_diag_i = hypre_CSRMatrixI(S_diag);
   HYPRE_Int       *S_diag_j = hypre_CSRMatrixJ(S_diag);

   hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S);
   HYPRE_Int       *S_offd_i = hypre_CSRMatrixI(S_offd);
   HYPRE_Int       *S_offd_j = hypre_CSRMatrixJ(S_offd);

   hypre_ParCSRMatrix *P;
   HYPRE_BigInt       *col_map_offd_P;
   HYPRE_Int          *tmp_map_offd = NULL;

   HYPRE_Int       *CF_marker_offd = NULL;   /* CF_marker for ghost columns */
   HYPRE_Int       *dof_func_offd = NULL;    /* dof_func for ghost columns  */

   hypre_CSRMatrix *P_diag;
   hypre_CSRMatrix *P_offd;

   HYPRE_Real      *P_diag_data;
   HYPRE_Int       *P_diag_i;
   HYPRE_Int       *P_diag_j;
   HYPRE_Real      *P_offd_data;
   HYPRE_Int       *P_offd_i;
   HYPRE_Int       *P_offd_j;

   HYPRE_Int        P_diag_size, P_offd_size;

   HYPRE_Int        jj_counter,jj_counter_offd;
   HYPRE_Int       *jj_count, *jj_count_offd;
   HYPRE_Int        jj_begin_row,jj_begin_row_offd;
   HYPRE_Int        jj_end_row,jj_end_row_offd;

   HYPRE_Int        start_indexing = 0; /* start indexing for P_data at 0 */

   HYPRE_Int        n_fine = hypre_CSRMatrixNumRows(A_diag);

   HYPRE_Int       *fine_to_coarse;
   HYPRE_Int       *coarse_counter;   /* per-thread C-point counts */
   HYPRE_Int        coarse_shift;
   HYPRE_BigInt     total_global_cpts;
   HYPRE_Int        num_cols_P_offd;
   //HYPRE_BigInt my_first_cpt;

   HYPRE_Int        i,i1;
   HYPRE_Int        j,jl,jj;
   HYPRE_Int        start;

   HYPRE_Real       diagonal;
   HYPRE_Real       sum_N_pos, sum_P_pos;  /* positive row sums: all / interpolatory entries */
   HYPRE_Real       sum_N_neg, sum_P_neg;  /* negative row sums: all / interpolatory entries */
   HYPRE_Real       alfa = 1.0;            /* scaling for negative weights */
   HYPRE_Real       beta = 1.0;            /* scaling for positive weights */

   HYPRE_Real       zero = 0.0;
   HYPRE_Real       one = 1.0;

   HYPRE_Int        my_id;
   HYPRE_Int        num_procs;
   HYPRE_Int        num_threads;
   HYPRE_Int        num_sends;
   HYPRE_Int        index;
   HYPRE_Int        ns, ne, size, rest;
   HYPRE_Int       *int_buf_data;

   HYPRE_Real       wall_time;  /* for debugging instrumentation  */

   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_MPI_Comm_rank(comm,&my_id);
   num_threads = hypre_NumThreads();

#ifdef HYPRE_NO_GLOBAL_PARTITION
   //my_first_cpt = num_cpts_global[0];
   /* Last rank knows the global C-point count; broadcast it to all. */
   if (my_id == (num_procs -1)) total_global_cpts = num_cpts_global[1];
   hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs-1, comm);
#else
   //my_first_cpt = num_cpts_global[my_id];
   total_global_cpts = num_cpts_global[num_procs];
#endif

   /*-------------------------------------------------------------------
    * Get the CF_marker data for the off-processor columns
    *-------------------------------------------------------------------*/

   if (debug_flag==4) wall_time = time_getWallclockSeconds();

   if (num_cols_A_offd) CF_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
   if (num_functions > 1 && num_cols_A_offd)
      dof_func_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);

   if (!comm_pkg)
   {
      hypre_MatvecCommPkgCreate(A);
      comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   }

   num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
   int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST);

   index = 0;
   for (i = 0; i < num_sends; i++)
   {
      start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
      for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
         int_buf_data[index++] = CF_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
   }

   comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data, CF_marker_offd);
   hypre_ParCSRCommHandleDestroy(comm_handle);

   if (num_functions > 1)
   {
      /* Same exchange for the dof/function map (systems case). */
      index = 0;
      for (i = 0; i < num_sends; i++)
      {
         start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
         for (j=start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
            int_buf_data[index++] = dof_func[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
      }

      comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data, dof_func_offd);
      hypre_ParCSRCommHandleDestroy(comm_handle);
   }

   if (debug_flag==4)
   {
      wall_time = time_getWallclockSeconds() - wall_time;
      hypre_printf("Proc = %d Interp: Comm 1 CF_marker = %f\n", my_id, wall_time);
      fflush(NULL);
   }

   /*-----------------------------------------------------------------------
    * First Pass: Determine size of P and fill in fine_to_coarse mapping.
    *-----------------------------------------------------------------------*/

   /*-----------------------------------------------------------------------
    * Initialize counters and allocate mapping vector.
    *-----------------------------------------------------------------------*/

   coarse_counter = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
   jj_count = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
   jj_count_offd = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);

   fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
   for (i = 0; i < n_fine; i++) fine_to_coarse[i] = -1;

   jj_counter = start_indexing;
   jj_counter_offd = start_indexing;

   /*-----------------------------------------------------------------------
    * Loop over fine grid.  Each thread counts nonzeros for its own
    * contiguous slice of rows [ns, ne).
    *-----------------------------------------------------------------------*/

   /* RDF: this looks a little tricky, but doable */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,i1,jj,ns,ne,size,rest) HYPRE_SMP_SCHEDULE
#endif
   for (j = 0; j < num_threads; j++)
   {
      size = n_fine/num_threads;
      rest = n_fine - size*num_threads;

      if (j < rest)
      {
         ns = j*size+j;
         ne = (j+1)*size+j+1;
      }
      else
      {
         ns = j*size+rest;
         ne = (j+1)*size+rest;
      }
      for (i = ns; i < ne; i++)
      {
         /*--------------------------------------------------------------------
          * If i is a C-point, interpolation is the identity. Also set up
          * mapping vector.
          *--------------------------------------------------------------------*/

         if (CF_marker[i] >= 0)
         {
            jj_count[j]++;
            fine_to_coarse[i] = coarse_counter[j];
            coarse_counter[j]++;
         }

         /*--------------------------------------------------------------------
          * If i is an F-point, interpolation is from the C-points that
          * strongly influence i.
          *
          * NOTE(review): this pass counts entries with CF_marker > 0 while
          * the fill pass below tests CF_marker >= 0 — these agree only if
          * CF_marker never takes the value 0 here; confirm upstream.
          *--------------------------------------------------------------------*/

         else
         {
            for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
            {
               i1 = S_diag_j[jj];
               if (CF_marker[i1] > 0)
               {
                  jj_count[j]++;
               }
            }

            if (num_procs > 1)
            {
               if (col_offd_S_to_A)
               {
                  /* S's off-diag columns are a subset of A's; map them. */
                  for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
                  {
                     i1 = col_offd_S_to_A[S_offd_j[jj]];
                     if (CF_marker_offd[i1] > 0)
                     {
                        jj_count_offd[j]++;
                     }
                  }
               }
               else
               {
                  for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
                  {
                     i1 = S_offd_j[jj];
                     if (CF_marker_offd[i1] > 0)
                     {
                        jj_count_offd[j]++;
                     }
                  }
               }
            }
         }
      }
   }

   /*-----------------------------------------------------------------------
    * Allocate arrays (prefix-sum the per-thread counts first).
    *-----------------------------------------------------------------------*/

   for (i=0; i < num_threads-1; i++)
   {
      coarse_counter[i+1] += coarse_counter[i];
      jj_count[i+1] += jj_count[i];
      jj_count_offd[i+1] += jj_count_offd[i];
   }
   i = num_threads-1;
   jj_counter = jj_count[i];
   jj_counter_offd = jj_count_offd[i];

   P_diag_size = jj_counter;

   P_diag_i    = hypre_CTAlloc(HYPRE_Int, n_fine+1, HYPRE_MEMORY_DEVICE);
   P_diag_j    = hypre_CTAlloc(HYPRE_Int, P_diag_size, HYPRE_MEMORY_DEVICE);
   P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size, HYPRE_MEMORY_DEVICE);

   P_diag_i[n_fine] = jj_counter;

   P_offd_size = jj_counter_offd;

   P_offd_i    = hypre_CTAlloc(HYPRE_Int, n_fine+1, HYPRE_MEMORY_DEVICE);
   P_offd_j    = hypre_CTAlloc(HYPRE_Int, P_offd_size, HYPRE_MEMORY_DEVICE);
   P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size, HYPRE_MEMORY_DEVICE);

   /*-----------------------------------------------------------------------
    * Initialize some stuff.
    *-----------------------------------------------------------------------*/

   jj_counter = start_indexing;
   jj_counter_offd = start_indexing;

   if (debug_flag==4)
   {
      wall_time = time_getWallclockSeconds() - wall_time;
      hypre_printf("Proc = %d Interp: Internal work 1 = %f\n", my_id, wall_time);
      fflush(NULL);
   }

   /*-----------------------------------------------------------------------
    * Send and receive fine_to_coarse info.
    *-----------------------------------------------------------------------*/

   if (debug_flag==4) wall_time = time_getWallclockSeconds();

   //fine_to_coarse_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);

   /* Shift each thread's local coarse numbering to a global-on-this-rank
      numbering using the prefix-summed coarse_counter. */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,ns,ne,size,rest,coarse_shift) HYPRE_SMP_SCHEDULE
#endif
   for (j = 0; j < num_threads; j++)
   {
      coarse_shift = 0;
      if (j > 0) coarse_shift = coarse_counter[j-1];
      size = n_fine/num_threads;
      rest = n_fine - size*num_threads;
      if (j < rest)
      {
         ns = j*size+j;
         ne = (j+1)*size+j+1;
      }
      else
      {
         ns = j*size+rest;
         ne = (j+1)*size+rest;
      }
      for (i = ns; i < ne; i++)
      {
         fine_to_coarse[i] += coarse_shift;
      }
   }

   /*index = 0;
   for (i = 0; i < num_sends; i++)
   {
      start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
      for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
         int_buf_data[index++] = fine_to_coarse[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
   }

   comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data, fine_to_coarse_offd);
   hypre_ParCSRCommHandleDestroy(comm_handle);

   if (debug_flag==4)
   {
      wall_time = time_getWallclockSeconds() - wall_time;
      hypre_printf("Proc = %d Interp: Comm 4 FineToCoarse = %f\n", my_id, wall_time);
      fflush(NULL);
   }*/

   if (debug_flag==4) wall_time = time_getWallclockSeconds();

   /*#ifdef HYPRE_USING_OPENMP
   #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
   #endif
   for (i = 0; i < n_fine; i++) fine_to_coarse[i] -= my_first_cpt;*/

   /*-----------------------------------------------------------------------
    * Second Pass: loop over fine grid points and fill in P.
    *
    * NOTE(review): alfa/beta are OpenMP-private here and keep their value
    * from the previously processed row of the same thread when a row has
    * sum_P_neg == 0 (resp. sum_P_pos == 0); for the very first such row of
    * a thread the private copy is uninitialized — verify this matches the
    * intended upstream behavior.
    *-----------------------------------------------------------------------*/

#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,jl,i1,jj,ns,ne,size,rest,diagonal,jj_counter,jj_counter_offd,jj_begin_row,jj_end_row,jj_begin_row_offd,jj_end_row_offd,sum_P_pos,sum_P_neg,sum_N_pos,sum_N_neg,alfa,beta) HYPRE_SMP_SCHEDULE
#endif
   for (jl = 0; jl < num_threads; jl++)
   {
      HYPRE_Int *P_marker, *P_marker_offd;
      size = n_fine/num_threads;
      rest = n_fine - size*num_threads;
      if (jl < rest)
      {
         ns = jl*size+jl;
         ne = (jl+1)*size+jl+1;
      }
      else
      {
         ns = jl*size+rest;
         ne = (jl+1)*size+rest;
      }
      jj_counter = 0;
      if (jl > 0) jj_counter = jj_count[jl-1];
      jj_counter_offd = 0;
      if (jl > 0) jj_counter_offd = jj_count_offd[jl-1];

      P_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
      if (num_cols_A_offd)
         P_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
      else
         P_marker_offd = NULL;

      for (i = 0; i < n_fine; i++)
      {
         P_marker[i] = -1;
      }
      for (i = 0; i < num_cols_A_offd; i++)
      {
         P_marker_offd[i] = -1;
      }

      for (i = ns; i < ne; i++)
      {
         /*--------------------------------------------------------------------
          * If i is a c-point, interpolation is the identity.
          *--------------------------------------------------------------------*/
         if (CF_marker[i] >= 0)
         {
            P_diag_i[i] = jj_counter;
            P_diag_j[jj_counter]    = fine_to_coarse[i];
            P_diag_data[jj_counter] = one;
            jj_counter++;
         }

         /*--------------------------------------------------------------------
          * If i is an F-point, build interpolation.
          *--------------------------------------------------------------------*/
         else
         {
            /* Diagonal part of P */
            P_diag_i[i] = jj_counter;
            jj_begin_row = jj_counter;
            for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
            {
               i1 = S_diag_j[jj];

               /*--------------------------------------------------------------
                * If neighbor i1 is a C-point, set column number in P_diag_j
                * and initialize interpolation weight to zero.
                *--------------------------------------------------------------*/

               if (CF_marker[i1] >= 0)
               {
                  P_marker[i1] = jj_counter;
                  P_diag_j[jj_counter]    = fine_to_coarse[i1];
                  P_diag_data[jj_counter] = zero;
                  jj_counter++;
               }
            }
            jj_end_row = jj_counter;

            /* Off-Diagonal part of P */
            P_offd_i[i] = jj_counter_offd;
            jj_begin_row_offd = jj_counter_offd;

            if (num_procs > 1)
            {
               if (col_offd_S_to_A)
               {
                  for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
                  {
                     i1 = col_offd_S_to_A[S_offd_j[jj]];

                     /*-----------------------------------------------------------
                      * If neighbor i1 is a C-point, set column number in P_offd_j
                      * and initialize interpolation weight to zero.
                      *-----------------------------------------------------------*/

                     if (CF_marker_offd[i1] >= 0)
                     {
                        P_marker_offd[i1] = jj_counter_offd;
                        P_offd_j[jj_counter_offd]    = i1;
                        P_offd_data[jj_counter_offd] = zero;
                        jj_counter_offd++;
                     }
                  }
               }
               else
               {
                  for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
                  {
                     i1 = S_offd_j[jj];

                     /*-----------------------------------------------------------
                      * If neighbor i1 is a C-point, set column number in P_offd_j
                      * and initialize interpolation weight to zero.
                      *-----------------------------------------------------------*/

                     if (CF_marker_offd[i1] >= 0)
                     {
                        P_marker_offd[i1] = jj_counter_offd;
                        P_offd_j[jj_counter_offd]    = i1;
                        P_offd_data[jj_counter_offd] = zero;
                        jj_counter_offd++;
                     }
                  }
               }
            }
            jj_end_row_offd = jj_counter_offd;

            /* First entry of the CSR row is the diagonal a_{ii}. */
            diagonal = A_diag_data[A_diag_i[i]];

            /* Loop over ith row of A.  First, the diagonal part of A */
            sum_N_pos = 0;
            sum_N_neg = 0;
            sum_P_pos = 0;
            sum_P_neg = 0;

            for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
            {
               i1 = A_diag_j[jj];
               if (num_functions == 1 || dof_func[i1] == dof_func[i])
               {
                  if (A_diag_data[jj] > 0)
                     sum_N_pos += A_diag_data[jj];
                  else
                     sum_N_neg += A_diag_data[jj];
               }
               /*--------------------------------------------------------------
                * Case 1: neighbor i1 is a C-point and strongly influences i,
                * accumulate a_{i,i1} into the interpolation weight.
                *--------------------------------------------------------------*/

               if (P_marker[i1] >= jj_begin_row)
               {
                  P_diag_data[P_marker[i1]] += A_diag_data[jj];
                  if (A_diag_data[jj] > 0)
                     sum_P_pos += A_diag_data[jj];
                  else
                     sum_P_neg += A_diag_data[jj];
               }
            }

            /*----------------------------------------------------------------
             * Still looping over ith row of A. Next, loop over the
             * off-diagonal part of A
             *---------------------------------------------------------------*/

            if (num_procs > 1)
            {
               for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
               {
                  i1 = A_offd_j[jj];
                  if (num_functions == 1 || dof_func_offd[i1] == dof_func[i])
                  {
                     if (A_offd_data[jj] > 0)
                        sum_N_pos += A_offd_data[jj];
                     else
                        sum_N_neg += A_offd_data[jj];
                  }

                  /*--------------------------------------------------------------
                   * Case 1: neighbor i1 is a C-point and strongly influences i,
                   * accumulate a_{i,i1} into the interpolation weight.
                   *--------------------------------------------------------------*/

                  if (P_marker_offd[i1] >= jj_begin_row_offd)
                  {
                     P_offd_data[P_marker_offd[i1]] += A_offd_data[jj];
                     if (A_offd_data[jj] > 0)
                        sum_P_pos += A_offd_data[jj];
                     else
                        sum_P_neg += A_offd_data[jj];
                  }
               }
            }

            /* Direct-interpolation scalings: distribute the full negative
               (resp. positive) row sum over the interpolatory entries. */
            if (sum_P_neg) alfa = sum_N_neg/sum_P_neg/diagonal;
            if (sum_P_pos) beta = sum_N_pos/sum_P_pos/diagonal;

            /*-----------------------------------------------------------------
             * Set interpolation weight by dividing by the diagonal.
             *-----------------------------------------------------------------*/

            for (jj = jj_begin_row; jj < jj_end_row; jj++)
            {
               if (P_diag_data[jj]> 0)
                  P_diag_data[jj] *= -beta;
               else
                  P_diag_data[jj] *= -alfa;
            }
            for (jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++)
            {
               if (P_offd_data[jj]> 0)
                  P_offd_data[jj] *= -beta;
               else
                  P_offd_data[jj] *= -alfa;
            }
         }
         P_offd_i[i+1] = jj_counter_offd;
      }
      hypre_TFree(P_marker, HYPRE_MEMORY_HOST);
      hypre_TFree(P_marker_offd, HYPRE_MEMORY_HOST);
   }

   P = hypre_ParCSRMatrixCreate(comm,
                                hypre_ParCSRMatrixGlobalNumRows(A),
                                total_global_cpts,
                                hypre_ParCSRMatrixColStarts(A),
                                num_cpts_global,
                                0,
                                P_diag_i[n_fine],
                                P_offd_i[n_fine]);

   P_diag = hypre_ParCSRMatrixDiag(P);
   hypre_CSRMatrixData(P_diag) = P_diag_data;
   hypre_CSRMatrixI(P_diag) = P_diag_i;
   hypre_CSRMatrixJ(P_diag) = P_diag_j;
   P_offd = hypre_ParCSRMatrixOffd(P);
   hypre_CSRMatrixData(P_offd) = P_offd_data;
   hypre_CSRMatrixI(P_offd) = P_offd_i;
   hypre_CSRMatrixJ(P_offd) = P_offd_j;
   hypre_ParCSRMatrixOwnsRowStarts(P) = 0;

   /* Compress P, removing coefficients smaller than trunc_factor * Max */
   if (trunc_factor != 0.0 || max_elmts > 0)
   {
      hypre_BoomerAMGInterpTruncation(P, trunc_factor, max_elmts);
      P_diag_data = hypre_CSRMatrixData(P_diag);
      P_diag_i = hypre_CSRMatrixI(P_diag);
      P_diag_j = hypre_CSRMatrixJ(P_diag);
      P_offd_data = hypre_CSRMatrixData(P_offd);
      P_offd_i = hypre_CSRMatrixI(P_offd);
      P_offd_j = hypre_CSRMatrixJ(P_offd);
      P_diag_size = P_diag_i[n_fine];
      P_offd_size = P_offd_i[n_fine];
   }

   /* Compress P_offd's column space to only the columns actually used. */
   num_cols_P_offd = 0;
   if (P_offd_size)
   {
      HYPRE_Int *P_marker = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);

#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
      for (i=0; i < num_cols_A_offd; i++)
         P_marker[i] = 0;

      num_cols_P_offd = 0;
      for (i=0; i < P_offd_size; i++)
      {
         index = P_offd_j[i];
         if (!P_marker[index])
         {
            num_cols_P_offd++;
            P_marker[index] = 1;
         }
      }

      col_map_offd_P = hypre_CTAlloc(HYPRE_BigInt, num_cols_P_offd, HYPRE_MEMORY_HOST);
      tmp_map_offd = hypre_CTAlloc(HYPRE_Int, num_cols_P_offd, HYPRE_MEMORY_HOST);
      index = 0;
      for (i=0; i < num_cols_P_offd; i++)
      {
         while (P_marker[index]==0) index++;
         tmp_map_offd[i] = index++;
      }

#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
      for (i=0; i < P_offd_size; i++)
      {
         P_offd_j[i] = hypre_BinarySearch(tmp_map_offd,
                                          P_offd_j[i],
                                          num_cols_P_offd);
      }
      hypre_TFree(P_marker, HYPRE_MEMORY_HOST);
   }

   /* -3 was used as a temporary CF flag; restore the F-point value (-1). */
   for (i=0; i < n_fine; i++)
      if (CF_marker[i] == -3) CF_marker[i] = -1;

   if (num_cols_P_offd)
   {
      hypre_ParCSRMatrixColMapOffd(P) = col_map_offd_P;
      hypre_CSRMatrixNumCols(P_offd) = num_cols_P_offd;
   }

   hypre_GetCommPkgRTFromCommPkgA(P, A, fine_to_coarse, tmp_map_offd);

   *P_ptr = P;

   /* Release all work storage. */
   hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST);
   hypre_TFree(dof_func_offd, HYPRE_MEMORY_HOST);
   hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST);
   hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST);
   hypre_TFree(tmp_map_offd, HYPRE_MEMORY_HOST);
   hypre_TFree(coarse_counter, HYPRE_MEMORY_HOST);
   hypre_TFree(jj_count, HYPRE_MEMORY_HOST);
   hypre_TFree(jj_count_offd, HYPRE_MEMORY_HOST);

   return hypre_error_flag;
}

/*---------------------------------------------------------------------------
 * hypre_BoomerAMGBuildDirInterp
 *
 * Dispatch wrapper: selects the device implementation when A's diagonal
 * block lives in device memory (CUDA builds), otherwise the host one.
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_BoomerAMGBuildDirInterp( hypre_ParCSRMatrix   *A,
                               HYPRE_Int            *CF_marker,
                               hypre_ParCSRMatrix   *S,
                               HYPRE_BigInt         *num_cpts_global,
                               HYPRE_Int             num_functions,
                               HYPRE_Int            *dof_func,
                               HYPRE_Int             debug_flag,
                               HYPRE_Real            trunc_factor,
                               HYPRE_Int             max_elmts,
                               HYPRE_Int            *col_offd_S_to_A,
                               HYPRE_Int             interp_type,
                               hypre_ParCSRMatrix  **P_ptr)
{
#if defined(HYPRE_USING_CUDA)
   hypre_NvtxPushRange("DirInterp");
#endif

   HYPRE_Int ierr = 0;

#if defined(HYPRE_USING_CUDA)
   HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy1( hypre_CSRMatrixMemoryLocation(hypre_ParCSRMatrixDiag(A)) );

   if (exec == HYPRE_EXEC_DEVICE)
   {
      ierr = hypre_BoomerAMGBuildDirInterpDevice(A,CF_marker,S,num_cpts_global,num_functions,dof_func,
                                                 debug_flag,trunc_factor,max_elmts,col_offd_S_to_A,
                                                 interp_type, P_ptr);
   }
   else
#endif
   {
      ierr = hypre_BoomerAMGBuildDirInterpHost(A,CF_marker,S,num_cpts_global,num_functions,dof_func,
debug_flag,trunc_factor,max_elmts,col_offd_S_to_A,
                                               P_ptr);
   }

#if defined(HYPRE_USING_CUDA)
   hypre_NvtxPopRange();
#endif

   return ierr;
}

/*------------------------------------------------
 * Drop entries in interpolation matrix P
 *
 * Thin wrapper around hypre_ParCSRMatrixTruncate
 * with P-specific defaults (rescale rows, use the
 * infinity norm of the row for the drop test).
 *------------------------------------------------*/

HYPRE_Int
hypre_BoomerAMGInterpTruncation( hypre_ParCSRMatrix *P,
                                 HYPRE_Real trunc_factor,
                                 HYPRE_Int max_elmts)
{
   HYPRE_Int rescale = 1;  // rescale P
   HYPRE_Int nrm_type = 0; // Use infty-norm of row to perform threshold dropping

   return hypre_ParCSRMatrixTruncate(P, trunc_factor, max_elmts, rescale, nrm_type);
}

/*---------------------------------------------------------------------------
 * hypre_BoomerAMGBuildInterpModUnk - this is a modified interpolation for the unknown approach.
 * here we need to pass in a strength matrix built on the entire matrix.
 *
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_BoomerAMGBuildInterpModUnk( hypre_ParCSRMatrix   *A,
                                  HYPRE_Int            *CF_marker,
                                  hypre_ParCSRMatrix   *S,
                                  HYPRE_BigInt         *num_cpts_global,
                                  HYPRE_Int             num_functions,
                                  HYPRE_Int            *dof_func,
                                  HYPRE_Int             debug_flag,
                                  HYPRE_Real            trunc_factor,
                                  HYPRE_Int             max_elmts,
                                  HYPRE_Int            *col_offd_S_to_A,
                                  hypre_ParCSRMatrix  **P_ptr)
{
   MPI_Comm                comm = hypre_ParCSRMatrixComm(A);
   hypre_ParCSRCommPkg    *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   hypre_ParCSRCommHandle *comm_handle;

   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   HYPRE_Real      *A_diag_data = hypre_CSRMatrixData(A_diag);
   HYPRE_Int       *A_diag_i = hypre_CSRMatrixI(A_diag);
   HYPRE_Int       *A_diag_j = hypre_CSRMatrixJ(A_diag);

   hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
   HYPRE_Real      *A_offd_data = hypre_CSRMatrixData(A_offd);
   HYPRE_Int       *A_offd_i = hypre_CSRMatrixI(A_offd);
   HYPRE_Int       *A_offd_j = hypre_CSRMatrixJ(A_offd);
   HYPRE_Int        num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd);
   HYPRE_BigInt    *col_map_offd = hypre_ParCSRMatrixColMapOffd(A);

   hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S);
   HYPRE_Int       *S_diag_i =
hypre_CSRMatrixI(S_diag); HYPRE_Int *S_diag_j = hypre_CSRMatrixJ(S_diag); hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S); HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd); HYPRE_Int *S_offd_j = hypre_CSRMatrixJ(S_offd); hypre_ParCSRMatrix *P; HYPRE_BigInt *col_map_offd_P; HYPRE_Int *tmp_map_offd; HYPRE_Int *CF_marker_offd = NULL; HYPRE_Int *dof_func_offd = NULL; hypre_CSRMatrix *A_ext; HYPRE_Real *A_ext_data = NULL; HYPRE_Int *A_ext_i = NULL; HYPRE_BigInt *A_ext_j = NULL; hypre_CSRMatrix *P_diag; hypre_CSRMatrix *P_offd; HYPRE_Real *P_diag_data; HYPRE_Int *P_diag_i; HYPRE_Int *P_diag_j; HYPRE_Real *P_offd_data; HYPRE_Int *P_offd_i; HYPRE_Int *P_offd_j; HYPRE_Int P_diag_size, P_offd_size; HYPRE_Int *P_marker, *P_marker_offd; HYPRE_Int jj_counter,jj_counter_offd; HYPRE_Int *jj_count, *jj_count_offd; HYPRE_Int jj_begin_row,jj_begin_row_offd; HYPRE_Int jj_end_row,jj_end_row_offd; HYPRE_Int start_indexing = 0; /* start indexing for P_data at 0 */ HYPRE_Int n_fine = hypre_CSRMatrixNumRows(A_diag); HYPRE_Int strong_f_marker; HYPRE_Int *fine_to_coarse; //HYPRE_Int *fine_to_coarse_offd; HYPRE_Int *coarse_counter; HYPRE_Int coarse_shift; HYPRE_BigInt total_global_cpts; HYPRE_Int num_cols_P_offd; //HYPRE_BigInt my_first_cpt; HYPRE_Int i,i1,i2; HYPRE_Int j,jl,jj,jj1; HYPRE_Int kc; HYPRE_BigInt big_k; HYPRE_Int start; HYPRE_Int sgn; HYPRE_Int c_num; HYPRE_Real diagonal; HYPRE_Real sum; HYPRE_Real distribute; HYPRE_Real zero = 0.0; HYPRE_Real one = 1.0; HYPRE_Int my_id; HYPRE_Int num_procs; HYPRE_Int num_threads; HYPRE_Int num_sends; HYPRE_Int index; HYPRE_Int ns, ne, size, rest; HYPRE_Int print_level = 0; HYPRE_Int *int_buf_data; HYPRE_BigInt col_1 = hypre_ParCSRMatrixFirstRowIndex(A); HYPRE_Int local_numrows = hypre_CSRMatrixNumRows(A_diag); HYPRE_BigInt col_n = col_1 + local_numrows; HYPRE_Real wall_time; /* for debugging instrumentation */ hypre_MPI_Comm_size(comm, &num_procs); hypre_MPI_Comm_rank(comm,&my_id); num_threads = hypre_NumThreads(); #ifdef 
HYPRE_NO_GLOBAL_PARTITION //my_first_cpt = num_cpts_global[0]; if (my_id == (num_procs -1)) total_global_cpts = num_cpts_global[1]; hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs-1, comm); #else //my_first_cpt = num_cpts_global[my_id]; total_global_cpts = num_cpts_global[num_procs]; #endif /*------------------------------------------------------------------- * Get the CF_marker data for the off-processor columns *-------------------------------------------------------------------*/ if (debug_flag < 0) { debug_flag = -debug_flag; print_level = 1; } if (debug_flag==4) wall_time = time_getWallclockSeconds(); if (num_cols_A_offd) CF_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST); if (num_functions > 1 && num_cols_A_offd) dof_func_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST); if (!comm_pkg) { hypre_MatvecCommPkgCreate(A); comm_pkg = hypre_ParCSRMatrixCommPkg(A); } num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST); index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++) int_buf_data[index++] = CF_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)]; } comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data, CF_marker_offd); hypre_ParCSRCommHandleDestroy(comm_handle); if (num_functions > 1) { index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j=start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++) int_buf_data[index++] = dof_func[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)]; } comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data, dof_func_offd); hypre_ParCSRCommHandleDestroy(comm_handle); } if (debug_flag==4) { wall_time = time_getWallclockSeconds() - wall_time; hypre_printf("Proc = 
%d Interp: Comm 1 CF_marker = %f\n", my_id, wall_time); fflush(NULL); } /*---------------------------------------------------------------------- * Get the ghost rows of A *---------------------------------------------------------------------*/ if (debug_flag==4) wall_time = time_getWallclockSeconds(); if (num_procs > 1) { A_ext = hypre_ParCSRMatrixExtractBExt(A,A,1); A_ext_i = hypre_CSRMatrixI(A_ext); A_ext_j = hypre_CSRMatrixBigJ(A_ext); A_ext_data = hypre_CSRMatrixData(A_ext); } index = 0; for (i=0; i < num_cols_A_offd; i++) { for (j=A_ext_i[i]; j < A_ext_i[i+1]; j++) { big_k = A_ext_j[j]; if (big_k >= col_1 && big_k < col_n) { A_ext_j[index] = big_k - col_1; A_ext_data[index++] = A_ext_data[j]; } else { kc = hypre_BigBinarySearch(col_map_offd,big_k,num_cols_A_offd); if (kc > -1) { A_ext_j[index] = (HYPRE_BigInt)(-kc-1); A_ext_data[index++] = A_ext_data[j]; } } } A_ext_i[i] = index; } for (i = num_cols_A_offd; i > 0; i--) A_ext_i[i] = A_ext_i[i-1]; if (num_procs > 1) A_ext_i[0] = 0; if (debug_flag==4) { wall_time = time_getWallclockSeconds() - wall_time; hypre_printf("Proc = %d Interp: Comm 2 Get A_ext = %f\n", my_id, wall_time); fflush(NULL); } /*----------------------------------------------------------------------- * First Pass: Determine size of P and fill in fine_to_coarse mapping. *-----------------------------------------------------------------------*/ /*----------------------------------------------------------------------- * Intialize counters and allocate mapping vector. 
*-----------------------------------------------------------------------*/ coarse_counter = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST); jj_count = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST); jj_count_offd = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST); fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < n_fine; i++) fine_to_coarse[i] = -1; jj_counter = start_indexing; jj_counter_offd = start_indexing; /*----------------------------------------------------------------------- * Loop over fine grid. *-----------------------------------------------------------------------*/ /* RDF: this looks a little tricky, but doable */ #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,j,i1,jj,ns,ne,size,rest) HYPRE_SMP_SCHEDULE #endif for (j = 0; j < num_threads; j++) { size = n_fine/num_threads; rest = n_fine - size*num_threads; if (j < rest) { ns = j*size+j; ne = (j+1)*size+j+1; } else { ns = j*size+rest; ne = (j+1)*size+rest; } for (i = ns; i < ne; i++) { /*-------------------------------------------------------------------- * If i is a C-point, interpolation is the identity. Also set up * mapping vector. *--------------------------------------------------------------------*/ if (CF_marker[i] >= 0) { jj_count[j]++; fine_to_coarse[i] = coarse_counter[j]; coarse_counter[j]++; } /*-------------------------------------------------------------------- * If i is an F-point, interpolation is from the C-points that * strongly influence i. 
*--------------------------------------------------------------------*/ else { for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++) { i1 = S_diag_j[jj]; if (CF_marker[i1] >= 0) { jj_count[j]++; } } if (num_procs > 1) { if (col_offd_S_to_A) { for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++) { i1 = col_offd_S_to_A[S_offd_j[jj]]; if (CF_marker_offd[i1] >= 0) { jj_count_offd[j]++; } } } else { for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++) { i1 = S_offd_j[jj]; if (CF_marker_offd[i1] >= 0) { jj_count_offd[j]++; } } } } } } } /*----------------------------------------------------------------------- * Allocate arrays. *-----------------------------------------------------------------------*/ for (i=0; i < num_threads-1; i++) { coarse_counter[i+1] += coarse_counter[i]; jj_count[i+1] += jj_count[i]; jj_count_offd[i+1] += jj_count_offd[i]; } i = num_threads-1; jj_counter = jj_count[i]; jj_counter_offd = jj_count_offd[i]; P_diag_size = jj_counter; P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, HYPRE_MEMORY_HOST); P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size, HYPRE_MEMORY_HOST); P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size, HYPRE_MEMORY_HOST); P_diag_i[n_fine] = jj_counter; P_offd_size = jj_counter_offd; P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, HYPRE_MEMORY_HOST); P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size, HYPRE_MEMORY_HOST); P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size, HYPRE_MEMORY_HOST); /*----------------------------------------------------------------------- * Intialize some stuff. *-----------------------------------------------------------------------*/ jj_counter = start_indexing; jj_counter_offd = start_indexing; if (debug_flag==4) { wall_time = time_getWallclockSeconds() - wall_time; hypre_printf("Proc = %d Interp: Internal work 1 = %f\n", my_id, wall_time); fflush(NULL); } /*----------------------------------------------------------------------- * Send and receive fine_to_coarse info. 
*-----------------------------------------------------------------------*/ if (debug_flag==4) wall_time = time_getWallclockSeconds(); //fine_to_coarse_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST); #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,j,ns,ne,size,rest,coarse_shift) HYPRE_SMP_SCHEDULE #endif for (j = 0; j < num_threads; j++) { coarse_shift = 0; if (j > 0) coarse_shift = coarse_counter[j-1]; size = n_fine/num_threads; rest = n_fine - size*num_threads; if (j < rest) { ns = j*size+j; ne = (j+1)*size+j+1; } else { ns = j*size+rest; ne = (j+1)*size+rest; } for (i = ns; i < ne; i++) fine_to_coarse[i] += coarse_shift; } /*index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++) int_buf_data[index++] = fine_to_coarse[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)]; } comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data, fine_to_coarse_offd); hypre_ParCSRCommHandleDestroy(comm_handle); if (debug_flag==4) { wall_time = time_getWallclockSeconds() - wall_time; hypre_printf("Proc = %d Interp: Comm 4 FineToCoarse = %f\n", my_id, wall_time); fflush(NULL); }*/ if (debug_flag==4) wall_time = time_getWallclockSeconds(); /*#ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < n_fine; i++) fine_to_coarse[i] -= my_first_cpt;*/ /*----------------------------------------------------------------------- * Loop over fine grid points. 
*-----------------------------------------------------------------------*/ #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,j,jl,i1,i2,jj,jj1,ns,ne,size,rest,sum,diagonal,distribute,P_marker,P_marker_offd,strong_f_marker,jj_counter,jj_counter_offd,sgn,c_num,jj_begin_row,jj_end_row,jj_begin_row_offd,jj_end_row_offd) HYPRE_SMP_SCHEDULE #endif for (jl = 0; jl < num_threads; jl++) { size = n_fine/num_threads; rest = n_fine - size*num_threads; if (jl < rest) { ns = jl*size+jl; ne = (jl+1)*size+jl+1; } else { ns = jl*size+rest; ne = (jl+1)*size+rest; } jj_counter = 0; if (jl > 0) jj_counter = jj_count[jl-1]; jj_counter_offd = 0; if (jl > 0) jj_counter_offd = jj_count_offd[jl-1]; P_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); if (num_cols_A_offd) P_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST); else P_marker_offd = NULL; for (i = 0; i < n_fine; i++) { P_marker[i] = -1; } for (i = 0; i < num_cols_A_offd; i++) { P_marker_offd[i] = -1; } strong_f_marker = -2; for (i = ns; i < ne; i++) { /*-------------------------------------------------------------------- * If i is a c-point, interpolation is the identity. *--------------------------------------------------------------------*/ if (CF_marker[i] >= 0) { P_diag_i[i] = jj_counter; P_diag_j[jj_counter] = fine_to_coarse[i]; P_diag_data[jj_counter] = one; jj_counter++; } /*-------------------------------------------------------------------- * If i is an F-point, build interpolation. *--------------------------------------------------------------------*/ else { /* Diagonal part of P */ P_diag_i[i] = jj_counter; jj_begin_row = jj_counter; for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++) { i1 = S_diag_j[jj]; /*-------------------------------------------------------------- * If neighbor i1 is a C-point, set column number in P_diag_j * and initialize interpolation weight to zero. 
*--------------------------------------------------------------*/ if (CF_marker[i1] >= 0) { P_marker[i1] = jj_counter; P_diag_j[jj_counter] = fine_to_coarse[i1]; P_diag_data[jj_counter] = zero; jj_counter++; } /*-------------------------------------------------------------- * If neighbor i1 is an F-point, mark it as a strong F-point * whose connection needs to be distributed. *--------------------------------------------------------------*/ else if (CF_marker[i1] != -3) { P_marker[i1] = strong_f_marker; } } jj_end_row = jj_counter; /* Off-Diagonal part of P */ P_offd_i[i] = jj_counter_offd; jj_begin_row_offd = jj_counter_offd; if (num_procs > 1) { if (col_offd_S_to_A) { for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++) { i1 = col_offd_S_to_A[S_offd_j[jj]]; /*----------------------------------------------------------- * If neighbor i1 is a C-point, set column number in P_offd_j * and initialize interpolation weight to zero. *-----------------------------------------------------------*/ if (CF_marker_offd[i1] >= 0) { P_marker_offd[i1] = jj_counter_offd; /*P_offd_j[jj_counter_offd] = fine_to_coarse_offd[i1];*/ P_offd_j[jj_counter_offd] = i1; P_offd_data[jj_counter_offd] = zero; jj_counter_offd++; } /*----------------------------------------------------------- * If neighbor i1 is an F-point, mark it as a strong F-point * whose connection needs to be distributed. *-----------------------------------------------------------*/ else if (CF_marker_offd[i1] != -3) { P_marker_offd[i1] = strong_f_marker; } } } else { for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++) { i1 = S_offd_j[jj]; /*----------------------------------------------------------- * If neighbor i1 is a C-point, set column number in P_offd_j * and initialize interpolation weight to zero. 
*-----------------------------------------------------------*/ if (CF_marker_offd[i1] >= 0) { P_marker_offd[i1] = jj_counter_offd; /*P_offd_j[jj_counter_offd] = fine_to_coarse_offd[i1];*/ P_offd_j[jj_counter_offd] = i1; P_offd_data[jj_counter_offd] = zero; jj_counter_offd++; } /*----------------------------------------------------------- * If neighbor i1 is an F-point, mark it as a strong F-point * whose connection needs to be distributed. *-----------------------------------------------------------*/ else if (CF_marker_offd[i1] != -3) { P_marker_offd[i1] = strong_f_marker; } } } } jj_end_row_offd = jj_counter_offd; diagonal = A_diag_data[A_diag_i[i]]; /* Loop over ith row of A. First, the diagonal part of A */ for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { i1 = A_diag_j[jj]; /*-------------------------------------------------------------- * Case 1: neighbor i1 is a C-point and strongly influences i, * accumulate a_{i,i1} into the interpolation weight. *--------------------------------------------------------------*/ if (P_marker[i1] >= jj_begin_row) { P_diag_data[P_marker[i1]] += A_diag_data[jj]; } /*-------------------------------------------------------------- * Case 2: neighbor i1 is an F-point and strongly influences i, * distribute a_{i,i1} to C-points that strongly infuence i. * Note: currently no distribution to the diagonal in this case. HERE, we only want to distribut to points of the SAME function type *--------------------------------------------------------------*/ else if (P_marker[i1] == strong_f_marker) { sum = zero; /*----------------------------------------------------------- * Loop over row of A for point i1 and calculate the sum * of the connections to c-points that strongly influence i. 
*-----------------------------------------------------------*/ sgn = 1; if (A_diag_data[A_diag_i[i1]] < 0) sgn = -1; /* Diagonal block part of row i1 */ for (jj1 = A_diag_i[i1]; jj1 < A_diag_i[i1+1]; jj1++) { i2 = A_diag_j[jj1]; if (num_functions == 1 || dof_func[i1] == dof_func[i2]) { if (P_marker[i2] >= jj_begin_row && (sgn*A_diag_data[jj1]) < 0 ) { sum += A_diag_data[jj1]; } } } /* Off-Diagonal block part of row i1 */ if (num_procs > 1) { for (jj1 = A_offd_i[i1]; jj1 < A_offd_i[i1+1]; jj1++) { i2 = A_offd_j[jj1]; if (num_functions == 1 || dof_func[i1] == dof_func[i2]) { if (P_marker_offd[i2] >= jj_begin_row_offd && (sgn*A_offd_data[jj1]) < 0) { sum += A_offd_data[jj1]; } } } } if (sum != 0) { distribute = A_diag_data[jj] / sum; /*----------------------------------------------------------- * Loop over row of A for point i1 and do the distribution. *-----------------------------------------------------------*/ /* Diagonal block part of row i1 */ for (jj1 = A_diag_i[i1]; jj1 < A_diag_i[i1+1]; jj1++) { i2 = A_diag_j[jj1]; if (num_functions == 1 || dof_func[i1] == dof_func[i2]) { if (P_marker[i2] >= jj_begin_row && (sgn*A_diag_data[jj1]) < 0) { P_diag_data[P_marker[i2]] += distribute * A_diag_data[jj1]; } } } /* Off-Diagonal block part of row i1 */ if (num_procs > 1) { for (jj1 = A_offd_i[i1]; jj1 < A_offd_i[i1+1]; jj1++) { i2 = A_offd_j[jj1]; if (num_functions == 1 || dof_func[i1] == dof_func[i2]) { if (P_marker_offd[i2] >= jj_begin_row_offd && (sgn*A_offd_data[jj1]) < 0) { P_offd_data[P_marker_offd[i2]] += distribute * A_offd_data[jj1]; } } } } } else /* sum = 0 - only add to diag if the same function type */ { if (num_functions == 1 || dof_func[i] == dof_func[i1]) diagonal += A_diag_data[jj]; } } /*-------------------------------------------------------------- * Case 3: neighbor i1 weakly influences i, accumulate a_{i,i1} * into the diagonal. 
(only if the same function type) *--------------------------------------------------------------*/ else if (CF_marker[i1] != -3) { if (num_functions == 1 || dof_func[i] == dof_func[i1]) diagonal += A_diag_data[jj]; } } /*---------------------------------------------------------------- * Still looping over ith row of A. Next, loop over the * off-diagonal part of A *---------------------------------------------------------------*/ if (num_procs > 1) { for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { i1 = A_offd_j[jj]; /*-------------------------------------------------------------- * Case 1: neighbor i1 is a C-point and strongly influences i, * accumulate a_{i,i1} into the interpolation weight. *--------------------------------------------------------------*/ if (P_marker_offd[i1] >= jj_begin_row_offd) { P_offd_data[P_marker_offd[i1]] += A_offd_data[jj]; } /*------------------------------------------------------------ * Case 2: neighbor i1 is an F-point and strongly influences i, * distribute a_{i,i1} to C-points that strongly infuence i. * Note: currently no distribution to the diagonal in this case. AGAIN, we only want to distribut to points of the SAME function type *-----------------------------------------------------------*/ else if (P_marker_offd[i1] == strong_f_marker) { sum = zero; /*--------------------------------------------------------- * Loop over row of A_ext for point i1 and calculate the sum * of the connections to c-points that strongly influence i. 
*---------------------------------------------------------*/ /* find row number */ c_num = A_offd_j[jj]; sgn = 1; if (A_ext_data[A_ext_i[c_num]] < 0) sgn = -1; for (jj1 = A_ext_i[c_num]; jj1 < A_ext_i[c_num+1]; jj1++) { i2 = (HYPRE_Int)A_ext_j[jj1]; if (num_functions == 1 || dof_func[i1] == dof_func[i2]) { if (i2 > -1) { /* in the diagonal block */ if (P_marker[i2] >= jj_begin_row && (sgn*A_ext_data[jj1]) < 0) { sum += A_ext_data[jj1]; } } else { /* in the off_diagonal block */ if (P_marker_offd[-i2-1] >= jj_begin_row_offd && (sgn*A_ext_data[jj1]) < 0) { sum += A_ext_data[jj1]; } } } } if (sum != 0) { distribute = A_offd_data[jj] / sum; /*--------------------------------------------------------- * Loop over row of A_ext for point i1 and do * the distribution. *--------------------------------------------------------*/ /* Diagonal block part of row i1 */ for (jj1 = A_ext_i[c_num]; jj1 < A_ext_i[c_num+1]; jj1++) { i2 = (HYPRE_Int)A_ext_j[jj1]; if (num_functions == 1 || dof_func[i1] == dof_func[i2]) { if (i2 > -1) /* in the diagonal block */ { if (P_marker[i2] >= jj_begin_row && (sgn*A_ext_data[jj1]) < 0) { P_diag_data[P_marker[i2]] += distribute * A_ext_data[jj1]; } } else { /* in the off_diagonal block */ if (P_marker_offd[-i2-1] >= jj_begin_row_offd && (sgn*A_ext_data[jj1]) < 0) P_offd_data[P_marker_offd[-i2-1]] += distribute * A_ext_data[jj1]; } } } } else /* sum = 0 */ { if (num_functions == 1 || dof_func[i] == dof_func_offd[i1]) diagonal += A_offd_data[jj]; } } /*----------------------------------------------------------- * Case 3: neighbor i1 weakly influences i, accumulate a_{i,i1} * into the diagonal. *-----------------------------------------------------------*/ else if (CF_marker_offd[i1] != -3) { if (num_functions == 1 || dof_func[i] == dof_func_offd[i1]) diagonal += A_offd_data[jj]; } } } /*----------------------------------------------------------------- * Set interpolation weight by dividing by the diagonal. 
*-----------------------------------------------------------------*/ if (diagonal == 0.0) { if (print_level) hypre_printf(" Warning! zero diagonal! Proc id %d row %d\n", my_id,i); for (jj = jj_begin_row; jj < jj_end_row; jj++) { P_diag_data[jj] = 0.0; } for (jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++) { P_offd_data[jj] = 0.0; } } else { for (jj = jj_begin_row; jj < jj_end_row; jj++) { P_diag_data[jj] /= -diagonal; } for (jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++) { P_offd_data[jj] /= -diagonal; } } } strong_f_marker--; P_offd_i[i+1] = jj_counter_offd; } hypre_TFree(P_marker, HYPRE_MEMORY_HOST); hypre_TFree(P_marker_offd, HYPRE_MEMORY_HOST); } P = hypre_ParCSRMatrixCreate(comm, hypre_ParCSRMatrixGlobalNumRows(A), total_global_cpts, hypre_ParCSRMatrixColStarts(A), num_cpts_global, 0, P_diag_i[n_fine], P_offd_i[n_fine]); P_diag = hypre_ParCSRMatrixDiag(P); hypre_CSRMatrixData(P_diag) = P_diag_data; hypre_CSRMatrixI(P_diag) = P_diag_i; hypre_CSRMatrixJ(P_diag) = P_diag_j; P_offd = hypre_ParCSRMatrixOffd(P); hypre_CSRMatrixData(P_offd) = P_offd_data; hypre_CSRMatrixI(P_offd) = P_offd_i; hypre_CSRMatrixJ(P_offd) = P_offd_j; hypre_ParCSRMatrixOwnsRowStarts(P) = 0; /* Compress P, removing coefficients smaller than trunc_factor * Max */ if (trunc_factor != 0.0 || max_elmts > 0) { hypre_BoomerAMGInterpTruncation(P, trunc_factor, max_elmts); P_diag_data = hypre_CSRMatrixData(P_diag); P_diag_i = hypre_CSRMatrixI(P_diag); P_diag_j = hypre_CSRMatrixJ(P_diag); P_offd_data = hypre_CSRMatrixData(P_offd); P_offd_i = hypre_CSRMatrixI(P_offd); P_offd_j = hypre_CSRMatrixJ(P_offd); P_diag_size = P_diag_i[n_fine]; P_offd_size = P_offd_i[n_fine]; } num_cols_P_offd = 0; if (P_offd_size) { P_marker = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST); #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i=0; i < num_cols_A_offd; i++) P_marker[i] = 0; num_cols_P_offd = 0; for (i=0; i < P_offd_size; i++) { index = P_offd_j[i]; 
         if (!P_marker[index])
         {
            /* first time this offd column of A is referenced by P */
            num_cols_P_offd++;
            P_marker[index] = 1;
         }
      }

      col_map_offd_P = hypre_CTAlloc(HYPRE_BigInt, num_cols_P_offd, HYPRE_MEMORY_HOST);
      tmp_map_offd = hypre_CTAlloc(HYPRE_Int, num_cols_P_offd, HYPRE_MEMORY_HOST);

      /* compress the 0/1 marker array into the sorted list of offd
       * columns of A that P actually uses */
      index = 0;
      for (i=0; i < num_cols_P_offd; i++)
      {
         while (P_marker[index]==0) index++;
         tmp_map_offd[i] = index++;
      }

      /* translate P_offd_j from A's offd numbering to P's compressed
       * offd numbering (tmp_map_offd is sorted, so binary search works) */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
      for (i=0; i < P_offd_size; i++)
         P_offd_j[i] = hypre_BinarySearch(tmp_map_offd,
                                          P_offd_j[i],
                                          num_cols_P_offd);
      hypre_TFree(P_marker, HYPRE_MEMORY_HOST);
   }

   /* restore special F-points (-3) to ordinary F-points (-1) */
   for (i=0; i < n_fine; i++)
      if (CF_marker[i] == -3) CF_marker[i] = -1;

   if (num_cols_P_offd)
   {
      hypre_ParCSRMatrixColMapOffd(P) = col_map_offd_P;
      hypre_CSRMatrixNumCols(P_offd) = num_cols_P_offd;
   }

   /* derive P's communication package from A's */
   hypre_GetCommPkgRTFromCommPkgA(P, A, fine_to_coarse, tmp_map_offd);

   *P_ptr = P;

   /* free workspace */
   hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST);
   hypre_TFree(dof_func_offd, HYPRE_MEMORY_HOST);
   hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST);
   hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST);
   hypre_TFree(tmp_map_offd, HYPRE_MEMORY_HOST);
   hypre_TFree(coarse_counter, HYPRE_MEMORY_HOST);
   hypre_TFree(jj_count, HYPRE_MEMORY_HOST);
   hypre_TFree(jj_count_offd, HYPRE_MEMORY_HOST);

   if (num_procs > 1) hypre_CSRMatrixDestroy(A_ext);

   return hypre_error_flag;
}

/*---------------------------------------------------------------------------
 * hypre_BoomerAMGTruncandBuild
 *
 * Truncates the interpolation matrix P (dropping coefficients smaller than
 * trunc_factor * the row maximum, and/or limiting entries per row via
 * max_elmts -- see hypre_BoomerAMGInterpTruncation), then compresses P's
 * off-diagonal column map to the surviving columns and rebuilds P's
 * matvec communication package.
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_BoomerAMGTruncandBuild( hypre_ParCSRMatrix   *P,
                              HYPRE_Real            trunc_factor,
                              HYPRE_Int             max_elmts)
{
   hypre_CSRMatrix     *P_offd       = hypre_ParCSRMatrixOffd(P);
   hypre_ParCSRCommPkg *commpkg_P    = hypre_ParCSRMatrixCommPkg(P);
   HYPRE_BigInt        *col_map_offd = hypre_ParCSRMatrixColMapOffd(P);
   HYPRE_Int           *P_offd_i     = hypre_CSRMatrixI(P_offd);
   HYPRE_Int           *P_offd_j     = hypre_CSRMatrixJ(P_offd);
   HYPRE_Int            num_cols_offd = hypre_CSRMatrixNumCols(P_offd);
   HYPRE_Int            n_fine        = hypre_CSRMatrixNumRows(P_offd);

   HYPRE_BigInt        *new_col_map_offd;
   HYPRE_Int           *tmp_map_offd = NULL;
   HYPRE_Int            P_offd_size=0, new_num_cols_offd;

   HYPRE_Int           *P_marker;
   HYPRE_Int            i;
   HYPRE_Int            index;

   /* Compress P, removing coefficients smaller than trunc_factor * Max */

   if (trunc_factor != 0.0 || max_elmts > 0)
   {
      hypre_BoomerAMGInterpTruncation(P, trunc_factor, max_elmts);
      /* truncation may reallocate P's arrays: re-fetch pointers */
      P_offd_j = hypre_CSRMatrixJ(P_offd);
      P_offd_i = hypre_CSRMatrixI(P_offd);
      P_offd_size = P_offd_i[n_fine];
   }

   new_num_cols_offd = 0;

   if (P_offd_size)
   {
      P_marker = hypre_CTAlloc(HYPRE_Int, num_cols_offd, HYPRE_MEMORY_HOST);

/*#define HYPRE_SMP_PRIVATE i
#include "../utilities/hypre_smp_forloop.h"*/
      for (i=0; i < num_cols_offd; i++)
         P_marker[i] = 0;

      /* mark the offd columns still referenced after truncation */
      for (i=0; i < P_offd_size; i++)
      {
         index = P_offd_j[i];
         if (!P_marker[index])
         {
            new_num_cols_offd++;
            P_marker[index] = 1;
         }
      }

      tmp_map_offd = hypre_CTAlloc(HYPRE_Int, new_num_cols_offd, HYPRE_MEMORY_HOST);
      new_col_map_offd = hypre_CTAlloc(HYPRE_BigInt, new_num_cols_offd, HYPRE_MEMORY_HOST);

      /* compress marker array into the sorted list of surviving columns */
      index = 0;
      for (i=0; i < new_num_cols_offd; i++)
      {
         while (P_marker[index]==0) index++;
         tmp_map_offd[i] = index++;
      }

/*#define HYPRE_SMP_PRIVATE i
#include "../utilities/hypre_smp_forloop.h"*/
      /* renumber P_offd_j into the compressed column space */
      for (i=0; i < P_offd_size; i++)
         P_offd_j[i] = hypre_BinarySearch(tmp_map_offd,
                                          P_offd_j[i],
                                          new_num_cols_offd);
   }

   /* build the new (global) off-diagonal column map from the old one;
    * loop body only runs when P_offd_size != 0, so P_marker is valid here */
   index = 0;
   for (i = 0; i < new_num_cols_offd; i++)
   {
      while (P_marker[index] == 0) index++;
      new_col_map_offd[i] = col_map_offd[index];
      index++;
   }

   if (P_offd_size) hypre_TFree(P_marker, HYPRE_MEMORY_HOST);

   if (new_num_cols_offd)
   {
      hypre_TFree(tmp_map_offd, HYPRE_MEMORY_HOST);
      hypre_TFree(col_map_offd, HYPRE_MEMORY_HOST);
      hypre_ParCSRMatrixColMapOffd(P) = new_col_map_offd;
      hypre_CSRMatrixNumCols(P_offd) = new_num_cols_offd;
   }

   /* rebuild P's communication package for the compressed column map */
   if (commpkg_P != NULL) hypre_MatvecCommPkgDestroy(commpkg_P);
   hypre_MatvecCommPkgCreate(P);

   return hypre_error_flag;
}

/*---------------------------------------------------------------------------
 * hypre_CreateC
 *
 * Builds C = I - w * D^{-1} * A (a weighted-Jacobi style operator):
 * the diagonal entries become 1-w and off-diagonals are scaled by
 * -w / a_ii.  When w == 0, the l1 row sum (sum of |a_ij| over the row,
 * diag and offd) is used in place of w * a_ii instead.
 * C shares A's row partitioning and off-diagonal column map.
 *--------------------------------------------------------------------------*/

hypre_ParCSRMatrix *hypre_CreateC( hypre_ParCSRMatrix  *A,
                                   HYPRE_Real           w)
{
   MPI_Comm comm = hypre_ParCSRMatrixComm(A);

   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   HYPRE_Real      *A_diag_data = hypre_CSRMatrixData(A_diag);
   HYPRE_Int       *A_diag_i = hypre_CSRMatrixI(A_diag);
   HYPRE_Int       *A_diag_j = hypre_CSRMatrixJ(A_diag);

   hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
   HYPRE_Real      *A_offd_data = hypre_CSRMatrixData(A_offd);
   HYPRE_Int       *A_offd_i = hypre_CSRMatrixI(A_offd);
   HYPRE_Int       *A_offd_j = hypre_CSRMatrixJ(A_offd);

   HYPRE_BigInt    *row_starts = hypre_ParCSRMatrixRowStarts(A);
   HYPRE_BigInt    *col_map_offd_A = hypre_ParCSRMatrixColMapOffd(A);
   HYPRE_Int        num_rows = hypre_CSRMatrixNumRows(A_diag);
   HYPRE_Int        num_cols_offd = hypre_CSRMatrixNumCols(A_offd);
   HYPRE_BigInt     global_num_rows = hypre_ParCSRMatrixGlobalNumRows(A);

   hypre_ParCSRMatrix *C;
   hypre_CSRMatrix    *C_diag;
   hypre_CSRMatrix    *C_offd;

   HYPRE_Real      *C_diag_data;
   HYPRE_Int       *C_diag_i;
   HYPRE_Int       *C_diag_j;

   HYPRE_Real      *C_offd_data;
   HYPRE_Int       *C_offd_i;
   HYPRE_Int       *C_offd_j;
   HYPRE_BigInt    *col_map_offd_C;

   HYPRE_Int        i, j, index;
   HYPRE_Real       invdiag;
   HYPRE_Real       w_local = w;

   /* C is square with A's row partitioning used for both rows and cols,
    * and has the same sparsity pattern as A */
   C = hypre_ParCSRMatrixCreate(comm, global_num_rows, global_num_rows,
                                row_starts, row_starts, num_cols_offd,
                                A_diag_i[num_rows], A_offd_i[num_rows]);

   hypre_ParCSRMatrixInitialize(C);

   C_diag = hypre_ParCSRMatrixDiag(C);
   C_offd = hypre_ParCSRMatrixOffd(C);

   C_diag_i = hypre_CSRMatrixI(C_diag);
   C_diag_j = hypre_CSRMatrixJ(C_diag);
   C_diag_data = hypre_CSRMatrixData(C_diag);
   C_offd_i = hypre_CSRMatrixI(C_offd);
   C_offd_j = hypre_CSRMatrixJ(C_offd);
   C_offd_data = hypre_CSRMatrixData(C_offd);

   col_map_offd_C = hypre_ParCSRMatrixColMapOffd(C);

   /* A owns the row/col starts; C only borrows them */
   hypre_ParCSRMatrixOwnsRowStarts(C) = 0;
   hypre_ParCSRMatrixOwnsColStarts(C) = 0;

   for (i=0; i < num_cols_offd; i++)
      col_map_offd_C[i] = col_map_offd_A[i];

   for (i=0; i < num_rows; i++)
   {
      /* CSR convention here: the diagonal entry is first in the row */
      index = A_diag_i[i];
      invdiag = -w/A_diag_data[index];
      C_diag_data[index] = 1.0-w;
      C_diag_j[index] = A_diag_j[index];
      if (w == 0)
      {
         /* w == 0: use the l1 row sum of |a_ij| in place of w * a_ii */
         w_local = fabs(A_diag_data[index]);
         for (j = index+1; j < A_diag_i[i+1]; j++)
            w_local += fabs(A_diag_data[j]);
         for (j = A_offd_i[i]; j < A_offd_i[i+1]; j++)
            w_local += fabs(A_offd_data[j]);
         invdiag = -1/w_local;
         C_diag_data[index] = 1.0-A_diag_data[index]/w_local;
      }
      C_diag_i[i] = index;
      C_offd_i[i] = A_offd_i[i];
      /* scale the off-diagonal entries of row i by -w / a_ii
       * (or -1 / l1-sum when w == 0) */
      for (j = index+1; j < A_diag_i[i+1]; j++)
      {
         C_diag_data[j] = A_diag_data[j]*invdiag;
         C_diag_j[j] = A_diag_j[j];
      }
      for (j = A_offd_i[i]; j < A_offd_i[i+1]; j++)
      {
         C_offd_data[j] = A_offd_data[j]*invdiag;
         C_offd_j[j] = A_offd_j[j];
      }
   }
   C_diag_i[num_rows] = A_diag_i[num_rows];
   C_offd_i[num_rows] = A_offd_i[num_rows];

   return C;
}

/* RL */
/*---------------------------------------------------------------------------
 * hypre_BoomerAMGBuildInterpOnePnt
 *
 * Builds a one-point interpolation operator P: each F-point interpolates
 * (with weight 1.0) from the single most strongly influencing C-point,
 * i.e. the strong C-neighbor with the largest |a_ij|; C-points use
 * identity interpolation.
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_BoomerAMGBuildInterpOnePnt( hypre_ParCSRMatrix  *A,
                                  HYPRE_Int           *CF_marker,
                                  hypre_ParCSRMatrix  *S,
                                  HYPRE_BigInt        *num_cpts_global,
                                  HYPRE_Int            num_functions,
                                  HYPRE_Int           *dof_func,
                                  HYPRE_Int            debug_flag,
                                  HYPRE_Int           *col_offd_S_to_A,
                                  hypre_ParCSRMatrix **P_ptr)
{
   MPI_Comm                comm = hypre_ParCSRMatrixComm(A);
   hypre_ParCSRCommPkg    *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   hypre_ParCSRCommHandle *comm_handle;

   hypre_CSRMatrix *A_diag      = hypre_ParCSRMatrixDiag(A);
   HYPRE_Real      *A_diag_data = hypre_CSRMatrixData(A_diag);
   HYPRE_Int       *A_diag_i    = hypre_CSRMatrixI(A_diag);
   HYPRE_Int       *A_diag_j    = hypre_CSRMatrixJ(A_diag);

   hypre_CSRMatrix *A_offd      = hypre_ParCSRMatrixOffd(A);
   HYPRE_Real      *A_offd_data = hypre_CSRMatrixData(A_offd);
   HYPRE_Int       *A_offd_i    = hypre_CSRMatrixI(A_offd);
   HYPRE_Int       *A_offd_j    = hypre_CSRMatrixJ(A_offd);

   HYPRE_Int        num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd);
   //HYPRE_Int *col_map_offd_A = hypre_ParCSRMatrixColMapOffd(A);

   hypre_CSRMatrix *S_diag   = hypre_ParCSRMatrixDiag(S);
   HYPRE_Int       *S_diag_i = hypre_CSRMatrixI(S_diag);
   HYPRE_Int       *S_diag_j = hypre_CSRMatrixJ(S_diag);

   hypre_CSRMatrix *S_offd   = hypre_ParCSRMatrixOffd(S);
   HYPRE_Int       *S_offd_i = hypre_CSRMatrixI(S_offd);
   HYPRE_Int       *S_offd_j = hypre_CSRMatrixJ(S_offd);

   /* Interpolation matrix P */
   hypre_ParCSRMatrix *P;
   /* csr's */
   hypre_CSRMatrix *P_diag;
   hypre_CSRMatrix *P_offd;
   /* arrays */
   HYPRE_Real      *P_diag_data;
   HYPRE_Int       *P_diag_i;
   HYPRE_Int       *P_diag_j;
   HYPRE_Real      *P_offd_data;
   HYPRE_Int       *P_offd_i;
   HYPRE_Int       *P_offd_j;
   HYPRE_Int
                    num_cols_offd_P;
   HYPRE_Int       *tmp_map_offd = NULL;
   HYPRE_BigInt    *col_map_offd_P = NULL;
   /* CF marker off-diag part */
   HYPRE_Int       *CF_marker_offd = NULL;
   /* func type off-diag part */
   HYPRE_Int       *dof_func_offd = NULL;
   /* nnz */
   HYPRE_Int        nnz_diag, nnz_offd, cnt_diag, cnt_offd;
   HYPRE_Int       *marker_diag, *marker_offd = NULL;
   /* local size */
   HYPRE_Int        n_fine = hypre_CSRMatrixNumRows(A_diag);
   /* number of C-pts */
   HYPRE_Int        n_cpts = 0;
   /* fine to coarse mapping: diag part and offd part */
   HYPRE_Int       *fine_to_coarse;
   HYPRE_BigInt    *fine_to_coarse_offd = NULL;
   HYPRE_BigInt     total_global_cpts, my_first_cpt;
   HYPRE_Int        my_id, num_procs;
   HYPRE_Int        num_sends;
   HYPRE_Int       *int_buf_data = NULL;
   HYPRE_BigInt    *big_int_buf_data = NULL;
   //HYPRE_Int col_start = hypre_ParCSRMatrixFirstRowIndex(A);
   //HYPRE_Int col_end = col_start + n_fine;
   HYPRE_Int        i, j, i1, j1, k1, index, start;
   /* for each F-pt: local index of the chosen C-pt */
   HYPRE_Int       *max_abs_cij;
   /* for each F-pt: where the chosen C-pt lives -- 'd' diag, 'o' offd, 'n' none */
   char            *max_abs_diag_offd;
   HYPRE_Real       max_abs_aij, vv;

   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_MPI_Comm_rank(comm,&my_id);

#ifdef HYPRE_NO_GLOBAL_PARTITION
   my_first_cpt = num_cpts_global[0];
   /* last rank knows the global C-point count; broadcast it to all */
   if (my_id == (num_procs -1)) total_global_cpts = num_cpts_global[1];
   hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs-1, comm);
#else
   my_first_cpt = num_cpts_global[my_id];
   total_global_cpts = num_cpts_global[num_procs];
#endif

   /*-------------------------------------------------------------------
    * Get the CF_marker data for the off-processor columns
    *-------------------------------------------------------------------*/

   /* CF marker for the off-diag columns */
   if (num_cols_A_offd)
   {
      CF_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd,HYPRE_MEMORY_HOST);
   }
   /* function type indicator for the off-diag columns */
   if (num_functions > 1 && num_cols_A_offd)
   {
      dof_func_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd,HYPRE_MEMORY_HOST);
   }

   /* if CommPkg of A is not present, create it */
   if (!comm_pkg)
   {
      hypre_MatvecCommPkgCreate(A);
      comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   }

   /* number of sends to do (number of procs) */
   num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
   /* send buffer, of size send_map_starts[num_sends]),
    * i.e., number of entries to send */
   int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends),HYPRE_MEMORY_HOST);

   /* copy CF markers of elements to send to buffer
    * RL: why copy them with two for loops? Why not just loop through all in one */
   index = 0;
   for (i = 0; i < num_sends; i++)
   {
      /* start pos of elements sent to send_proc[i] */
      start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
      /* loop through all elems to send_proc[i] */
      for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
      {
         /* CF marker of send_map_elemts[j] */
         int_buf_data[index++] = CF_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
      }
   }
   /* create a handle to start communication. 11: for integer */
   comm_handle = hypre_ParCSRCommHandleCreate(11, comm_pkg, int_buf_data, CF_marker_offd);
   /* destroy the handle to finish communication */
   hypre_ParCSRCommHandleDestroy(comm_handle);

   /* do a similar communication for dof_func */
   if (num_functions > 1)
   {
      index = 0;
      for (i = 0; i < num_sends; i++)
      {
         start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
         for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
         {
            int_buf_data[index++] = dof_func[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
         }
      }
      comm_handle = hypre_ParCSRCommHandleCreate(11, comm_pkg, int_buf_data, dof_func_offd);
      hypre_ParCSRCommHandleDestroy(comm_handle);
   }

   hypre_TFree(int_buf_data,HYPRE_MEMORY_HOST);

   /*-----------------------------------------------------------------------
    * First Pass: Determine size of P and fill in fine_to_coarse mapping,
    * and find the most strongly influencing C-pt for each F-pt
    *-----------------------------------------------------------------------*/

   /* nnz in diag and offd parts */
   cnt_diag = 0;
   cnt_offd = 0;
   max_abs_cij = hypre_CTAlloc(HYPRE_Int, n_fine,HYPRE_MEMORY_HOST);
   max_abs_diag_offd = hypre_CTAlloc(char, n_fine,HYPRE_MEMORY_HOST);
   fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine,HYPRE_MEMORY_HOST);

   /* markers initialized as zeros */
   marker_diag = hypre_CTAlloc(HYPRE_Int, n_fine,HYPRE_MEMORY_HOST);
   marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd,HYPRE_MEMORY_HOST);

   for (i = 0; i < n_fine; i++)
   {
      /*--------------------------------------------------------------------
       * If i is a C-point, interpolation is the identity. Also set up
       * mapping vector.
       *--------------------------------------------------------------------*/
      if (CF_marker[i] >= 0)
      {
         //fine_to_coarse[i] = my_first_cpt + n_cpts;
         fine_to_coarse[i] = n_cpts;
         n_cpts++;
         continue;
      }

      /* mark all the strong connections: in S.
       * MARK = i+1 is unique per row, so markers need no re-initialization */
      HYPRE_Int MARK = i + 1;
      /* loop through row i of S, diag part  */
      for (j = S_diag_i[i]; j < S_diag_i[i+1]; j++)
      {
         marker_diag[S_diag_j[j]] = MARK;
      }
      /* loop through row i of S, offd part  */
      if (num_procs > 1)
      {
         for (j = S_offd_i[i]; j < S_offd_i[i+1]; j++)
         {
            /* map S's offd column numbering to A's when the map exists */
            j1 = col_offd_S_to_A ? col_offd_S_to_A[S_offd_j[j]] : S_offd_j[j];
            marker_offd[j1] = MARK;
         }
      }

      fine_to_coarse[i] = -1;
      /*---------------------------------------------------------------------------
       * If i is an F-pt, interpolation is from the most strongly influencing C-pt
       * Find this C-pt and save it
       *--------------------------------------------------------------------------*/
      /* if we failed to find any strong C-pt, mark this point as an 'n' */
      char marker = 'n';
      /* max abs val */
      max_abs_aij = -1.0;
      /* loop through row i of A, diag part  */
      for (j = A_diag_i[i]; j < A_diag_i[i+1]; j++)
      {
         i1 = A_diag_j[j];
         vv = fabs(A_diag_data[j]);

#if 0
         /* !!! this is a hack just for code verification purpose !!!
            it basically says:
            1. if we see |a_ij| < 1e-14, force it to be 1e-14
            2. if we see |a_ij| == the max(|a_ij|) so far exactly,
               replace it if the j idx is smaller
            Reasons:
            1. numerical round-off for eps-level values
            2. entries in CSR rows may be listed in different orders
         */
         vv = vv < 1e-14 ? 1e-14 : vv;
         if (CF_marker[i1] >= 0 && marker_diag[i1] == MARK && vv == max_abs_aij && i1 < max_abs_cij[i])
         {
            /* mark it as a 'd' */
            marker = 'd';
            max_abs_cij[i] = i1;
            max_abs_aij = vv;
            continue;
         }
#endif
         /* it is a strong C-pt and has abs val larger than what have seen */
         if (CF_marker[i1] >= 0 && marker_diag[i1] == MARK && vv > max_abs_aij)
         {
            /* mark it as a 'd' */
            marker = 'd';
            max_abs_cij[i] = i1;
            max_abs_aij = vv;
         }
      }
      /* offd part */
      if (num_procs > 1)
      {
         for (j = A_offd_i[i]; j < A_offd_i[i+1]; j++)
         {
            i1 = A_offd_j[j];
            vv = fabs(A_offd_data[j]);
            if (CF_marker_offd[i1] >= 0 && marker_offd[i1] == MARK && vv > max_abs_aij)
            {
               /* mark it as an 'o' */
               marker = 'o';
               max_abs_cij[i] = i1;
               max_abs_aij = vv;
            }
         }
      }

      max_abs_diag_offd[i] = marker;

      if (marker == 'd')
      {
         cnt_diag ++;
      }
      else if (marker == 'o')
      {
         cnt_offd ++;
      }
   }

   /* each C-pt contributes one identity entry; each F-pt at most one entry */
   nnz_diag = cnt_diag + n_cpts;
   nnz_offd = cnt_offd;

   /*------------- allocate arrays */
   P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine+1,HYPRE_MEMORY_HOST);
   P_diag_j = hypre_CTAlloc(HYPRE_Int, nnz_diag,HYPRE_MEMORY_HOST);
   P_diag_data = hypre_CTAlloc(HYPRE_Real, nnz_diag,HYPRE_MEMORY_HOST);

   /* not in ``if num_procs > 1'',
    * allocation needed even for empty CSR */
   P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine+1,HYPRE_MEMORY_HOST);
   P_offd_j = hypre_CTAlloc(HYPRE_Int, nnz_offd,HYPRE_MEMORY_HOST);
   P_offd_data = hypre_CTAlloc(HYPRE_Real, nnz_offd,HYPRE_MEMORY_HOST);

   /* redundant */
   P_diag_i[0] = 0;
   P_offd_i[0] = 0;

   /* reset counters */
   cnt_diag = 0;
   cnt_offd = 0;

   /*-----------------------------------------------------------------------
    *  Send and receive fine_to_coarse info.
    *-----------------------------------------------------------------------*/
   fine_to_coarse_offd = hypre_CTAlloc(HYPRE_BigInt, num_cols_A_offd,HYPRE_MEMORY_HOST);
   big_int_buf_data = hypre_CTAlloc(HYPRE_BigInt, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends),HYPRE_MEMORY_HOST);
   index = 0;
   for (i = 0; i < num_sends; i++)
   {
      start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
      for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
      {
         /* shift to global coarse indices before sending */
         big_int_buf_data[index++] = my_first_cpt +(HYPRE_BigInt)fine_to_coarse[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
      }
   }
   /* 21: communication of HYPRE_BigInt data */
   comm_handle = hypre_ParCSRCommHandleCreate(21, comm_pkg, big_int_buf_data, fine_to_coarse_offd);
   hypre_ParCSRCommHandleDestroy(comm_handle);

   /*-----------------------------------------------------------------------
    *  Second Pass: Populate P
    *-----------------------------------------------------------------------*/
   for (i = 0; i < n_fine; i++)
   {
      if (CF_marker[i] >= 0)
      {
         /*--------------------------------------------------------------------
          *  If i is a C-point, interpolation is the identity.
          *--------------------------------------------------------------------*/
         //P_diag_j[cnt_diag] = fine_to_coarse[i] - my_first_cpt;
         P_diag_j[cnt_diag] = fine_to_coarse[i];
         P_diag_data[cnt_diag++] = 1.0;
      }
      else
      {
         /*---------------------------------------------------------------------------
          * If i is an F-pt, interpolation is from the most strongly influencing C-pt
          *--------------------------------------------------------------------------*/
         if (max_abs_diag_offd[i] == 'd')
         {
            /* on diag part of P */
            j = max_abs_cij[i];
            //P_diag_j[cnt_diag] = fine_to_coarse[j] - my_first_cpt;
            P_diag_j[cnt_diag] = fine_to_coarse[j];
            P_diag_data[cnt_diag++] = 1.0;
         }
         else if (max_abs_diag_offd[i] == 'o')
         {
            /* on offd part of P; j is still A's offd index here,
             * remapped to P's offd numbering below */
            j = max_abs_cij[i];
            P_offd_j[cnt_offd] = j;
            P_offd_data[cnt_offd++] = 1.0;
         }
      }

      P_diag_i[i+1] = cnt_diag;
      P_offd_i[i+1] = cnt_offd;
   }

   hypre_assert(cnt_diag == nnz_diag);
   hypre_assert(cnt_offd == nnz_offd);

   /* num of cols in the offd part of P */
   num_cols_offd_P = 0;

   /* marker_offd: all -1 */
   for (i = 0; i < num_cols_A_offd; i++)
   {
      marker_offd[i] = -1;
   }
   for (i = 0; i < nnz_offd; i++)
   {
      i1 = P_offd_j[i];
      if (marker_offd[i1] == -1)
      {
         num_cols_offd_P++;
         marker_offd[i1] = 1;
      }
   }

   /* col_map_offd_P: the col indices of the offd of P
    * we first keep them be the offd-idx of A */
   col_map_offd_P = hypre_CTAlloc(HYPRE_BigInt, num_cols_offd_P,HYPRE_MEMORY_HOST);
   tmp_map_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd_P,HYPRE_MEMORY_HOST);
   for (i = 0, i1 = 0; i < num_cols_A_offd; i++)
   {
      if (marker_offd[i] == 1)
      {
         tmp_map_offd[i1++] = i;
      }
   }
   hypre_assert(i1 == num_cols_offd_P);

   /* now, adjust P_offd_j to local idx w.r.t col_map_offd_R
    * by searching */
   for (i = 0; i < nnz_offd; i++)
   {
      i1 = P_offd_j[i];
      k1 = hypre_BinarySearch(tmp_map_offd, i1, num_cols_offd_P);
      /* search must succeed */
      hypre_assert(k1 >= 0 && k1 < num_cols_offd_P);
      P_offd_j[i] = k1;
   }

   /* change col_map_offd_P to global coarse ids */
   for (i = 0; i < num_cols_offd_P; i++)
   {
      col_map_offd_P[i] = fine_to_coarse_offd[tmp_map_offd[i]];
   }

   /* Now, we should have everything of Parcsr matrix P */
   P = hypre_ParCSRMatrixCreate(comm,
                                hypre_ParCSRMatrixGlobalNumCols(A), /* global num of rows */
                                total_global_cpts,                  /* global num of cols */
                                hypre_ParCSRMatrixColStarts(A),     /* row_starts */
                                num_cpts_global,                    /* col_starts */
                                num_cols_offd_P,                    /* num cols offd */
                                nnz_diag,
                                nnz_offd);

   P_diag = hypre_ParCSRMatrixDiag(P);
   hypre_CSRMatrixData(P_diag) = P_diag_data;
   hypre_CSRMatrixI(P_diag) = P_diag_i;
   hypre_CSRMatrixJ(P_diag) = P_diag_j;

   P_offd = hypre_ParCSRMatrixOffd(P);
   hypre_CSRMatrixData(P_offd) = P_offd_data;
   hypre_CSRMatrixI(P_offd) = P_offd_i;
   hypre_CSRMatrixJ(P_offd) = P_offd_j;

   /* P does not own ColStarts, since A does */
   hypre_ParCSRMatrixOwnsRowStarts(P) = 0;

   hypre_ParCSRMatrixColMapOffd(P) = col_map_offd_P;

   /* create CommPkg of P */
   hypre_MatvecCommPkgCreate(P);

   *P_ptr = P;

   /* free workspace */
   hypre_TFree(CF_marker_offd,HYPRE_MEMORY_HOST);
   hypre_TFree(dof_func_offd,HYPRE_MEMORY_HOST);
   hypre_TFree(tmp_map_offd,HYPRE_MEMORY_HOST);
   hypre_TFree(big_int_buf_data,HYPRE_MEMORY_HOST);
   hypre_TFree(fine_to_coarse,HYPRE_MEMORY_HOST);
   hypre_TFree(fine_to_coarse_offd,HYPRE_MEMORY_HOST);
   hypre_TFree(marker_diag,HYPRE_MEMORY_HOST);
   hypre_TFree(marker_offd,HYPRE_MEMORY_HOST);
   hypre_TFree(max_abs_cij,HYPRE_MEMORY_HOST);
   hypre_TFree(max_abs_diag_offd,HYPRE_MEMORY_HOST);

   return hypre_error_flag;
}
GB_unop__log_fp64_fp64.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB (_unop_apply__log_fp64_fp64)
// op(A') function:  GB (_unop_tran__log_fp64_fp64)

// C type:   double
// A type:   double
// cast:     double cij = aij
// unaryop:  cij = log (aij)

// type of the A matrix entries
#define GB_ATYPE \
    double

// type of the C matrix entries
#define GB_CTYPE \
    double

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    double aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = log (x) ;

// casting (identity cast here: A and C are both double)
#define GB_CAST(z, aij) \
    double z = aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    double aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    double z = aij ; \
    Cx [pC] = log (z) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_LOG || GxB_NO_FP64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Applies cij = log (aij) elementwise over the anz entries of Ax, writing the
// result into Cx.  Returns GrB_NO_VALUE when this kernel is compiled out
// (GB_DISABLE), so the caller falls back to the generic kernel.

GrB_Info GB (_unop_apply__log_fp64_fp64)
(
    double *Cx,                 // Cx and Ax may be aliased
    const double *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // dense/sparse case: every one of the anz entries is present
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            double aij = Ax [p] ;
            double z = aij ;
            Cx [p] = log (z) ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            // skip entries not present in the bitmap
            if (!Ab [p]) continue ;
            double aij = Ax [p] ;
            double z = aij ;
            Cx [p] = log (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The shared transpose template GB_unop_transpose.c is instantiated with the
// GB_* macros defined above.

GrB_Info GB (_unop_tran__log_fp64_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
bml_scale_csr_typed.c
#include "../../typed.h"
#include "../blas.h"
#include "../bml_allocate.h"
#include "../bml_logger.h"
#include "../bml_parallel.h"
#include "../bml_scale.h"
#include "../bml_types.h"
#include "bml_allocate_csr.h"
#include "bml_copy_csr.h"
#include "bml_scale_csr.h"
#include "bml_types_csr.h"

#include <stdlib.h>
#include <string.h>

#ifdef _OPENMP
#include <omp.h>
#endif

/** Scale a csr matrix row in place.
 *
 * \ingroup scale_group
 *
 * \param _scale_factor Pointer to the scalar factor (a REAL_T)
 * \param arow The row to be scaled
 */
void TYPED_FUNC(
    csr_scale_row) (
    void *_scale_factor,
    csr_sparse_row_t * arow)
{
#ifdef NOBLAS
    LOG_ERROR("No BLAS library");
#else
    REAL_T *scale_factor = _scale_factor;
    const int NNZ = arow->NNZ_;
    const int inc = 1;

    /* Scale the NNZ stored values of this row via BLAS xSCAL. */
    C_BLAS(SCAL) (&NNZ, scale_factor, arow->vals_, &inc);
#endif
}

/** Scale a csr matrix - result is a new matrix.
 *
 * \ingroup scale_group
 *
 * \param _scale_factor Pointer to the scalar factor
 * \param A The matrix to be scaled
 * \return A scaled version of matrix A.
 */
bml_matrix_csr_t *TYPED_FUNC(
    bml_scale_csr_new) (
    void *_scale_factor,
    bml_matrix_csr_t * A)
{
    bml_matrix_csr_t *B = TYPED_FUNC(bml_copy_csr_new) (A);

    TYPED_FUNC(bml_scale_inplace_csr) (_scale_factor, B);

    return B;
}

/** Scale a csr matrix.
 *
 * \ingroup scale_group
 *
 * \param _scale_factor Pointer to the scalar factor
 * \param A The matrix to be scaled
 * \param B Scaled version of matrix A
 */
void TYPED_FUNC(
    bml_scale_csr) (
    void *_scale_factor,
    bml_matrix_csr_t * A,
    bml_matrix_csr_t * B)
{
/* BUGFIX: the original #ifdef NOBLAS opened here was closed by an #endif
 * placed inside bml_scale_inplace_csr below.  With NOBLAS defined, the
 * preprocessor therefore removed this function's closing brace and the
 * entire definition of bml_scale_inplace_csr (which bml_scale_csr_new
 * calls), breaking the build.  Each function now carries its own
 * balanced conditional. */
#ifdef NOBLAS
    LOG_ERROR("No BLAS library");
#else
    if (A != B)
    {
        TYPED_FUNC(bml_copy_csr) (A, B);
    }
    TYPED_FUNC(bml_scale_inplace_csr) (_scale_factor, B);
#endif
}

/** Scale a csr matrix in place.
 *
 * \ingroup scale_group
 *
 * \param _scale_factor Pointer to the scalar factor
 * \param A The matrix to be scaled (overwritten with the result)
 */
void TYPED_FUNC(
    bml_scale_inplace_csr) (
    void *_scale_factor,
    bml_matrix_csr_t * A)
{
#ifdef NOBLAS
    LOG_ERROR("No BLAS library");
#else
    const int N = A->N_;

    /* Rows are independent, so they can be scaled in parallel. */
#pragma omp parallel for
    for (int i = 0; i < N; i++)
    {
        TYPED_FUNC(csr_scale_row) (_scale_factor, A->data_[i]);
    }
#endif
}
dropout-inl.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*!
 * Copyright (c) 2015 by Contributors
 * \file dropout-inl.h
 * \brief Dropout operator: randomly zeroes a fraction p of the input during
 *        training and rescales the survivors by 1/(1-p) (inverted dropout).
 * \author Bing Xu
 */

#ifndef MXNET_OPERATOR_NN_DROPOUT_INL_H_
#define MXNET_OPERATOR_NN_DROPOUT_INL_H_
#include <dmlc/logging.h>
#include <dmlc/parameter.h>
#include <mxnet/operator.h>
#include <map>
#include <vector>
#include <string>
#include <utility>
#include <algorithm>
#include "../mxnet_op.h"
#include "../mshadow_op.h"
#include "../random/sampler.h"

#if defined(USE_MKL) && defined(_OPENMP)
#include <omp.h>
#include <mkl_vml_functions.h>
#include <mkl_vsl.h>
#endif  // USE_MKL && _OPENMP

namespace dropout {
// Input/output slot indices and operating modes for the Dropout operator.
enum DropoutOpInputs {kData};
enum DropoutOpOutputs {kOut, kMask};   // kMask holds the scaled keep-mask
enum DropoutOpForwardResource {kRandom};
enum DropoutOpMode {kTraining, kAlways};
}  // namespace dropout

namespace mxnet {
namespace op {

// User-facing parameters of the Dropout operator.
struct DropoutParam : public dmlc::Parameter<DropoutParam> {
  float p;    // drop probability in [0, 1); internally stored as pkeep = 1 - p
  int mode;   // kTraining: active only in training; kAlways: also in inference
  DMLC_DECLARE_PARAMETER(DropoutParam) {
    DMLC_DECLARE_FIELD(p).set_default(0.5)
    .set_range(0, 1)
    .describe("Fraction of the input that gets dropped out during training time.");
    DMLC_DECLARE_FIELD(mode)
    .add_enum("training", dropout::kTraining)
    .add_enum("always", dropout::kAlways)
    .set_default(dropout::kTraining)
    .describe("Whether to only turn on dropout during training or to also turn on for inference.");
  }
};  // struct DropoutParam

template<typename xpu, typename DType>
class DropoutOp : public Operator {
#if defined(USE_MKL) && defined(_OPENMP)
  // Fill r[0..n) with Bernoulli(p) samples using MKL VSL, with the range
  // divided evenly across OpenMP threads.  Each thread creates its own MCG31
  // stream seeded/skipped by its offset so the combined sequence is
  // deterministic for a given base seed.
  // NOTE(review): `gen` is taken by value and used only to derive the seed —
  // presumably intentional; confirm against RandGenerator semantics.
  static void BernoulliGenerate(common::random::RandGenerator<cpu, DType> gen,
                                int n, double p, int* r) {
    typename RandGenerator<xpu, DType>::Impl genImpl(&gen, 1);
    const int seed = 17 + genImpl.rand() % 4096;  // NOLINT(runtime/threadsafe_fn)
    const int nthr = engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
#pragma omp parallel num_threads(nthr)
    {
      const int ithr = omp_get_thread_num();
      const int avg_amount = (n + nthr - 1) / nthr;
      const int my_offset = ithr * avg_amount;
      // Last thread may get a short (or empty) remainder chunk.
      const int my_amount = std::min(my_offset + avg_amount, n) - my_offset;
      if (my_amount > 0) {
        VSLStreamStatePtr stream;
        vslNewStream(&stream, VSL_BRNG_MCG31, seed + my_offset);
        vslSkipAheadStream(stream, my_offset);
        viRngBernoulli(VSL_RNG_METHOD_BERNOULLI_ICDF, stream, my_amount, r + my_offset, p);
        vslDeleteStream(&stream);
      }
    }
  }

  // MKL forward pass: out = data * mask * (1/pkeep).  Returns false when MKL
  // cannot be used (small DType), so the caller falls back to the generic RNG
  // kernel.
  static bool MSHADOW_CINLINE MKLForward(mshadow::Stream<cpu> *s,
                                         RandGenerator<cpu, DType> *pgen,
                                         const double pkeep,
                                         const std::vector<TBlob> &in_data,
                                         const std::vector<TBlob> &out_data) {
    // BernoulliGenerate expects an array int, so for types smaller than int,
    // the mask buffer will be too small, so we can't use MKL in those cases
    if (sizeof(DType) >= sizeof(int)) {
      Tensor<xpu, 2, DType> mask = out_data[dropout::kMask].FlatTo2D<xpu, DType>(s);
      Tensor<xpu, 2, DType> data = in_data[dropout::kData].FlatTo2D<xpu, DType>(s);
      Tensor<xpu, 2, DType> out = out_data[dropout::kOut].FlatTo2D<xpu, DType>(s);
      DType *outptr = out.dptr_;
      DType *dataptr = data.dptr_;
      // The mask tensor's storage is reinterpreted as int for MKL's
      // Bernoulli generator (safe because sizeof(DType) >= sizeof(int)).
      auto maskptr = reinterpret_cast<int *>(mask.dptr_);
      int count = mask.shape_[0] * mask.shape_[1];
      BernoulliGenerate(*pgen, count, pkeep, maskptr);
      const float pk_1 = 1.0f / pkeep;
#pragma omp parallel for num_threads(engine::OpenMP::Get()->GetRecommendedOMPThreadCount())
      for (int i = 0; i < count; ++i) {
        outptr[i] = dataptr[i] * maskptr[i] * pk_1;
      }
      return true;
    }
    return false;
  }

  // MKL backward pass: in_grad = out_grad * mask * (1/pkeep), reusing the
  // int-valued mask written by MKLForward.  Returns false if MKL is unusable.
  static bool MSHADOW_CINLINE MKLBackward(mshadow::Stream<cpu> *s,
                                          const double pkeep,
                                          const std::vector<TBlob> &in_grad,
                                          const std::vector<TBlob> &out_data,
                                          const std::vector<TBlob> &out_grad) {
    if (sizeof(DType) >= sizeof(int)) {
      Tensor<xpu, 2, DType> grad = out_grad[dropout::kOut].FlatTo2D<xpu, DType>(s);
      Tensor<xpu, 2, DType> mask = out_data[dropout::kMask].FlatTo2D<xpu, DType>(s);
      Tensor<xpu, 2, DType> gdata = in_grad[dropout::kData].FlatTo2D<xpu, DType>(s);
      DType *ingradptr = gdata.dptr_;
      const DType *outgradptr = grad.dptr_;
      auto maskptr = reinterpret_cast<int *>(mask.dptr_);
      int count = mask.shape_[0] * mask.shape_[1];
      const float pk_1 = 1.0f / pkeep;
#pragma omp parallel for num_threads(engine::OpenMP::Get()->GetRecommendedOMPThreadCount())
      for (int i = 0; i < count; ++i) {
        ingradptr[i] = outgradptr[i] * maskptr[i] * pk_1;
      }
      return true;
    }
    return false;
  }

#ifdef __CUDACC__
  // GPU never uses MKL: these stubs always report "not handled".
  static bool MSHADOW_CINLINE MKLForward(mshadow::Stream<gpu> *s,
                                         RandGenerator<gpu, DType> *pgen,
                                         const double pkeep,
                                         const std::vector<TBlob> &in_data,
                                         const std::vector<TBlob> &out_data) {
    return false;
  }
  static bool MSHADOW_CINLINE MKLBackward(mshadow::Stream<gpu> *s,
                                          const double pkeep,
                                          const std::vector<TBlob> &in_grad,
                                          const std::vector<TBlob> &out_data,
                                          const std::vector<TBlob> &out_grad) {
    return false;
  }
#endif  // __CUDACC__

#else  // #if defined(USE_MKL) && defined(_OPENMP)
  // No-MKL build: stubs force the generic kernel path.
  static bool MSHADOW_CINLINE MKLForward(mshadow::Stream<xpu> *s,
                                         RandGenerator<xpu, DType> *pgen,
                                         const double pkeep,
                                         const std::vector<TBlob> &in_data,
                                         const std::vector<TBlob> &out_data) {
    return false;
  }
  static bool MSHADOW_CINLINE MKLBackward(mshadow::Stream<xpu> *s,
                                          const double pkeep,
                                          const std::vector<TBlob> &in_grad,
                                          const std::vector<TBlob> &out_data,
                                          const std::vector<TBlob> &out_grad) {
    return false;
  }
#endif  // #if defined(USE_MKL) && defined(_OPENMP)

 public:
  /*!
   * \brief Dropout kernel, compute dropout tensor
   */
  struct DropoutKernel {
    /*!
     * \brief Dropout kernel function
     * \param id Thread number (0-based representing count)
     * \param gen Random number generator
     * \param N Total number of items in the output
     * \param step Step between items, related to parallelism
     * \param dropout_out Output dropout values
     * \param mask_out  Output mask (is multiplied to create dropout output, may be 0)
     * \param input_data Input data to perform the dropout on
     * \param pkeep Dropout rate (keep when the generated random number is less than this value)
     */
    MSHADOW_XINLINE static void Map(int id,
                                    RandGenerator<xpu, DType> gen,
                                    const int N,
                                    const int step,
                                    DType *dropout_out,
                                    DType *mask_out,
                                    const DType *input_data,
                                    const real_t pkeep) {
      RNG_KERNEL_LOOP(xpu, DType, id, gen, N, step, {
        const real_t rand_num = static_cast<real_t>(genImpl.uniform());
        // threshold() yields 1 when rand_num < pkeep, 0 otherwise; the
        // survivor is scaled by 1/pkeep (inverted dropout).
        mask_out[i] = mshadow_op::threshold::Map<real_t>(rand_num, pkeep) * (1.0f / pkeep);
        dropout_out[i] = input_data[i] * mask_out[i];
      });
    }
  };

  explicit DropoutOp(DropoutParam param) {
    this->pkeep_ = 1.0f - param.p;
    this->mode_ = static_cast<dropout::DropoutOpMode>(param.mode);
  }

  // Forward: in training (or kAlways mode) apply the random mask via MKL or
  // the generic RNG kernel; otherwise dropout is the identity (copy input).
  virtual void Forward(const OpContext &ctx,
                       const std::vector<TBlob> &in_data,
                       const std::vector<OpReqType> &req,
                       const std::vector<TBlob> &out_data,
                       const std::vector<TBlob> &aux_states) {
    if (req[dropout::kOut] != kNullOp) {
      CHECK_EQ(in_data.size(), 1U);
      if (ctx.is_train) {
        CHECK_EQ(out_data.size(), 2U);
      }
      Stream<xpu> *s = ctx.get_stream<xpu>();
      const TBlob &out = out_data[dropout::kOut];
      if (ctx.is_train || this->mode_ == dropout::kAlways) {
        RandGenerator<xpu, DType> *pgen = ctx.requested[0].get_parallel_random<xpu, DType>();
        CHECK_NOTNULL(pgen);
        if (!MKLForward(s, pgen, this->pkeep_, in_data, out_data)) {
          const TBlob &mask = out_data[dropout::kMask];
          // kAddTo is not supported by the RNG kernel (it overwrites out).
          CHECK(req[dropout::kOut] != kAddTo);
          LaunchRNG<DropoutKernel, xpu>(s, pgen, out.Size(),
                                        out.dptr<DType>(),
                                        mask.dptr<DType>(),
                                        in_data[dropout::kData].dptr<DType>(),
                                        this->pkeep_);
        }
      } else {
        // Inference with kTraining mode: identity pass-through.
        const TBlob& data = in_data[dropout::kData];
        if (req[dropout::kOut] == kWriteTo) {
          mxnet_op::copy(s, out, data);
        } else {
          MXNET_ASSIGN_REQ_SWITCH(req[dropout::kOut], Req, {
            mxnet_op::Kernel<mxnet_op::op_with_req<mshadow_op::identity, Req>, xpu>::Launch(
              s, out.Size(), out.dptr<DType>(), data.dptr<DType>());
          });
        }
      }
    }
  }

  // Backward: in training (or kAlways) multiply the incoming gradient by the
  // saved mask; otherwise pass the gradient through unchanged.
  virtual void Backward(const OpContext &ctx,
                        const std::vector<TBlob> &out_grad,
                        const std::vector<TBlob> &in_data,
                        const std::vector<TBlob> &out_data,
                        const std::vector<OpReqType> &req,
                        const std::vector<TBlob> &in_grad,
                        const std::vector<TBlob> &aux_states) {
    using namespace mshadow;
    using namespace mshadow::expr;
    CHECK_EQ(out_grad.size(), 1U);
    CHECK_EQ(in_grad.size(), 1U);
    Stream<xpu> *s = ctx.get_stream<xpu>();
    if (ctx.is_train || mode_ == dropout::kAlways) {
      if (!MKLBackward(s, this->pkeep_, in_grad, out_data, out_grad)) {
        const TBlob &gdata = in_grad[dropout::kData];
        const TBlob &grad = out_grad[dropout::kOut];
        const TBlob &mask = out_data[dropout::kMask];
        CHECK_EQ(grad.Size(), mask.Size());
        MXNET_ASSIGN_REQ_SWITCH(req[dropout::kData], Req, {
          mxnet_op::Kernel<mxnet_op::op_with_req<mshadow_op::mul, Req>, xpu>::Launch(
            s, gdata.Size(), gdata.dptr<DType>(), grad.dptr<DType>(), mask.dptr<DType>());
        });
      }
    } else {
      const TBlob& gdata = in_grad[dropout::kData];
      const TBlob& grad = out_grad[dropout::kOut];
      if (req[dropout::kData] == kWriteTo) {
        mxnet_op::copy(s, gdata, grad);
      } else {
        MXNET_ASSIGN_REQ_SWITCH(req[dropout::kData], Req, {
          mxnet_op::Kernel<mxnet_op::op_with_req<mshadow_op::identity, Req>, xpu>::Launch(
            s, gdata.Size(), gdata.dptr<DType>(), grad.dptr<DType>());
        });
      }
    }
  }

 private:
  /*! \brief Dropout rate (keep when the generated random number is less than this value) */
  real_t pkeep_;
  /*! \brief Dropout mode */
  dropout::DropoutOpMode mode_;
};  // class DropoutOp

template<typename xpu>
Operator *CreateOp(DropoutParam param, int dtype);

#if DMLC_USE_CXX11
// Symbolic-graph property class describing Dropout's shapes, types,
// resources, and in-place/dependency options.
class DropoutProp : public OperatorProperty {
 public:
  void Init(const std::vector<std::pair<std::string, std::string> >& kwargs) override {
    param_.Init(kwargs);
  }

  std::map<std::string, std::string> GetParams() const override {
    return param_.__DICT__();
  }

  // Both outputs (output and mask) have the same shape as the input.
  bool InferShape(std::vector<TShape> *in_shape,
                  std::vector<TShape> *out_shape,
                  std::vector<TShape> *aux_shape) const override {
    using namespace mshadow;
    CHECK_EQ(in_shape->size(), 1U);
    const TShape &dshape = in_shape->at(0);
    if (dshape.ndim() == 0) return false;
    out_shape->clear();
    out_shape->push_back(dshape);
    out_shape->push_back(dshape);
    return true;
  }

  // All outputs share the input's dtype.
  bool InferType(std::vector<int> *in_type,
                 std::vector<int> *out_type,
                 std::vector<int> *aux_type) const override {
    CHECK_EQ(in_type->size(), 1U);
    int dtype = in_type->at(0);

    if (dtype == -1) {
      LOG(FATAL) << "input type to dropout is not specified.";
      return false;
    }

    size_t nout = this->ListOutputs().size();
    out_type->clear();
    for (size_t i = 0; i < nout; ++i) out_type->push_back(dtype);
    return true;
  }

  OperatorProperty* Copy() const override {
    auto ptr = new DropoutProp();
    ptr->param_ = param_;
    return ptr;
  }

  std::string TypeString() const override {
    return "Dropout";
  }

  // Backward needs the output gradient and the saved mask, not the input.
  std::vector<int> DeclareBackwardDependency(
    const std::vector<int> &out_grad,
    const std::vector<int> &in_data,
    const std::vector<int> &out_data) const override {
    return {out_grad[dropout::kOut], out_data[dropout::kMask]};
  }

  std::vector<std::pair<int, void*> > BackwardInplaceOption(
    const std::vector<int> &out_grad,
    const std::vector<int> &in_data,
    const std::vector<int> &out_data,
    const std::vector<void*> &in_grad) const override {
    return {{out_grad[dropout::kOut], in_grad[dropout::kData]}};
  }

  std::vector<std::pair<int, void*> > ForwardInplaceOption(
    const std::vector<int> &in_data,
    const std::vector<void*> &out_data) const override {
    return {{in_data[dropout::kData], out_data[dropout::kOut]}};
  }

  std::vector<ResourceRequest> ForwardResource(const std::vector<TShape> &in_shape) const override {
    return { ResourceRequest::kParallelRandom };
  }

  // The mask output is internal; only "output" is user-visible.
  int NumVisibleOutputs() const override {
    return 1;
  }

  int NumOutputs() const override {
    return 2;
  }

  std::vector<std::string> ListOutputs() const override {
    return {"output", "mask"};
  }

  Operator* CreateOperator(Context ctx) const override {
    LOG(FATAL) << "Not Implemented";
    return NULL;
  }

  Operator* CreateOperatorEx(Context ctx, std::vector<TShape> *in_shape,
                             std::vector<int> *in_type) const override;

 private:
  DropoutParam param_;
};  // class DropoutProp
#endif  // DMLC_USE_CXX11
}  // namespace op
}  // namespace mxnet
#endif  // MXNET_OPERATOR_NN_DROPOUT_INL_H_
3d7pt.lbpar.c
#include <omp.h>
#include <math.h>
/* Integer-safe ceiling/floor division and min/max helpers used by the
 * PLUTO/CLooG-generated tiled loop bounds below. */
#define ceild(n,d)  ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y)    ((x) > (y)? (x) : (y))
#define min(x,y)    ((x) < (y)? (x) : (y))

/*
 * Order-1, 3D 7 point stencil
 * Adapted from PLUTO and Pochoir test bench
 *
 * Tareq Malas
 */
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif

#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)

/* Subtract the `struct timeval' values X and Y,
 * storing the result in RESULT.
 *
 * Return 1 if the difference is negative, otherwise 0.
 * NOTE: Y is modified (carry propagation) as a side effect.
 */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  /* Perform the carry for the later subtraction by updating y. */
  if (x->tv_usec < y->tv_usec)
  {
    int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
    y->tv_usec -= 1000000 * nsec;
    y->tv_sec += nsec;
  }
  if (x->tv_usec - y->tv_usec > 1000000)
  {
    int nsec = (x->tv_usec - y->tv_usec) / 1000000;
    y->tv_usec += 1000000 * nsec;
    y->tv_sec -= nsec;
  }

  /* Compute the time remaining to wait.
   * tv_usec is certainly positive. */
  result->tv_sec = x->tv_sec - y->tv_sec;
  result->tv_usec = x->tv_usec - y->tv_usec;

  /* Return 1 if result is negative. */
  return x->tv_sec < y->tv_sec;
}

/* Benchmark driver: allocates a double-buffered (Nz x Ny x Nx) grid,
 * runs the time-tiled 7-point stencil TESTS times, and reports the
 * minimum wall-clock time.
 * argv: [1]=Nx [2]=Ny [3]=Nz [4]=Nt (each interior dim gets +2 halo).
 * NOTE(review): Nx/Ny/Nz/Nt stay uninitialized when too few arguments
 * are passed — the generated harness assumes all four are supplied.
 */
int main(int argc, char *argv[])
{
  int t, i, j, k, test;
  int Nx, Ny, Nz, Nt;
  if (argc > 3) {
    Nx = atoi(argv[1])+2;
    Ny = atoi(argv[2])+2;
    Nz = atoi(argv[3])+2;
  }
  if (argc > 4)
    Nt = atoi(argv[4]);

  /* Two time buffers, each a Nz x Ny x Nx array of pointers-to-rows. */
  double ****A = (double ****) malloc(sizeof(double***)*2);
  A[0] = (double ***) malloc(sizeof(double**)*Nz);
  A[1] = (double ***) malloc(sizeof(double**)*Nz);
  for(i=0; i<Nz; i++){
    A[0][i] = (double**) malloc(sizeof(double*)*Ny);
    A[1][i] = (double**) malloc(sizeof(double*)*Ny);
    for(j=0;j<Ny;j++){
      A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
      A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
    }
  }

  // tile size information, including extra element to decide the list length
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  // The list is modified here before source-to-source transformations
  tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
  tile_size[0] = 16;
  tile_size[1] = 16;
  tile_size[2] = 32;
  tile_size[3] = 1024;
  tile_size[4] = -1;

  // for timekeeping
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff=1.e100;

  const int BASE = 1024;
  const double alpha = 0.0876;
  const double beta = 0.0765;
  // initialize variables (interior points get pseudo-random values)
  // srand(42);
  for (i = 1; i < Nz; i++) {
    for (j = 1; j < Ny; j++) {
      for (k = 1; k < Nx; k++) {
        A[0][i][j][k] = 1.0 * (rand() % BASE);
      }
    }
  }

#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
#pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
#pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif

  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif

  for(test=0; test<TESTS; test++){
    gettimeofday(&start, 0);
    // serial execution - Addition: 6 && Multiplication: 2

    /* (Stray glibc <features.h>-style license/commentary blocks that the
     * source-to-source tool inlined here have been condensed to this note;
     * they contained no code.) */

    int t1, t2, t3, t4, t5, t6, t7, t8;
    int lb, ub, lbp, ubp, lb2, ub2;
    register int lbv, ubv;
/* Start of CLooG code */
/* Machine-generated time-tiled loop nest (tiles: 16x16x32x1024 per
 * tile_size above); t1/t2 tile time+z, t3 tiles y, t4 tiles x, and
 * t5..t8 are the intra-tile time/z/y/x iterators.  Do not hand-edit
 * the bound expressions. */
if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) {
  for (t1=-1;t1<=floord(Nt-2,8);t1++) {
    lbp=max(ceild(t1,2),ceild(16*t1-Nt+3,16));
    ubp=min(floord(Nt+Nz-4,16),floord(8*t1+Nz+5,16));
/* Parallelize across z-direction tiles within a time band. */
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
    for (t2=lbp;t2<=ubp;t2++) {
      for (t3=max(max(0,ceild(t1-3,4)),ceild(16*t2-Nz-28,32));t3<=min(min(min(floord(Nt+Ny-4,32),floord(8*t1+Ny+13,32)),floord(16*t2+Ny+12,32)),floord(16*t1-16*t2+Nz+Ny+11,32));t3++) {
        for (t4=max(max(max(0,ceild(t1-127,128)),ceild(16*t2-Nz-1020,1024)),ceild(32*t3-Ny-1020,1024));t4<=min(min(min(min(floord(Nt+Nx-4,1024),floord(8*t1+Nx+13,1024)),floord(16*t2+Nx+12,1024)),floord(32*t3+Nx+28,1024)),floord(16*t1-16*t2+Nz+Nx+11,1024));t4++) {
          for (t5=max(max(max(max(max(0,8*t1),16*t1-16*t2+1),16*t2-Nz+2),32*t3-Ny+2),1024*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,8*t1+15),16*t2+14),32*t3+30),1024*t4+1022),16*t1-16*t2+Nz+13);t5++) {
            for (t6=max(max(16*t2,t5+1),-16*t1+16*t2+2*t5-15);t6<=min(min(16*t2+15,-16*t1+16*t2+2*t5),t5+Nz-2);t6++) {
              for (t7=max(32*t3,t5+1);t7<=min(32*t3+31,t5+Ny-2);t7++) {
                lbv=max(1024*t4,t5+1);
                ubv=min(1024*t4+1023,t5+Nx-2);
/* Innermost x loop: vectorizable, unit stride. */
#pragma ivdep
#pragma vector always
                for (t8=lbv;t8<=ubv;t8++) {
                  /* 7-point stencil, double-buffered on (t5+1)%2 / t5%2. */
                  A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = ((alpha * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (beta * (((((A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)] + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1]) + A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) + 1])));;
                }
              }
            }
          }
        }
      }
    }
  }
}
/* End of CLooG code */
    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    min_tdiff = min(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }

  PRINT_RESULTS(1, "constant")

#ifdef LIKWID_PERFMON
#pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif

  // Free allocated arrays (Causing performance degradation
  /* for(i=0; i<Nz; i++){
     for(j=0;j<Ny;j++){
     free(A[0][i][j]);
     free(A[1][i][j]);
     }
     free(A[0][i]);
     free(A[1][i]);
     }
     free(A[0]);
     free(A[1]);
   */
  return 0;
}
Stmt.h
//===--- Stmt.h - Classes for representing statements -----------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file defines the Stmt interface and subclasses. // //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_AST_STMT_H #define LLVM_CLANG_AST_STMT_H #include "clang/AST/DeclGroup.h" #include "clang/AST/StmtIterator.h" #include "clang/Basic/CapturedStmt.h" #include "clang/Basic/IdentifierTable.h" #include "clang/Basic/LLVM.h" #include "clang/Basic/SourceLocation.h" #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/PointerIntPair.h" #include "llvm/ADT/iterator.h" #include "llvm/Support/Compiler.h" #include "llvm/Support/ErrorHandling.h" #include <string> namespace llvm { class FoldingSetNodeID; } namespace clang { class ASTContext; class Attr; class CapturedDecl; class Decl; class Expr; class IdentifierInfo; class LabelDecl; class ODRHash; class ParmVarDecl; class PrinterHelper; struct PrintingPolicy; class QualType; class RecordDecl; class SourceManager; class StringLiteral; class SwitchStmt; class Token; class VarDecl; //===----------------------------------------------------------------------===// // AST classes for statements. //===----------------------------------------------------------------------===// /// Stmt - This represents one statement. 
///
// Stmt - Base class of every statement (and, via Expr, every expression)
// AST node.  NOTE(review): formatting restored from a whitespace-mangled
// paste; all tokens are unchanged.
class alignas(void *) Stmt {
public:
  // One enumerator per concrete statement class, generated from
  // clang/AST/StmtNodes.inc via the STMT/STMT_RANGE tblgen macros below.
  enum StmtClass {
    NoStmtClass = 0,
#define STMT(CLASS, PARENT) CLASS##Class,
#define STMT_RANGE(BASE, FIRST, LAST) \
        first##BASE##Constant=FIRST##Class, last##BASE##Constant=LAST##Class,
#define LAST_STMT_RANGE(BASE, FIRST, LAST) \
        first##BASE##Constant=FIRST##Class, last##BASE##Constant=LAST##Class
#define ABSTRACT_STMT(STMT)
#include "clang/AST/StmtNodes.inc"
  };

  // Make vanilla 'new' and 'delete' illegal for Stmts.
protected:
  void *operator new(size_t bytes) noexcept {
    llvm_unreachable("Stmts cannot be allocated with regular 'new'.");
  }
  void operator delete(void *data) noexcept {
    llvm_unreachable("Stmts cannot be released with regular 'delete'.");
  }

  class StmtBitfields {
    friend class Stmt;

    /// \brief The statement class.
    unsigned sClass : 8;
  };
  enum { NumStmtBits = 8 };

  class CompoundStmtBitfields {
    friend class CompoundStmt;
    unsigned : NumStmtBits;

    unsigned NumStmts : 32 - NumStmtBits;
  };

  class IfStmtBitfields {
    friend class IfStmt;
    unsigned : NumStmtBits;

    unsigned IsConstexpr : 1;
  };

  class ExprBitfields {
    friend class Expr;
    friend class DeclRefExpr; // computeDependence
    friend class InitListExpr; // ctor
    friend class DesignatedInitExpr; // ctor
    friend class BlockDeclRefExpr; // ctor
    friend class ASTStmtReader; // deserialization
    friend class CXXNewExpr; // ctor
    friend class DependentScopeDeclRefExpr; // ctor
    friend class CXXConstructExpr; // ctor
    friend class CallExpr; // ctor
    friend class OffsetOfExpr; // ctor
    friend class ObjCMessageExpr; // ctor
    friend class ObjCArrayLiteral; // ctor
    friend class ObjCDictionaryLiteral; // ctor
    friend class ShuffleVectorExpr; // ctor
    friend class ParenListExpr; // ctor
    friend class CXXUnresolvedConstructExpr; // ctor
    friend class CXXDependentScopeMemberExpr; // ctor
    friend class OverloadExpr; // ctor
    friend class PseudoObjectExpr; // ctor
    friend class AtomicExpr; // ctor
    friend class OpaqueValueExpr; // ctor

    unsigned : NumStmtBits;

    unsigned ValueKind : 2;
    unsigned ObjectKind : 3;
    unsigned TypeDependent : 1;
    unsigned ValueDependent : 1;
    unsigned InstantiationDependent : 1;
    unsigned ContainsUnexpandedParameterPack : 1;
  };
  enum { NumExprBits = 17 };

  class CharacterLiteralBitfields {
    friend class CharacterLiteral;
    unsigned : NumExprBits;

    unsigned Kind : 3;
  };

  enum APFloatSemantics {
    IEEEhalf,
    IEEEsingle,
    IEEEdouble,
    x87DoubleExtended,
    IEEEquad,
    PPCDoubleDouble
  };

  class FloatingLiteralBitfields {
    friend class FloatingLiteral;
    unsigned : NumExprBits;

    unsigned Semantics : 3; // Provides semantics for APFloat construction
    unsigned IsExact : 1;
  };

  class UnaryExprOrTypeTraitExprBitfields {
    friend class UnaryExprOrTypeTraitExpr;
    unsigned : NumExprBits;

    unsigned Kind : 2;
    unsigned IsType : 1; // true if operand is a type, false if an expression.
  };

  class DeclRefExprBitfields {
    friend class DeclRefExpr;
    friend class ASTStmtReader; // deserialization
    unsigned : NumExprBits;

    unsigned HasQualifier : 1;
    unsigned HasTemplateKWAndArgsInfo : 1;
    unsigned HasFoundDecl : 1;
    unsigned HadMultipleCandidates : 1;
    unsigned RefersToEnclosingVariableOrCapture : 1;
  };

  class CastExprBitfields {
    friend class CastExpr;
    unsigned : NumExprBits;

    unsigned Kind : 6;
    unsigned BasePathSize : 32 - 6 - NumExprBits;
  };

  class CallExprBitfields {
    friend class CallExpr;
    unsigned : NumExprBits;

    unsigned NumPreArgs : 1;
  };

  class ExprWithCleanupsBitfields {
    friend class ExprWithCleanups;
    friend class ASTStmtReader; // deserialization

    unsigned : NumExprBits;

    // When false, it must not have side effects.
    unsigned CleanupsHaveSideEffects : 1;

    unsigned NumObjects : 32 - 1 - NumExprBits;
  };

  class PseudoObjectExprBitfields {
    friend class PseudoObjectExpr;
    friend class ASTStmtReader; // deserialization

    unsigned : NumExprBits;

    // These don't need to be particularly wide, because they're
    // strictly limited by the forms of expressions we permit.
    unsigned NumSubExprs : 8;
    unsigned ResultIndex : 32 - 8 - NumExprBits;
  };

  class ObjCIndirectCopyRestoreExprBitfields {
    friend class ObjCIndirectCopyRestoreExpr;
    unsigned : NumExprBits;

    unsigned ShouldCopy : 1;
  };

  class InitListExprBitfields {
    friend class InitListExpr;

    unsigned : NumExprBits;

    /// Whether this initializer list originally had a GNU array-range
    /// designator in it. This is a temporary marker used by CodeGen.
    unsigned HadArrayRangeDesignator : 1;
  };

  class TypeTraitExprBitfields {
    friend class TypeTraitExpr;
    friend class ASTStmtReader;
    friend class ASTStmtWriter;

    unsigned : NumExprBits;

    /// \brief The kind of type trait, which is a value of a TypeTrait
    /// enumerator.
    unsigned Kind : 8;

    /// \brief If this expression is not value-dependent, this indicates whether
    /// the trait evaluated true or false.
    unsigned Value : 1;

    /// \brief The number of arguments to this type trait.
    unsigned NumArgs : 32 - 8 - 1 - NumExprBits;
  };

  class CoawaitExprBitfields {
    friend class CoawaitExpr;

    unsigned : NumExprBits;

    unsigned IsImplicit : 1;
  };

  // All subclass bitfield structs overlap in this one union; every struct
  // above reserves the low NumStmtBits bits so StmtBits.sClass stays valid.
  union {
    StmtBitfields StmtBits;
    CompoundStmtBitfields CompoundStmtBits;
    IfStmtBitfields IfStmtBits;
    ExprBitfields ExprBits;
    CharacterLiteralBitfields CharacterLiteralBits;
    FloatingLiteralBitfields FloatingLiteralBits;
    UnaryExprOrTypeTraitExprBitfields UnaryExprOrTypeTraitExprBits;
    DeclRefExprBitfields DeclRefExprBits;
    CastExprBitfields CastExprBits;
    CallExprBitfields CallExprBits;
    ExprWithCleanupsBitfields ExprWithCleanupsBits;
    PseudoObjectExprBitfields PseudoObjectExprBits;
    ObjCIndirectCopyRestoreExprBitfields ObjCIndirectCopyRestoreExprBits;
    InitListExprBitfields InitListExprBits;
    TypeTraitExprBitfields TypeTraitExprBits;
    CoawaitExprBitfields CoawaitBits;
  };

  friend class ASTStmtReader;
  friend class ASTStmtWriter;

public:
  // Only allow allocation of Stmts using the allocator in ASTContext
  // or by doing a placement new.
// (continuation of class Stmt: ASTContext-based allocation and public API)
  void* operator new(size_t bytes, const ASTContext& C,
                     unsigned alignment = 8);

  void* operator new(size_t bytes, const ASTContext* C,
                     unsigned alignment = 8) {
    return operator new(bytes, *C, alignment);
  }

  void *operator new(size_t bytes, void *mem) noexcept { return mem; }

  void operator delete(void *, const ASTContext &, unsigned) noexcept {}
  void operator delete(void *, const ASTContext *, unsigned) noexcept {}
  void operator delete(void *, size_t) noexcept {}
  void operator delete(void *, void *) noexcept {}

public:
  /// \brief A placeholder type used to construct an empty shell of a
  /// type, that will be filled in later (e.g., by some
  /// de-serialization).
  struct EmptyShell { };

protected:
  /// Iterator for iterating over Stmt * arrays that contain only Expr *
  ///
  /// This is needed because AST nodes use Stmt* arrays to store
  /// references to children (to be compatible with StmtIterator).
  struct ExprIterator
      : llvm::iterator_adaptor_base<ExprIterator, Stmt **,
                                    std::random_access_iterator_tag, Expr *> {
    ExprIterator() : iterator_adaptor_base(nullptr) {}
    ExprIterator(Stmt **I) : iterator_adaptor_base(I) {}

    reference operator*() const {
      assert((*I)->getStmtClass() >= firstExprConstant &&
             (*I)->getStmtClass() <= lastExprConstant);
      return *reinterpret_cast<Expr **>(I);
    }
  };

  /// Const iterator for iterating over Stmt * arrays that contain only Expr *
  struct ConstExprIterator
      : llvm::iterator_adaptor_base<ConstExprIterator, const Stmt *const *,
                                    std::random_access_iterator_tag,
                                    const Expr *const> {
    ConstExprIterator() : iterator_adaptor_base(nullptr) {}
    ConstExprIterator(const Stmt *const *I) : iterator_adaptor_base(I) {}

    reference operator*() const {
      assert((*I)->getStmtClass() >= firstExprConstant &&
             (*I)->getStmtClass() <= lastExprConstant);
      return *reinterpret_cast<const Expr *const *>(I);
    }
  };

private:
  /// \brief Whether statistic collection is enabled.
  static bool StatisticsEnabled;

protected:
  /// \brief Construct an empty statement.
  explicit Stmt(StmtClass SC, EmptyShell) : Stmt(SC) {}

public:
  Stmt(StmtClass SC) {
    // Stmt must stay exactly one pointer in size; the bitfield union above
    // is the only per-node state in the base class.
    static_assert(sizeof(*this) == sizeof(void *),
                  "changing bitfields changed sizeof(Stmt)");
    static_assert(sizeof(*this) % alignof(void *) == 0,
                  "Insufficient alignment!");
    StmtBits.sClass = SC;
    if (StatisticsEnabled) Stmt::addStmtClass(SC);
  }

  StmtClass getStmtClass() const {
    return static_cast<StmtClass>(StmtBits.sClass);
  }
  const char *getStmtClassName() const;

  /// SourceLocation tokens are not useful in isolation - they are low level
  /// value objects created/interpreted by SourceManager. We assume AST
  /// clients will have a pointer to the respective SourceManager.
  SourceRange getSourceRange() const LLVM_READONLY;
  SourceLocation getLocStart() const LLVM_READONLY;
  SourceLocation getLocEnd() const LLVM_READONLY;

  // global temp stats (until we have a per-module visitor)
  static void addStmtClass(const StmtClass s);
  static void EnableStatistics();
  static void PrintStats();

  /// \brief Dumps the specified AST fragment and all subtrees to
  /// \c llvm::errs().
  void dump() const;
  void dump(SourceManager &SM) const;
  void dump(raw_ostream &OS, SourceManager &SM) const;
  void dump(raw_ostream &OS) const;

  /// dumpColor - same as dump(), but forces color highlighting.
  void dumpColor() const;

  /// dumpPretty/printPretty - These two methods do a "pretty print" of the AST
  /// back to its original source language syntax.
  void dumpPretty(const ASTContext &Context) const;
  void printPretty(raw_ostream &OS, PrinterHelper *Helper,
                   const PrintingPolicy &Policy,
                   unsigned Indentation = 0) const;

  /// viewAST - Visualize an AST rooted at this Stmt* using GraphViz.  Only
  /// works on systems with GraphViz (Mac OS X) or dot+gv installed.
  void viewAST() const;

  /// Skip past any implicit AST nodes which might surround this
  /// statement, such as ExprWithCleanups or ImplicitCastExpr nodes.
  Stmt *IgnoreImplicit();
  const Stmt *IgnoreImplicit() const {
    return const_cast<Stmt *>(this)->IgnoreImplicit();
  }

  /// \brief Skip no-op (attributed, compound) container stmts and skip captured
  /// stmt at the top, if \a IgnoreCaptured is true.
  Stmt *IgnoreContainers(bool IgnoreCaptured = false);

  const Stmt *stripLabelLikeStatements() const;
  Stmt *stripLabelLikeStatements() {
    return const_cast<Stmt*>(
      const_cast<const Stmt*>(this)->stripLabelLikeStatements());
  }

  /// Child Iterators: All subclasses must implement 'children'
  /// to permit easy iteration over the substatements/subexpessions of an
  /// AST node.  This permits easy iteration over all nodes in the AST.
  typedef StmtIterator child_iterator;
  typedef ConstStmtIterator const_child_iterator;

  typedef llvm::iterator_range<child_iterator> child_range;
  typedef llvm::iterator_range<const_child_iterator> const_child_range;

  child_range children();
  const_child_range children() const {
    auto Children = const_cast<Stmt *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  child_iterator child_begin() { return children().begin(); }
  child_iterator child_end() { return children().end(); }

  const_child_iterator child_begin() const { return children().begin(); }
  const_child_iterator child_end() const { return children().end(); }

  /// \brief Produce a unique representation of the given statement.
  ///
  /// \param ID once the profiling operation is complete, will contain
  /// the unique representation of the given statement.
  ///
  /// \param Context the AST context in which the statement resides
  ///
  /// \param Canonical whether the profile should be based on the canonical
  /// representation of this statement (e.g., where non-type template
  /// parameters are identified by index/level rather than their
  /// declaration pointers) or the exact representation of the statement as
  /// written in the source.
  void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context,
               bool Canonical) const;

  /// \brief Calculate a unique representation for a statement that is
  /// stable across compiler invocations.
  ///
  /// \param ID profile information will be stored in ID.
  ///
  /// \param Hash an ODRHash object which will be called where pointers would
  /// have been used in the Profile function.
  void ProcessODRHash(llvm::FoldingSetNodeID &ID, ODRHash& Hash) const;
};

/// DeclStmt - Adaptor class for mixing declarations with statements and
/// expressions. For example, CompoundStmt mixes statements, expressions
/// and declarations (variables, types). Another example is ForStmt, where
/// the first statement can be an expression or a declaration.
///
class DeclStmt : public Stmt {
  DeclGroupRef DG;
  SourceLocation StartLoc, EndLoc;

public:
  DeclStmt(DeclGroupRef dg, SourceLocation startLoc,
           SourceLocation endLoc)
    : Stmt(DeclStmtClass), DG(dg), StartLoc(startLoc), EndLoc(endLoc) {}

  /// \brief Build an empty declaration statement.
  explicit DeclStmt(EmptyShell Empty) : Stmt(DeclStmtClass, Empty) { }

  /// isSingleDecl - This method returns true if this DeclStmt refers
  /// to a single Decl.
  bool isSingleDecl() const {
    return DG.isSingleDecl();
  }

  const Decl *getSingleDecl() const { return DG.getSingleDecl(); }
  Decl *getSingleDecl() { return DG.getSingleDecl(); }

  const DeclGroupRef getDeclGroup() const { return DG; }
  DeclGroupRef getDeclGroup() { return DG; }
  void setDeclGroup(DeclGroupRef DGR) { DG = DGR; }

  SourceLocation getStartLoc() const { return StartLoc; }
  void setStartLoc(SourceLocation L) { StartLoc = L; }
  SourceLocation getEndLoc() const { return EndLoc; }
  void setEndLoc(SourceLocation L) { EndLoc = L; }

  SourceLocation getLocStart() const LLVM_READONLY { return StartLoc; }
  SourceLocation getLocEnd() const LLVM_READONLY { return EndLoc; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == DeclStmtClass;
  }

  // Iterators over subexpressions.
// (continuation of class DeclStmt: child and declaration iterators)
  child_range children() {
    return child_range(child_iterator(DG.begin(), DG.end()),
                       child_iterator(DG.end(), DG.end()));
  }

  typedef DeclGroupRef::iterator decl_iterator;
  typedef DeclGroupRef::const_iterator const_decl_iterator;
  typedef llvm::iterator_range<decl_iterator> decl_range;
  typedef llvm::iterator_range<const_decl_iterator> decl_const_range;

  decl_range decls() { return decl_range(decl_begin(), decl_end()); }
  decl_const_range decls() const {
    return decl_const_range(decl_begin(), decl_end());
  }
  decl_iterator decl_begin() { return DG.begin(); }
  decl_iterator decl_end() { return DG.end(); }
  const_decl_iterator decl_begin() const { return DG.begin(); }
  const_decl_iterator decl_end() const { return DG.end(); }

  typedef std::reverse_iterator<decl_iterator> reverse_decl_iterator;
  reverse_decl_iterator decl_rbegin() {
    return reverse_decl_iterator(decl_end());
  }
  reverse_decl_iterator decl_rend() {
    return reverse_decl_iterator(decl_begin());
  }
};

/// NullStmt - This is the null statement ";": C99 6.8.3p3.
///
class NullStmt : public Stmt {
  SourceLocation SemiLoc;

  /// \brief True if the null statement was preceded by an empty macro, e.g:
  /// @code
  ///   #define CALL(x)
  ///   CALL(0);
  /// @endcode
  bool HasLeadingEmptyMacro;

public:
  NullStmt(SourceLocation L, bool hasLeadingEmptyMacro = false)
    : Stmt(NullStmtClass), SemiLoc(L),
      HasLeadingEmptyMacro(hasLeadingEmptyMacro) {}

  /// \brief Build an empty null statement.
  explicit NullStmt(EmptyShell Empty) : Stmt(NullStmtClass, Empty),
      HasLeadingEmptyMacro(false) { }

  SourceLocation getSemiLoc() const { return SemiLoc; }
  void setSemiLoc(SourceLocation L) { SemiLoc = L; }

  bool hasLeadingEmptyMacro() const { return HasLeadingEmptyMacro; }

  SourceLocation getLocStart() const LLVM_READONLY { return SemiLoc; }
  SourceLocation getLocEnd() const LLVM_READONLY { return SemiLoc; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == NullStmtClass;
  }

  // A null statement has no children.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  friend class ASTStmtReader;
  friend class ASTStmtWriter;
};

/// CompoundStmt - This represents a group of statements like { stmt stmt }.
///
class CompoundStmt : public Stmt {
  Stmt** Body;
  SourceLocation LBraceLoc, RBraceLoc;

  friend class ASTStmtReader;

public:
  CompoundStmt(const ASTContext &C, ArrayRef<Stmt*> Stmts,
               SourceLocation LB, SourceLocation RB);

  // \brief Build an empty compound statement with a location.
  explicit CompoundStmt(SourceLocation Loc)
    : Stmt(CompoundStmtClass), Body(nullptr), LBraceLoc(Loc), RBraceLoc(Loc) {
    CompoundStmtBits.NumStmts = 0;
  }

  // \brief Build an empty compound statement.
  explicit CompoundStmt(EmptyShell Empty)
    : Stmt(CompoundStmtClass, Empty), Body(nullptr) {
    CompoundStmtBits.NumStmts = 0;
  }

  void setStmts(const ASTContext &C, ArrayRef<Stmt *> Stmts);

  // The statement count lives in the shared Stmt bitfield union.
  bool body_empty() const { return CompoundStmtBits.NumStmts == 0; }
  unsigned size() const { return CompoundStmtBits.NumStmts; }

  typedef Stmt** body_iterator;
  typedef llvm::iterator_range<body_iterator> body_range;

  body_range body() { return body_range(body_begin(), body_end()); }
  body_iterator body_begin() { return Body; }
  body_iterator body_end() { return Body + size(); }
  Stmt *body_front() { return !body_empty() ? Body[0] : nullptr; }

  Stmt *body_back() {
    return !body_empty() ?
// (continuation of class CompoundStmt: tail of body_back's conditional)
           Body[size()-1] : nullptr;
  }
  void setLastStmt(Stmt *S) {
    assert(!body_empty() && "setLastStmt");
    Body[size()-1] = S;
  }

  typedef Stmt* const * const_body_iterator;
  typedef llvm::iterator_range<const_body_iterator> body_const_range;

  body_const_range body() const {
    return body_const_range(body_begin(), body_end());
  }
  const_body_iterator body_begin() const { return Body; }
  const_body_iterator body_end() const { return Body + size(); }
  const Stmt *body_front() const {
    return !body_empty() ? Body[0] : nullptr;
  }
  const Stmt *body_back() const {
    return !body_empty() ? Body[size() - 1] : nullptr;
  }

  typedef std::reverse_iterator<body_iterator> reverse_body_iterator;
  reverse_body_iterator body_rbegin() {
    return reverse_body_iterator(body_end());
  }
  reverse_body_iterator body_rend() {
    return reverse_body_iterator(body_begin());
  }

  typedef std::reverse_iterator<const_body_iterator>
          const_reverse_body_iterator;
  const_reverse_body_iterator body_rbegin() const {
    return const_reverse_body_iterator(body_end());
  }
  const_reverse_body_iterator body_rend() const {
    return const_reverse_body_iterator(body_begin());
  }

  SourceLocation getLocStart() const LLVM_READONLY { return LBraceLoc; }
  SourceLocation getLocEnd() const LLVM_READONLY { return RBraceLoc; }

  SourceLocation getLBracLoc() const { return LBraceLoc; }
  SourceLocation getRBracLoc() const { return RBraceLoc; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == CompoundStmtClass;
  }

  // Iterators
  child_range children() {
    return child_range(Body, Body + CompoundStmtBits.NumStmts);
  }

  const_child_range children() const {
    return const_child_range(child_iterator(Body),
                             child_iterator(Body + CompoundStmtBits.NumStmts));
  }
};

// SwitchCase is the base class for CaseStmt and DefaultStmt,
class SwitchCase : public Stmt {
protected:
  // A pointer to the following CaseStmt or DefaultStmt class,
  // used by SwitchStmt.
  SwitchCase *NextSwitchCase;
  SourceLocation KeywordLoc;
  SourceLocation ColonLoc;

  SwitchCase(StmtClass SC, SourceLocation KWLoc, SourceLocation ColonLoc)
    : Stmt(SC), NextSwitchCase(nullptr), KeywordLoc(KWLoc),
      ColonLoc(ColonLoc) {
  }

  SwitchCase(StmtClass SC, EmptyShell)
    : Stmt(SC), NextSwitchCase(nullptr) {}

public:
  const SwitchCase *getNextSwitchCase() const { return NextSwitchCase; }

  SwitchCase *getNextSwitchCase() { return NextSwitchCase; }

  void setNextSwitchCase(SwitchCase *SC) { NextSwitchCase = SC; }

  SourceLocation getKeywordLoc() const { return KeywordLoc; }
  void setKeywordLoc(SourceLocation L) { KeywordLoc = L; }
  SourceLocation getColonLoc() const { return ColonLoc; }
  void setColonLoc(SourceLocation L) { ColonLoc = L; }

  Stmt *getSubStmt();
  const Stmt *getSubStmt() const {
    return const_cast<SwitchCase*>(this)->getSubStmt();
  }

  SourceLocation getLocStart() const LLVM_READONLY { return KeywordLoc; }
  SourceLocation getLocEnd() const LLVM_READONLY;

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == CaseStmtClass ||
           T->getStmtClass() == DefaultStmtClass;
  }
};

class CaseStmt : public SwitchCase {
  SourceLocation EllipsisLoc;
  enum { LHS, RHS, SUBSTMT, END_EXPR };
  Stmt* SubExprs[END_EXPR];  // The expression for the RHS is Non-null for
                             // GNU "case 1 ... 4" extension
public:
  CaseStmt(Expr *lhs, Expr *rhs, SourceLocation caseLoc,
           SourceLocation ellipsisLoc, SourceLocation colonLoc)
    : SwitchCase(CaseStmtClass, caseLoc, colonLoc) {
    SubExprs[SUBSTMT] = nullptr;
    SubExprs[LHS] = reinterpret_cast<Stmt*>(lhs);
    SubExprs[RHS] = reinterpret_cast<Stmt*>(rhs);
    EllipsisLoc = ellipsisLoc;
  }

  /// \brief Build an empty switch case statement.
// (continuation of class CaseStmt)
  explicit CaseStmt(EmptyShell Empty) : SwitchCase(CaseStmtClass, Empty) { }

  SourceLocation getCaseLoc() const { return KeywordLoc; }
  void setCaseLoc(SourceLocation L) { KeywordLoc = L; }
  SourceLocation getEllipsisLoc() const { return EllipsisLoc; }
  void setEllipsisLoc(SourceLocation L) { EllipsisLoc = L; }
  SourceLocation getColonLoc() const { return ColonLoc; }
  void setColonLoc(SourceLocation L) { ColonLoc = L; }

  Expr *getLHS() { return reinterpret_cast<Expr*>(SubExprs[LHS]); }
  Expr *getRHS() { return reinterpret_cast<Expr*>(SubExprs[RHS]); }
  Stmt *getSubStmt() { return SubExprs[SUBSTMT]; }

  const Expr *getLHS() const {
    return reinterpret_cast<const Expr*>(SubExprs[LHS]);
  }
  const Expr *getRHS() const {
    return reinterpret_cast<const Expr*>(SubExprs[RHS]);
  }
  const Stmt *getSubStmt() const { return SubExprs[SUBSTMT]; }

  void setSubStmt(Stmt *S) { SubExprs[SUBSTMT] = S; }
  void setLHS(Expr *Val) { SubExprs[LHS] = reinterpret_cast<Stmt*>(Val); }
  void setRHS(Expr *Val) { SubExprs[RHS] = reinterpret_cast<Stmt*>(Val); }

  SourceLocation getLocStart() const LLVM_READONLY { return KeywordLoc; }
  SourceLocation getLocEnd() const LLVM_READONLY {
    // Handle deeply nested case statements with iteration instead of recursion.
    const CaseStmt *CS = this;
    while (const CaseStmt *CS2 = dyn_cast<CaseStmt>(CS->getSubStmt()))
      CS = CS2;

    return CS->getSubStmt()->getLocEnd();
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == CaseStmtClass;
  }

  // Iterators
  child_range children() {
    return child_range(&SubExprs[0], &SubExprs[END_EXPR]);
  }
};

class DefaultStmt : public SwitchCase {
  Stmt* SubStmt;
public:
  DefaultStmt(SourceLocation DL, SourceLocation CL, Stmt *substmt) :
    SwitchCase(DefaultStmtClass, DL, CL), SubStmt(substmt) {}

  /// \brief Build an empty default statement.
  explicit DefaultStmt(EmptyShell Empty)
    : SwitchCase(DefaultStmtClass, Empty) { }

  Stmt *getSubStmt() { return SubStmt; }
  const Stmt *getSubStmt() const { return SubStmt; }
  void setSubStmt(Stmt *S) { SubStmt = S; }

  SourceLocation getDefaultLoc() const { return KeywordLoc; }
  void setDefaultLoc(SourceLocation L) { KeywordLoc = L; }
  SourceLocation getColonLoc() const { return ColonLoc; }
  void setColonLoc(SourceLocation L) { ColonLoc = L; }

  SourceLocation getLocStart() const LLVM_READONLY { return KeywordLoc; }
  SourceLocation getLocEnd() const LLVM_READONLY { return SubStmt->getLocEnd();}

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == DefaultStmtClass;
  }

  // Iterators
  child_range children() { return child_range(&SubStmt, &SubStmt+1); }
};

// Out-of-line so both CaseStmt and DefaultStmt are complete here.
inline SourceLocation SwitchCase::getLocEnd() const {
  if (const CaseStmt *CS = dyn_cast<CaseStmt>(this))
    return CS->getLocEnd();
  return cast<DefaultStmt>(this)->getLocEnd();
}

/// LabelStmt - Represents a label, which has a substatement.  For example:
///    foo: return;
///
class LabelStmt : public Stmt {
  SourceLocation IdentLoc;
  LabelDecl *TheDecl;
  Stmt *SubStmt;

public:
  LabelStmt(SourceLocation IL, LabelDecl *D, Stmt *substmt)
      : Stmt(LabelStmtClass), IdentLoc(IL), TheDecl(D), SubStmt(substmt) {
    static_assert(sizeof(LabelStmt) ==
                      2 * sizeof(SourceLocation) + 2 * sizeof(void *),
                  "LabelStmt too big");
  }

  // \brief Build an empty label statement.
// (continuation of class LabelStmt)
  explicit LabelStmt(EmptyShell Empty) : Stmt(LabelStmtClass, Empty) { }

  SourceLocation getIdentLoc() const { return IdentLoc; }
  LabelDecl *getDecl() const { return TheDecl; }
  void setDecl(LabelDecl *D) { TheDecl = D; }
  const char *getName() const;
  Stmt *getSubStmt() { return SubStmt; }
  const Stmt *getSubStmt() const { return SubStmt; }
  void setIdentLoc(SourceLocation L) { IdentLoc = L; }
  void setSubStmt(Stmt *SS) { SubStmt = SS; }

  SourceLocation getLocStart() const LLVM_READONLY { return IdentLoc; }
  SourceLocation getLocEnd() const LLVM_READONLY { return SubStmt->getLocEnd();}

  child_range children() { return child_range(&SubStmt, &SubStmt+1); }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == LabelStmtClass;
  }
};

/// \brief Represents an attribute applied to a statement.
///
/// Represents an attribute applied to a statement. For example:
///   [[omp::for(...)]] for (...) { ... }
///
class AttributedStmt : public Stmt {
  Stmt *SubStmt;
  SourceLocation AttrLoc;
  unsigned NumAttrs;

  friend class ASTStmtReader;

  // The Attr* array is tail-allocated immediately after this object
  // (see getAttrArrayPtr below), so construction goes through Create().
  AttributedStmt(SourceLocation Loc, ArrayRef<const Attr*> Attrs,
                 Stmt *SubStmt)
    : Stmt(AttributedStmtClass), SubStmt(SubStmt), AttrLoc(Loc),
      NumAttrs(Attrs.size()) {
    std::copy(Attrs.begin(), Attrs.end(), getAttrArrayPtr());
  }

  explicit AttributedStmt(EmptyShell Empty, unsigned NumAttrs)
    : Stmt(AttributedStmtClass, Empty), NumAttrs(NumAttrs) {
    std::fill_n(getAttrArrayPtr(), NumAttrs, nullptr);
  }

  const Attr *const *getAttrArrayPtr() const {
    return reinterpret_cast<const Attr *const *>(this + 1);
  }
  const Attr **getAttrArrayPtr() {
    return reinterpret_cast<const Attr **>(this + 1);
  }

public:
  static AttributedStmt *Create(const ASTContext &C, SourceLocation Loc,
                                ArrayRef<const Attr*> Attrs, Stmt *SubStmt);

  // \brief Build an empty attributed statement.
  static AttributedStmt *CreateEmpty(const ASTContext &C, unsigned NumAttrs);

  SourceLocation getAttrLoc() const { return AttrLoc; }
  ArrayRef<const Attr*> getAttrs() const {
    return llvm::makeArrayRef(getAttrArrayPtr(), NumAttrs);
  }
  Stmt *getSubStmt() { return SubStmt; }
  const Stmt *getSubStmt() const { return SubStmt; }

  SourceLocation getLocStart() const LLVM_READONLY { return AttrLoc; }
  SourceLocation getLocEnd() const LLVM_READONLY { return SubStmt->getLocEnd();}

  child_range children() { return child_range(&SubStmt, &SubStmt + 1); }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == AttributedStmtClass;
  }
};

/// IfStmt - This represents an if/then/else.
///
class IfStmt : public Stmt {
  enum { INIT, VAR, COND, THEN, ELSE, END_EXPR };
  Stmt* SubExprs[END_EXPR];

  SourceLocation IfLoc;
  SourceLocation ElseLoc;

public:
  IfStmt(const ASTContext &C, SourceLocation IL,
         bool IsConstexpr, Stmt *init, VarDecl *var, Expr *cond,
         Stmt *then, SourceLocation EL = SourceLocation(),
         Stmt *elsev = nullptr);

  /// \brief Build an empty if/then/else statement
  explicit IfStmt(EmptyShell Empty) : Stmt(IfStmtClass, Empty) { }

  /// \brief Retrieve the variable declared in this "if" statement, if any.
  ///
  /// In the following example, "x" is the condition variable.
  /// \code
  /// if (int x = foo()) {
  ///   printf("x is %d", x);
  /// }
  /// \endcode
  VarDecl *getConditionVariable() const;
  void setConditionVariable(const ASTContext &C, VarDecl *V);

  /// If this IfStmt has a condition variable, return the faux DeclStmt
  /// associated with the creation of that condition variable.
// (continuation of class IfStmt)
  const DeclStmt *getConditionVariableDeclStmt() const {
    return reinterpret_cast<DeclStmt*>(SubExprs[VAR]);
  }

  Stmt *getInit() { return SubExprs[INIT]; }
  const Stmt *getInit() const { return SubExprs[INIT]; }
  void setInit(Stmt *S) { SubExprs[INIT] = S; }
  const Expr *getCond() const { return reinterpret_cast<Expr*>(SubExprs[COND]);}
  void setCond(Expr *E) { SubExprs[COND] = reinterpret_cast<Stmt *>(E); }
  const Stmt *getThen() const { return SubExprs[THEN]; }
  void setThen(Stmt *S) { SubExprs[THEN] = S; }
  const Stmt *getElse() const { return SubExprs[ELSE]; }
  void setElse(Stmt *S) { SubExprs[ELSE] = S; }

  Expr *getCond() { return reinterpret_cast<Expr*>(SubExprs[COND]); }
  Stmt *getThen() { return SubExprs[THEN]; }
  Stmt *getElse() { return SubExprs[ELSE]; }

  SourceLocation getIfLoc() const { return IfLoc; }
  void setIfLoc(SourceLocation L) { IfLoc = L; }
  SourceLocation getElseLoc() const { return ElseLoc; }
  void setElseLoc(SourceLocation L) { ElseLoc = L; }

  // 'if constexpr' flag lives in the shared Stmt bitfield union.
  bool isConstexpr() const { return IfStmtBits.IsConstexpr; }
  void setConstexpr(bool C) { IfStmtBits.IsConstexpr = C; }

  bool isObjCAvailabilityCheck() const;

  SourceLocation getLocStart() const LLVM_READONLY { return IfLoc; }
  SourceLocation getLocEnd() const LLVM_READONLY {
    if (SubExprs[ELSE])
      return SubExprs[ELSE]->getLocEnd();
    else
      return SubExprs[THEN]->getLocEnd();
  }

  // Iterators over subexpressions.  The iterators will include iterating
  // over the initialization expression referenced by the condition variable.
  child_range children() {
    return child_range(&SubExprs[0], &SubExprs[0]+END_EXPR);
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == IfStmtClass;
  }
};

/// SwitchStmt - This represents a 'switch' stmt.
///
class SwitchStmt : public Stmt {
  SourceLocation SwitchLoc;
  enum { INIT, VAR, COND, BODY, END_EXPR };
  Stmt* SubExprs[END_EXPR];

  // This points to a linked list of case and default statements and, if the
  // SwitchStmt is a switch on an enum value, records whether all the enum
  // values were covered by CaseStmts.  The coverage information value is meant
  // to be a hint for possible clients.
  llvm::PointerIntPair<SwitchCase *, 1, bool> FirstCase;

public:
  SwitchStmt(const ASTContext &C, Stmt *Init, VarDecl *Var, Expr *cond);

  /// \brief Build a empty switch statement.
  explicit SwitchStmt(EmptyShell Empty) : Stmt(SwitchStmtClass, Empty) { }

  /// \brief Retrieve the variable declared in this "switch" statement, if any.
  ///
  /// In the following example, "x" is the condition variable.
  /// \code
  /// switch (int x = foo()) {
  /// case 0: break;
  /// // ...
  /// }
  /// \endcode
  VarDecl *getConditionVariable() const;
  void setConditionVariable(const ASTContext &C, VarDecl *V);

  /// If this SwitchStmt has a condition variable, return the faux DeclStmt
  /// associated with the creation of that condition variable.
  const DeclStmt *getConditionVariableDeclStmt() const {
    return reinterpret_cast<DeclStmt*>(SubExprs[VAR]);
  }

  Stmt *getInit() { return SubExprs[INIT]; }
  const Stmt *getInit() const { return SubExprs[INIT]; }
  void setInit(Stmt *S) { SubExprs[INIT] = S; }

  const Expr *getCond() const { return reinterpret_cast<Expr*>(SubExprs[COND]);}
  const Stmt *getBody() const { return SubExprs[BODY]; }
  const SwitchCase *getSwitchCaseList() const { return FirstCase.getPointer(); }

  Expr *getCond() { return reinterpret_cast<Expr*>(SubExprs[COND]);}
  void setCond(Expr *E) { SubExprs[COND] = reinterpret_cast<Stmt *>(E); }
  Stmt *getBody() { return SubExprs[BODY]; }
  void setBody(Stmt *S) { SubExprs[BODY] = S; }
  SwitchCase *getSwitchCaseList() { return FirstCase.getPointer(); }

  /// \brief Set the case list for this switch statement.
// (continuation of class SwitchStmt)
  void setSwitchCaseList(SwitchCase *SC) { FirstCase.setPointer(SC); }

  SourceLocation getSwitchLoc() const { return SwitchLoc; }
  void setSwitchLoc(SourceLocation L) { SwitchLoc = L; }

  void setBody(Stmt *S, SourceLocation SL) {
    SubExprs[BODY] = S;
    SwitchLoc = SL;
  }

  // Prepends SC to the intrusive singly-linked case list.
  void addSwitchCase(SwitchCase *SC) {
    assert(!SC->getNextSwitchCase() &&
           "case/default already added to a switch");
    SC->setNextSwitchCase(FirstCase.getPointer());
    FirstCase.setPointer(SC);
  }

  /// Set a flag in the SwitchStmt indicating that if the 'switch (X)' is a
  /// switch over an enum value then all cases have been explicitly covered.
  void setAllEnumCasesCovered() { FirstCase.setInt(true); }

  /// Returns true if the SwitchStmt is a switch of an enum value and all cases
  /// have been explicitly covered.
  bool isAllEnumCasesCovered() const { return FirstCase.getInt(); }

  SourceLocation getLocStart() const LLVM_READONLY { return SwitchLoc; }
  SourceLocation getLocEnd() const LLVM_READONLY {
    return SubExprs[BODY] ? SubExprs[BODY]->getLocEnd()
                          : SubExprs[COND]->getLocEnd();
  }

  // Iterators
  child_range children() {
    return child_range(&SubExprs[0], &SubExprs[0]+END_EXPR);
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == SwitchStmtClass;
  }
};

/// WhileStmt - This represents a 'while' stmt.
///
class WhileStmt : public Stmt {
  SourceLocation WhileLoc;
  enum { VAR, COND, BODY, END_EXPR };
  Stmt* SubExprs[END_EXPR];

public:
  WhileStmt(const ASTContext &C, VarDecl *Var, Expr *cond, Stmt *body,
            SourceLocation WL);

  /// \brief Build an empty while statement.
  explicit WhileStmt(EmptyShell Empty) : Stmt(WhileStmtClass, Empty) { }

  /// \brief Retrieve the variable declared in this "while" statement, if any.
  ///
  /// In the following example, "x" is the condition variable.
  /// \code
  /// while (int x = random()) {
  ///   // ...
  /// }
  /// \endcode
  VarDecl *getConditionVariable() const;
  void setConditionVariable(const ASTContext &C, VarDecl *V);

  /// If this WhileStmt has a condition variable, return the faux DeclStmt
  /// associated with the creation of that condition variable.
  const DeclStmt *getConditionVariableDeclStmt() const {
    return reinterpret_cast<DeclStmt*>(SubExprs[VAR]);
  }

  Expr *getCond() { return reinterpret_cast<Expr*>(SubExprs[COND]); }
  const Expr *getCond() const { return reinterpret_cast<Expr*>(SubExprs[COND]);}
  void setCond(Expr *E) { SubExprs[COND] = reinterpret_cast<Stmt*>(E); }
  Stmt *getBody() { return SubExprs[BODY]; }
  const Stmt *getBody() const { return SubExprs[BODY]; }
  void setBody(Stmt *S) { SubExprs[BODY] = S; }

  SourceLocation getWhileLoc() const { return WhileLoc; }
  void setWhileLoc(SourceLocation L) { WhileLoc = L; }

  SourceLocation getLocStart() const LLVM_READONLY { return WhileLoc; }
  SourceLocation getLocEnd() const LLVM_READONLY {
    return SubExprs[BODY]->getLocEnd();
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == WhileStmtClass;
  }

  // Iterators
  child_range children() {
    return child_range(&SubExprs[0], &SubExprs[0]+END_EXPR);
  }
};

/// DoStmt - This represents a 'do/while' stmt.
///
class DoStmt : public Stmt {
  SourceLocation DoLoc;
  enum { BODY, COND, END_EXPR };
  Stmt* SubExprs[END_EXPR];
  SourceLocation WhileLoc;
  SourceLocation RParenLoc;  // Location of final ')' in do stmt condition.

public:
  DoStmt(Stmt *body, Expr *cond, SourceLocation DL, SourceLocation WL,
         SourceLocation RP)
    : Stmt(DoStmtClass), DoLoc(DL), WhileLoc(WL), RParenLoc(RP) {
    SubExprs[COND] = reinterpret_cast<Stmt*>(cond);
    SubExprs[BODY] = body;
  }

  /// \brief Build an empty do-while statement.
// (continuation of class DoStmt)
  explicit DoStmt(EmptyShell Empty) : Stmt(DoStmtClass, Empty) { }

  Expr *getCond() { return reinterpret_cast<Expr*>(SubExprs[COND]); }
  const Expr *getCond() const { return reinterpret_cast<Expr*>(SubExprs[COND]);}
  void setCond(Expr *E) { SubExprs[COND] = reinterpret_cast<Stmt*>(E); }
  Stmt *getBody() { return SubExprs[BODY]; }
  const Stmt *getBody() const { return SubExprs[BODY]; }
  void setBody(Stmt *S) { SubExprs[BODY] = S; }

  SourceLocation getDoLoc() const { return DoLoc; }
  void setDoLoc(SourceLocation L) { DoLoc = L; }
  SourceLocation getWhileLoc() const { return WhileLoc; }
  void setWhileLoc(SourceLocation L) { WhileLoc = L; }

  SourceLocation getRParenLoc() const { return RParenLoc; }
  void setRParenLoc(SourceLocation L) { RParenLoc = L; }

  SourceLocation getLocStart() const LLVM_READONLY { return DoLoc; }
  SourceLocation getLocEnd() const LLVM_READONLY { return RParenLoc; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == DoStmtClass;
  }

  // Iterators
  child_range children() {
    return child_range(&SubExprs[0], &SubExprs[0]+END_EXPR);
  }
};

/// ForStmt - This represents a 'for (init;cond;inc)' stmt.  Note that any of
/// the init/cond/inc parts of the ForStmt will be null if they were not
/// specified in the source.
///
class ForStmt : public Stmt {
  SourceLocation ForLoc;
  enum { INIT, CONDVAR, COND, INC, BODY, END_EXPR };
  Stmt* SubExprs[END_EXPR]; // SubExprs[INIT] is an expression or declstmt.
  SourceLocation LParenLoc, RParenLoc;

public:
  ForStmt(const ASTContext &C, Stmt *Init, Expr *Cond, VarDecl *condVar,
          Expr *Inc, Stmt *Body, SourceLocation FL, SourceLocation LP,
          SourceLocation RP);

  /// \brief Build an empty for statement.
  explicit ForStmt(EmptyShell Empty) : Stmt(ForStmtClass, Empty) { }

  Stmt *getInit() { return SubExprs[INIT]; }

  /// \brief Retrieve the variable declared in this "for" statement, if any.
  ///
  /// In the following example, "y" is the condition variable.
  /// \code
  /// for (int x = random(); int y = mangle(x); ++x) {
  ///   // ...
  /// }
  /// \endcode
  VarDecl *getConditionVariable() const;
  void setConditionVariable(const ASTContext &C, VarDecl *V);

  /// If this ForStmt has a condition variable, return the faux DeclStmt
  /// associated with the creation of that condition variable.
  const DeclStmt *getConditionVariableDeclStmt() const {
    return reinterpret_cast<DeclStmt*>(SubExprs[CONDVAR]);
  }

  Expr *getCond() { return reinterpret_cast<Expr*>(SubExprs[COND]); }
  Expr *getInc() { return reinterpret_cast<Expr*>(SubExprs[INC]); }
  Stmt *getBody() { return SubExprs[BODY]; }

  const Stmt *getInit() const { return SubExprs[INIT]; }
  const Expr *getCond() const { return reinterpret_cast<Expr*>(SubExprs[COND]);}
  const Expr *getInc() const { return reinterpret_cast<Expr*>(SubExprs[INC]); }
  const Stmt *getBody() const { return SubExprs[BODY]; }

  void setInit(Stmt *S) { SubExprs[INIT] = S; }
  void setCond(Expr *E) { SubExprs[COND] = reinterpret_cast<Stmt*>(E); }
  void setInc(Expr *E) { SubExprs[INC] = reinterpret_cast<Stmt*>(E); }
  void setBody(Stmt *S) { SubExprs[BODY] = S; }

  SourceLocation getForLoc() const { return ForLoc; }
  void setForLoc(SourceLocation L) { ForLoc = L; }
  SourceLocation getLParenLoc() const { return LParenLoc; }
  void setLParenLoc(SourceLocation L) { LParenLoc = L; }
  SourceLocation getRParenLoc() const { return RParenLoc; }
  void setRParenLoc(SourceLocation L) { RParenLoc = L; }

  SourceLocation getLocStart() const LLVM_READONLY { return ForLoc; }
  SourceLocation getLocEnd() const LLVM_READONLY {
    return SubExprs[BODY]->getLocEnd();
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == ForStmtClass;
  }

  // Iterators
  child_range children() {
    return child_range(&SubExprs[0], &SubExprs[0]+END_EXPR);
  }
};

/// GotoStmt - This represents a direct goto.
///
class GotoStmt : public Stmt {
  LabelDecl *Label;
  SourceLocation GotoLoc;
  SourceLocation LabelLoc;
public:
  GotoStmt(LabelDecl *label, SourceLocation GL, SourceLocation LL)
    : Stmt(GotoStmtClass), Label(label), GotoLoc(GL), LabelLoc(LL) {}

  /// \brief Build an empty goto statement.
  explicit GotoStmt(EmptyShell Empty) : Stmt(GotoStmtClass, Empty) { }

  LabelDecl *getLabel() const { return Label; }
  void setLabel(LabelDecl *D) { Label = D; }

  SourceLocation getGotoLoc() const { return GotoLoc; }
  void setGotoLoc(SourceLocation L) { GotoLoc = L; }
  SourceLocation getLabelLoc() const { return LabelLoc; }
  void setLabelLoc(SourceLocation L) { LabelLoc = L; }

  SourceLocation getLocStart() const LLVM_READONLY { return GotoLoc; }
  SourceLocation getLocEnd() const LLVM_READONLY { return LabelLoc; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == GotoStmtClass;
  }

  // Iterators: a direct goto has no sub-statements, so the range is empty.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }
};

/// IndirectGotoStmt - This represents an indirect goto.
///
class IndirectGotoStmt : public Stmt {
  SourceLocation GotoLoc;
  SourceLocation StarLoc;
  // The target expression, stored type-erased as a Stmt* (see getTarget).
  Stmt *Target;
public:
  IndirectGotoStmt(SourceLocation gotoLoc, SourceLocation starLoc,
                   Expr *target)
    : Stmt(IndirectGotoStmtClass), GotoLoc(gotoLoc), StarLoc(starLoc),
      Target((Stmt*)target) {}

  /// \brief Build an empty indirect goto statement.
  explicit IndirectGotoStmt(EmptyShell Empty)
    : Stmt(IndirectGotoStmtClass, Empty) { }

  void setGotoLoc(SourceLocation L) { GotoLoc = L; }
  SourceLocation getGotoLoc() const { return GotoLoc; }
  void setStarLoc(SourceLocation L) { StarLoc = L; }
  SourceLocation getStarLoc() const { return StarLoc; }

  Expr *getTarget() { return reinterpret_cast<Expr*>(Target); }
  const Expr *getTarget() const {return reinterpret_cast<const Expr*>(Target);}
  void setTarget(Expr *E) { Target = reinterpret_cast<Stmt*>(E); }

  /// getConstantTarget - Returns the fixed target of this indirect
  /// goto, if one exists.
  LabelDecl *getConstantTarget();
  const LabelDecl *getConstantTarget() const {
    // Delegate to the non-const overload; safe because it does not mutate.
    return const_cast<IndirectGotoStmt*>(this)->getConstantTarget();
  }

  SourceLocation getLocStart() const LLVM_READONLY { return GotoLoc; }
  SourceLocation getLocEnd() const LLVM_READONLY { return Target->getLocEnd(); }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == IndirectGotoStmtClass;
  }

  // Iterators
  child_range children() { return child_range(&Target, &Target+1); }
};

/// ContinueStmt - This represents a continue.
///
class ContinueStmt : public Stmt {
  SourceLocation ContinueLoc;
public:
  ContinueStmt(SourceLocation CL) : Stmt(ContinueStmtClass), ContinueLoc(CL) {}

  /// \brief Build an empty continue statement.
  explicit ContinueStmt(EmptyShell Empty) : Stmt(ContinueStmtClass, Empty) { }

  SourceLocation getContinueLoc() const { return ContinueLoc; }
  void setContinueLoc(SourceLocation L) { ContinueLoc = L; }

  SourceLocation getLocStart() const LLVM_READONLY { return ContinueLoc; }
  SourceLocation getLocEnd() const LLVM_READONLY { return ContinueLoc; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == ContinueStmtClass;
  }

  // Iterators
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }
};

/// BreakStmt - This represents a break.
///
class BreakStmt : public Stmt {
  SourceLocation BreakLoc;
public:
  BreakStmt(SourceLocation BL) : Stmt(BreakStmtClass), BreakLoc(BL) {
    // Keep BreakStmt as small as possible: Stmt bits plus one location.
    static_assert(sizeof(BreakStmt) == 2 * sizeof(SourceLocation),
                  "BreakStmt too large");
  }

  /// \brief Build an empty break statement.
  explicit BreakStmt(EmptyShell Empty) : Stmt(BreakStmtClass, Empty) { }

  SourceLocation getBreakLoc() const { return BreakLoc; }
  void setBreakLoc(SourceLocation L) { BreakLoc = L; }

  SourceLocation getLocStart() const LLVM_READONLY { return BreakLoc; }
  SourceLocation getLocEnd() const LLVM_READONLY { return BreakLoc; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == BreakStmtClass;
  }

  // Iterators
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }
};

/// ReturnStmt - This represents a return, optionally of an expression:
///   return;
///   return 4;
///
/// Note that GCC allows return with no argument in a function declared to
/// return a value, and it allows returning a value in functions declared to
/// return void.  We explicitly model this in the AST, which means you can't
/// depend on the return type of the function and the presence of an argument.
///
class ReturnStmt : public Stmt {
  SourceLocation RetLoc;
  // The returned expression (may be null), stored type-erased as a Stmt*.
  Stmt *RetExpr;
  const VarDecl *NRVOCandidate;

public:
  explicit ReturnStmt(SourceLocation RL) : ReturnStmt(RL, nullptr, nullptr) {}

  ReturnStmt(SourceLocation RL, Expr *E, const VarDecl *NRVOCandidate)
      : Stmt(ReturnStmtClass), RetLoc(RL), RetExpr((Stmt *)E),
        NRVOCandidate(NRVOCandidate) {}

  /// \brief Build an empty return expression.
  explicit ReturnStmt(EmptyShell Empty) : Stmt(ReturnStmtClass, Empty) { }

  const Expr *getRetValue() const;
  Expr *getRetValue();
  void setRetValue(Expr *E) { RetExpr = reinterpret_cast<Stmt*>(E); }

  SourceLocation getReturnLoc() const { return RetLoc; }
  void setReturnLoc(SourceLocation L) { RetLoc = L; }

  /// \brief Retrieve the variable that might be used for the named return
  /// value optimization.
  ///
  /// The optimization itself can only be performed if the variable is
  /// also marked as an NRVO object.
  const VarDecl *getNRVOCandidate() const { return NRVOCandidate; }
  void setNRVOCandidate(const VarDecl *Var) { NRVOCandidate = Var; }

  SourceLocation getLocStart() const LLVM_READONLY { return RetLoc; }
  // A bare 'return;' ends at the keyword itself.
  SourceLocation getLocEnd() const LLVM_READONLY {
    return RetExpr ? RetExpr->getLocEnd() : RetLoc;
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == ReturnStmtClass;
  }

  // Iterators
  child_range children() {
    if (RetExpr) return child_range(&RetExpr, &RetExpr+1);
    return child_range(child_iterator(), child_iterator());
  }
};

/// AsmStmt is the base class for GCCAsmStmt and MSAsmStmt.
///
class AsmStmt : public Stmt {
protected:
  SourceLocation AsmLoc;
  /// \brief True if the assembly statement does not have any input or output
  /// operands.
  bool IsSimple;

  /// \brief If true, treat this inline assembly as having side effects.
  /// This assembly statement should not be optimized, deleted or moved.
  bool IsVolatile;

  unsigned NumOutputs;
  unsigned NumInputs;
  unsigned NumClobbers;

  // Operand expressions: outputs first, then inputs (see the iterator
  // accessors below, which offset by NumOutputs).
  Stmt **Exprs;

  AsmStmt(StmtClass SC, SourceLocation asmloc, bool issimple, bool isvolatile,
          unsigned numoutputs, unsigned numinputs, unsigned numclobbers)
    : Stmt (SC), AsmLoc(asmloc), IsSimple(issimple), IsVolatile(isvolatile),
      NumOutputs(numoutputs), NumInputs(numinputs), NumClobbers(numclobbers) { }

  friend class ASTStmtReader;

public:
  /// \brief Build an empty inline-assembly statement.
  explicit AsmStmt(StmtClass SC, EmptyShell Empty) :
    Stmt(SC, Empty), Exprs(nullptr) { }

  SourceLocation getAsmLoc() const { return AsmLoc; }
  void setAsmLoc(SourceLocation L) { AsmLoc = L; }

  bool isSimple() const { return IsSimple; }
  void setSimple(bool V) { IsSimple = V; }

  bool isVolatile() const { return IsVolatile; }
  void setVolatile(bool V) { IsVolatile = V; }

  // NOTE(review): the base class deliberately returns invalid locations;
  // the concrete subclasses override with their real source range.
  SourceLocation getLocStart() const LLVM_READONLY { return SourceLocation(); }
  SourceLocation getLocEnd() const LLVM_READONLY { return SourceLocation(); }

  //===--- Asm String Analysis ---===//

  /// Assemble final IR asm string.
  std::string generateAsmString(const ASTContext &C) const;

  //===--- Output operands ---===//

  unsigned getNumOutputs() const { return NumOutputs; }

  /// getOutputConstraint - Return the constraint string for the specified
  /// output operand.  All output constraints are known to be non-empty (either
  /// '=' or '+').
  StringRef getOutputConstraint(unsigned i) const;

  /// isOutputPlusConstraint - Return true if the specified output constraint
  /// is a "+" constraint (which is both an input and an output) or false if it
  /// is an "=" constraint (just an output).
  bool isOutputPlusConstraint(unsigned i) const {
    // Safe to index [0]: output constraints are documented non-empty above.
    return getOutputConstraint(i)[0] == '+';
  }

  const Expr *getOutputExpr(unsigned i) const;

  /// getNumPlusOperands - Return the number of output operands that have a "+"
  /// constraint.
  unsigned getNumPlusOperands() const;

  //===--- Input operands ---===//

  unsigned getNumInputs() const { return NumInputs; }

  /// getInputConstraint - Return the specified input constraint.  Unlike output
  /// constraints, these can be empty.
  StringRef getInputConstraint(unsigned i) const;

  const Expr *getInputExpr(unsigned i) const;

  //===--- Other ---===//

  unsigned getNumClobbers() const { return NumClobbers; }
  StringRef getClobber(unsigned i) const;

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == GCCAsmStmtClass ||
      T->getStmtClass() == MSAsmStmtClass;
  }

  // Input expr iterators.  Inputs are stored in Exprs after the outputs,
  // hence the NumOutputs offset.
  typedef ExprIterator inputs_iterator;
  typedef ConstExprIterator const_inputs_iterator;
  typedef llvm::iterator_range<inputs_iterator> inputs_range;
  typedef llvm::iterator_range<const_inputs_iterator> inputs_const_range;

  inputs_iterator begin_inputs() {
    return &Exprs[0] + NumOutputs;
  }

  inputs_iterator end_inputs() {
    return &Exprs[0] + NumOutputs + NumInputs;
  }

  inputs_range inputs() { return inputs_range(begin_inputs(), end_inputs()); }

  const_inputs_iterator begin_inputs() const {
    return &Exprs[0] + NumOutputs;
  }

  const_inputs_iterator end_inputs() const {
    return &Exprs[0] + NumOutputs + NumInputs;
  }

  inputs_const_range inputs() const {
    return inputs_const_range(begin_inputs(), end_inputs());
  }

  // Output expr iterators.

  typedef ExprIterator outputs_iterator;
  typedef ConstExprIterator const_outputs_iterator;
  typedef llvm::iterator_range<outputs_iterator> outputs_range;
  typedef llvm::iterator_range<const_outputs_iterator> outputs_const_range;

  outputs_iterator begin_outputs() {
    return &Exprs[0];
  }
  outputs_iterator end_outputs() {
    return &Exprs[0] + NumOutputs;
  }
  outputs_range outputs() {
    return outputs_range(begin_outputs(), end_outputs());
  }

  const_outputs_iterator begin_outputs() const {
    return &Exprs[0];
  }
  const_outputs_iterator end_outputs() const {
    return &Exprs[0] + NumOutputs;
  }
  outputs_const_range outputs() const {
    return outputs_const_range(begin_outputs(), end_outputs());
  }

  child_range children() {
    return child_range(&Exprs[0], &Exprs[0] + NumOutputs + NumInputs);
  }
};

/// This represents a GCC inline-assembly statement extension.
///
class GCCAsmStmt : public AsmStmt {
  SourceLocation RParenLoc;
  StringLiteral *AsmStr;

  // FIXME: If we wanted to, we could allocate all of these in one big array.
  // Per-operand constraint literals, clobber literals, and symbolic operand
  // names; ordered outputs-first, then inputs (see the NumOutputs offsets
  // in the input accessors below).
  StringLiteral **Constraints;
  StringLiteral **Clobbers;
  IdentifierInfo **Names;

  friend class ASTStmtReader;

public:
  GCCAsmStmt(const ASTContext &C, SourceLocation asmloc, bool issimple,
             bool isvolatile, unsigned numoutputs, unsigned numinputs,
             IdentifierInfo **names, StringLiteral **constraints, Expr **exprs,
             StringLiteral *asmstr, unsigned numclobbers,
             StringLiteral **clobbers, SourceLocation rparenloc);

  /// \brief Build an empty inline-assembly statement.
  explicit GCCAsmStmt(EmptyShell Empty) : AsmStmt(GCCAsmStmtClass, Empty),
    Constraints(nullptr), Clobbers(nullptr), Names(nullptr) { }

  SourceLocation getRParenLoc() const { return RParenLoc; }
  void setRParenLoc(SourceLocation L) { RParenLoc = L; }

  //===--- Asm String Analysis ---===//

  const StringLiteral *getAsmString() const { return AsmStr; }
  StringLiteral *getAsmString() { return AsmStr; }
  void setAsmString(StringLiteral *E) { AsmStr = E; }

  /// AsmStringPiece - this is part of a decomposed asm string specification
  /// (for use with the AnalyzeAsmString function below).  An asm string is
  /// considered to be a concatenation of these parts.
  class AsmStringPiece {
  public:
    enum Kind {
      String,  // String in .ll asm string form, "$" -> "$$" and "%%" -> "%".
      Operand  // Operand reference, with optional modifier %c4.
    };
  private:
    Kind MyKind;
    std::string Str;
    unsigned OperandNo;

    // Source range for operand references.
    CharSourceRange Range;
  public:
    AsmStringPiece(const std::string &S) : MyKind(String), Str(S) {}
    AsmStringPiece(unsigned OpNo, const std::string &S, SourceLocation Begin,
                   SourceLocation End)
      : MyKind(Operand), Str(S), OperandNo(OpNo),
        Range(CharSourceRange::getCharRange(Begin, End)) {
    }

    bool isString() const { return MyKind == String; }
    bool isOperand() const { return MyKind == Operand; }

    const std::string &getString() const { return Str; }

    unsigned getOperandNo() const {
      assert(isOperand());
      return OperandNo;
    }

    CharSourceRange getRange() const {
      assert(isOperand() && "Range is currently used only for Operands.");
      return Range;
    }

    /// getModifier - Get the modifier for this operand, if present.  This
    /// returns '\0' if there was no modifier.
    char getModifier() const;
  };

  /// AnalyzeAsmString - Analyze the asm string of the current asm, decomposing
  /// it into pieces.  If the asm string is erroneous, emit errors and return
  /// true, otherwise return false.  This handles canonicalization and
  /// translation of strings from GCC syntax to LLVM IR syntax, and handles
  /// flattening of named references like %[foo] to Operand AsmStringPiece's.
  unsigned AnalyzeAsmString(SmallVectorImpl<AsmStringPiece> &Pieces,
                            const ASTContext &C, unsigned &DiagOffs) const;

  /// Assemble final IR asm string.
  std::string generateAsmString(const ASTContext &C) const;

  //===--- Output operands ---===//

  IdentifierInfo *getOutputIdentifier(unsigned i) const { return Names[i]; }

  StringRef getOutputName(unsigned i) const {
    if (IdentifierInfo *II = getOutputIdentifier(i))
      return II->getName();
    return StringRef();
  }

  StringRef getOutputConstraint(unsigned i) const;

  const StringLiteral *getOutputConstraintLiteral(unsigned i) const {
    return Constraints[i];
  }
  StringLiteral *getOutputConstraintLiteral(unsigned i) {
    return Constraints[i];
  }

  Expr *getOutputExpr(unsigned i);

  const Expr *getOutputExpr(unsigned i) const {
    return const_cast<GCCAsmStmt*>(this)->getOutputExpr(i);
  }

  //===--- Input operands ---===//

  // Inputs follow the outputs in Names/Constraints, hence the offset.
  IdentifierInfo *getInputIdentifier(unsigned i) const {
    return Names[i + NumOutputs];
  }

  StringRef getInputName(unsigned i) const {
    if (IdentifierInfo *II = getInputIdentifier(i))
      return II->getName();
    return StringRef();
  }

  StringRef getInputConstraint(unsigned i) const;

  const StringLiteral *getInputConstraintLiteral(unsigned i) const {
    return Constraints[i + NumOutputs];
  }
  StringLiteral *getInputConstraintLiteral(unsigned i) {
    return Constraints[i + NumOutputs];
  }

  Expr *getInputExpr(unsigned i);
  void setInputExpr(unsigned i, Expr *E);

  const Expr *getInputExpr(unsigned i) const {
    return const_cast<GCCAsmStmt*>(this)->getInputExpr(i);
  }

private:
  void setOutputsAndInputsAndClobbers(const ASTContext &C,
                                      IdentifierInfo **Names,
                                      StringLiteral **Constraints,
                                      Stmt **Exprs,
                                      unsigned NumOutputs,
                                      unsigned NumInputs,
                                      StringLiteral **Clobbers,
                                      unsigned NumClobbers);
public:

  //===--- Other ---===//

  /// getNamedOperand - Given a symbolic operand reference like %[foo],
  /// translate this into a numeric value needed to reference the same operand.
  /// This returns -1 if the operand name is invalid.
  int getNamedOperand(StringRef SymbolicName) const;

  StringRef getClobber(unsigned i) const;
  StringLiteral *getClobberStringLiteral(unsigned i) { return Clobbers[i]; }
  const StringLiteral *getClobberStringLiteral(unsigned i) const {
    return Clobbers[i];
  }

  SourceLocation getLocStart() const LLVM_READONLY { return AsmLoc; }
  SourceLocation getLocEnd() const LLVM_READONLY { return RParenLoc; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == GCCAsmStmtClass;
  }
};

/// This represents a Microsoft inline-assembly statement extension.
///
class MSAsmStmt : public AsmStmt {
  SourceLocation LBraceLoc, EndLoc;
  StringRef AsmStr;

  unsigned NumAsmToks;

  Token *AsmToks;
  StringRef *Constraints;
  StringRef *Clobbers;

  friend class ASTStmtReader;

public:
  MSAsmStmt(const ASTContext &C, SourceLocation asmloc,
            SourceLocation lbraceloc, bool issimple, bool isvolatile,
            ArrayRef<Token> asmtoks, unsigned numoutputs, unsigned numinputs,
            ArrayRef<StringRef> constraints,
            ArrayRef<Expr*> exprs, StringRef asmstr,
            ArrayRef<StringRef> clobbers, SourceLocation endloc);

  /// \brief Build an empty MS-style inline-assembly statement.
  explicit MSAsmStmt(EmptyShell Empty) : AsmStmt(MSAsmStmtClass, Empty),
    NumAsmToks(0), AsmToks(nullptr), Constraints(nullptr), Clobbers(nullptr) { }

  SourceLocation getLBraceLoc() const { return LBraceLoc; }
  void setLBraceLoc(SourceLocation L) { LBraceLoc = L; }
  SourceLocation getEndLoc() const { return EndLoc; }
  void setEndLoc(SourceLocation L) { EndLoc = L; }

  bool hasBraces() const { return LBraceLoc.isValid(); }

  unsigned getNumAsmToks() { return NumAsmToks; }
  Token *getAsmToks() { return AsmToks; }

  //===--- Asm String Analysis ---===//
  StringRef getAsmString() const { return AsmStr; }

  /// Assemble final IR asm string.
  std::string generateAsmString(const ASTContext &C) const;

  //===--- Output operands ---===//

  StringRef getOutputConstraint(unsigned i) const {
    assert(i < NumOutputs);
    return Constraints[i];
  }

  Expr *getOutputExpr(unsigned i);

  const Expr *getOutputExpr(unsigned i) const {
    return const_cast<MSAsmStmt*>(this)->getOutputExpr(i);
  }

  //===--- Input operands ---===//

  // Inputs are stored after the outputs in Constraints, hence the offset.
  StringRef getInputConstraint(unsigned i) const {
    assert(i < NumInputs);
    return Constraints[i + NumOutputs];
  }

  Expr *getInputExpr(unsigned i);
  void setInputExpr(unsigned i, Expr *E);

  const Expr *getInputExpr(unsigned i) const {
    return const_cast<MSAsmStmt*>(this)->getInputExpr(i);
  }

  //===--- Other ---===//

  ArrayRef<StringRef> getAllConstraints() const {
    return llvm::makeArrayRef(Constraints, NumInputs + NumOutputs);
  }
  ArrayRef<StringRef> getClobbers() const {
    return llvm::makeArrayRef(Clobbers, NumClobbers);
  }
  ArrayRef<Expr*> getAllExprs() const {
    return llvm::makeArrayRef(reinterpret_cast<Expr**>(Exprs),
                              NumInputs + NumOutputs);
  }

  StringRef getClobber(unsigned i) const { return getClobbers()[i]; }

private:
  void initialize(const ASTContext &C, StringRef AsmString,
                  ArrayRef<Token> AsmToks, ArrayRef<StringRef> Constraints,
                  ArrayRef<Expr*> Exprs, ArrayRef<StringRef> Clobbers);
public:

  SourceLocation getLocStart() const LLVM_READONLY { return AsmLoc; }
  SourceLocation getLocEnd() const LLVM_READONLY { return EndLoc; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == MSAsmStmtClass;
  }

  child_range children() {
    return child_range(&Exprs[0], &Exprs[NumInputs + NumOutputs]);
  }
};

/// Represents a __except block of a Structured Exception Handling __try.
class SEHExceptStmt : public Stmt {
  SourceLocation  Loc;
  Stmt           *Children[2];

  enum { FILTER_EXPR, BLOCK };

  SEHExceptStmt(SourceLocation Loc,
                Expr *FilterExpr,
                Stmt *Block);

  friend class ASTReader;
  friend class ASTStmtReader;
  explicit SEHExceptStmt(EmptyShell E) : Stmt(SEHExceptStmtClass, E) { }

public:
  static SEHExceptStmt* Create(const ASTContext &C,
                               SourceLocation ExceptLoc,
                               Expr *FilterExpr,
                               Stmt *Block);

  SourceLocation getLocStart() const LLVM_READONLY { return getExceptLoc(); }
  SourceLocation getLocEnd() const LLVM_READONLY { return getEndLoc(); }

  SourceLocation getExceptLoc() const { return Loc; }
  SourceLocation getEndLoc() const { return getBlock()->getLocEnd(); }

  Expr *getFilterExpr() const {
    return reinterpret_cast<Expr*>(Children[FILTER_EXPR]);
  }

  CompoundStmt *getBlock() const {
    return cast<CompoundStmt>(Children[BLOCK]);
  }

  child_range children() {
    return child_range(Children,Children+2);
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == SEHExceptStmtClass;
  }
};

/// Represents a __finally block of a Structured Exception Handling __try.
class SEHFinallyStmt : public Stmt {
  SourceLocation  Loc;
  Stmt           *Block;

  SEHFinallyStmt(SourceLocation Loc,
                 Stmt *Block);

  friend class ASTReader;
  friend class ASTStmtReader;
  explicit SEHFinallyStmt(EmptyShell E) : Stmt(SEHFinallyStmtClass, E) { }

public:
  static SEHFinallyStmt* Create(const ASTContext &C,
                                SourceLocation FinallyLoc,
                                Stmt *Block);

  SourceLocation getLocStart() const LLVM_READONLY { return getFinallyLoc(); }
  SourceLocation getLocEnd() const LLVM_READONLY { return getEndLoc(); }

  SourceLocation getFinallyLoc() const { return Loc; }
  SourceLocation getEndLoc() const { return Block->getLocEnd(); }

  CompoundStmt *getBlock() const { return cast<CompoundStmt>(Block); }

  child_range children() {
    return child_range(&Block,&Block+1);
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == SEHFinallyStmtClass;
  }
};

/// Represents a __try statement; the handler child is either a
/// SEHExceptStmt or a SEHFinallyStmt (see getExceptHandler /
/// getFinallyHandler below).
class SEHTryStmt : public Stmt {
  bool            IsCXXTry;
  SourceLocation  TryLoc;
  Stmt           *Children[2];

  enum { TRY = 0, HANDLER = 1 };

  SEHTryStmt(bool isCXXTry, // true if 'try' otherwise '__try'
             SourceLocation TryLoc,
             Stmt *TryBlock,
             Stmt *Handler);

  friend class ASTReader;
  friend class ASTStmtReader;
  explicit SEHTryStmt(EmptyShell E) : Stmt(SEHTryStmtClass, E) { }

public:
  static SEHTryStmt* Create(const ASTContext &C, bool isCXXTry,
                            SourceLocation TryLoc, Stmt *TryBlock,
                            Stmt *Handler);

  SourceLocation getLocStart() const LLVM_READONLY { return getTryLoc(); }
  SourceLocation getLocEnd() const LLVM_READONLY { return getEndLoc(); }

  SourceLocation getTryLoc() const { return TryLoc; }
  SourceLocation getEndLoc() const { return Children[HANDLER]->getLocEnd(); }

  bool getIsCXXTry() const { return IsCXXTry; }

  CompoundStmt* getTryBlock() const {
    return cast<CompoundStmt>(Children[TRY]);
  }

  Stmt *getHandler() const { return Children[HANDLER]; }

  /// Returns 0 if not defined
  SEHExceptStmt  *getExceptHandler() const;
  SEHFinallyStmt *getFinallyHandler() const;

  child_range children() {
    return child_range(Children,Children+2);
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == SEHTryStmtClass;
  }
};

/// Represents a __leave statement.
///
class SEHLeaveStmt : public Stmt {
  SourceLocation LeaveLoc;
public:
  explicit SEHLeaveStmt(SourceLocation LL)
      : Stmt(SEHLeaveStmtClass), LeaveLoc(LL) {}

  /// \brief Build an empty __leave statement.
  explicit SEHLeaveStmt(EmptyShell Empty) : Stmt(SEHLeaveStmtClass, Empty) { }

  SourceLocation getLeaveLoc() const { return LeaveLoc; }
  void setLeaveLoc(SourceLocation L) { LeaveLoc = L; }

  SourceLocation getLocStart() const LLVM_READONLY { return LeaveLoc; }
  SourceLocation getLocEnd() const LLVM_READONLY { return LeaveLoc; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == SEHLeaveStmtClass;
  }

  // Iterators
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }
};

/// \brief This captures a statement into a function. For example, the following
/// pragma annotated compound statement can be represented as a CapturedStmt,
/// and this compound statement is the body of an anonymous outlined function.
/// @code
/// #pragma omp parallel
/// {
///   compute();
/// }
/// @endcode
class CapturedStmt : public Stmt {
public:
  /// \brief The different capture forms: by 'this', by reference, capture for
  /// variable-length array type etc.
  enum VariableCaptureKind {
    VCK_This,
    VCK_ByRef,
    VCK_ByCopy,
    VCK_VLAType,
  };

  /// \brief Describes the capture of either a variable, or 'this', or
  /// variable-length array type.
  class Capture {
    // Variable pointer and capture kind packed into one pointer-sized field.
    llvm::PointerIntPair<VarDecl *, 2, VariableCaptureKind> VarAndKind;
    SourceLocation Loc;

  public:
    /// \brief Create a new capture.
    ///
    /// \param Loc The source location associated with this capture.
    ///
    /// \param Kind The kind of capture (this, ByRef, ...).
    ///
    /// \param Var The variable being captured, or null if capturing this.
    ///
    Capture(SourceLocation Loc, VariableCaptureKind Kind,
            VarDecl *Var = nullptr);

    /// \brief Determine the kind of capture.
    VariableCaptureKind getCaptureKind() const;

    /// \brief Retrieve the source location at which the variable or 'this' was
    /// first used.
    SourceLocation getLocation() const { return Loc; }

    /// \brief Determine whether this capture handles the C++ 'this' pointer.
    bool capturesThis() const { return getCaptureKind() == VCK_This; }

    /// \brief Determine whether this capture handles a variable (by reference).
    bool capturesVariable() const { return getCaptureKind() == VCK_ByRef; }

    /// \brief Determine whether this capture handles a variable by copy.
    bool capturesVariableByCopy() const {
      return getCaptureKind() == VCK_ByCopy;
    }

    /// \brief Determine whether this capture handles a variable-length array
    /// type.
    bool capturesVariableArrayType() const {
      return getCaptureKind() == VCK_VLAType;
    }

    /// \brief Retrieve the declaration of the variable being captured.
    ///
    /// This operation is only valid if this capture captures a variable.
    VarDecl *getCapturedVar() const;

    friend class ASTStmtReader;
  };

private:
  /// \brief The number of variable captured, including 'this'.
  unsigned NumCaptures;

  /// \brief The pointer part is the implicitly-generated outlined function
  /// and the int part is the captured region kind, 'CR_Default' etc.
  llvm::PointerIntPair<CapturedDecl *, 1, CapturedRegionKind> CapDeclAndKind;

  /// \brief The record for captured variables, a RecordDecl or CXXRecordDecl.
  RecordDecl *TheRecordDecl;

  /// \brief Construct a captured statement.
  CapturedStmt(Stmt *S, CapturedRegionKind Kind, ArrayRef<Capture> Captures,
               ArrayRef<Expr *> CaptureInits, CapturedDecl *CD, RecordDecl *RD);

  /// \brief Construct an empty captured statement.
  CapturedStmt(EmptyShell Empty, unsigned NumCaptures);

  // Trailing storage: the capture-init expressions and the captured
  // statement are laid out immediately after this object (this + 1).
  Stmt **getStoredStmts() { return reinterpret_cast<Stmt **>(this + 1); }

  Stmt *const *getStoredStmts() const {
    return reinterpret_cast<Stmt *const *>(this + 1);
  }

  Capture *getStoredCaptures() const;

  void setCapturedStmt(Stmt *S) { getStoredStmts()[NumCaptures] = S; }

public:
  static CapturedStmt *Create(const ASTContext &Context, Stmt *S,
                              CapturedRegionKind Kind,
                              ArrayRef<Capture> Captures,
                              ArrayRef<Expr *> CaptureInits,
                              CapturedDecl *CD, RecordDecl *RD);

  static CapturedStmt *CreateDeserialized(const ASTContext &Context,
                                          unsigned NumCaptures);

  /// \brief Retrieve the statement being captured.
  Stmt *getCapturedStmt() { return getStoredStmts()[NumCaptures]; }
  const Stmt *getCapturedStmt() const { return getStoredStmts()[NumCaptures]; }

  /// \brief Retrieve the outlined function declaration.
  CapturedDecl *getCapturedDecl();
  const CapturedDecl *getCapturedDecl() const;

  /// \brief Set the outlined function declaration.
  void setCapturedDecl(CapturedDecl *D);

  /// \brief Retrieve the captured region kind.
  CapturedRegionKind getCapturedRegionKind() const;

  /// \brief Set the captured region kind.
  void setCapturedRegionKind(CapturedRegionKind Kind);

  /// \brief Retrieve the record declaration for captured variables.
  const RecordDecl *getCapturedRecordDecl() const { return TheRecordDecl; }

  /// \brief Set the record declaration for captured variables.
  void setCapturedRecordDecl(RecordDecl *D) {
    assert(D && "null RecordDecl");
    TheRecordDecl = D;
  }

  /// \brief True if this variable has been captured.
  bool capturesVariable(const VarDecl *Var) const;

  /// \brief An iterator that walks over the captures.
  typedef Capture *capture_iterator;
  typedef const Capture *const_capture_iterator;
  typedef llvm::iterator_range<capture_iterator> capture_range;
  typedef llvm::iterator_range<const_capture_iterator> capture_const_range;

  capture_range captures() {
    return capture_range(capture_begin(), capture_end());
  }
  capture_const_range captures() const {
    return capture_const_range(capture_begin(), capture_end());
  }

  /// \brief Retrieve an iterator pointing to the first capture.
  capture_iterator capture_begin() { return getStoredCaptures(); }
  const_capture_iterator capture_begin() const { return getStoredCaptures(); }

  /// \brief Retrieve an iterator pointing past the end of the sequence of
  /// captures.
  // NOTE(review): this const member returns a non-const capture_iterator;
  // looks like an upstream inconsistency with capture_begin() — confirm
  // before relying on constness here.
  capture_iterator capture_end() const {
    return getStoredCaptures() + NumCaptures;
  }

  /// \brief Retrieve the number of captures, including 'this'.
  unsigned capture_size() const { return NumCaptures; }

  /// \brief Iterator that walks over the capture initialization arguments.
  typedef Expr **capture_init_iterator;
  typedef llvm::iterator_range<capture_init_iterator> capture_init_range;

  /// \brief Const iterator that walks over the capture initialization
  /// arguments.
  typedef Expr *const *const_capture_init_iterator;
  typedef llvm::iterator_range<const_capture_init_iterator>
      const_capture_init_range;

  capture_init_range capture_inits() {
    return capture_init_range(capture_init_begin(), capture_init_end());
  }

  const_capture_init_range capture_inits() const {
    return const_capture_init_range(capture_init_begin(), capture_init_end());
  }

  /// \brief Retrieve the first initialization argument.
  capture_init_iterator capture_init_begin() {
    return reinterpret_cast<Expr **>(getStoredStmts());
  }

  const_capture_init_iterator capture_init_begin() const {
    return reinterpret_cast<Expr *const *>(getStoredStmts());
  }

  /// \brief Retrieve the iterator pointing one past the last initialization
  /// argument.
  capture_init_iterator capture_init_end() {
    return capture_init_begin() + NumCaptures;
  }

  const_capture_init_iterator capture_init_end() const {
    return capture_init_begin() + NumCaptures;
  }

  // Source range of a CapturedStmt is exactly that of the captured statement.
  SourceLocation getLocStart() const LLVM_READONLY {
    return getCapturedStmt()->getLocStart();
  }
  SourceLocation getLocEnd() const LLVM_READONLY {
    return getCapturedStmt()->getLocEnd();
  }
  SourceRange getSourceRange() const LLVM_READONLY {
    return getCapturedStmt()->getSourceRange();
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == CapturedStmtClass;
  }

  child_range children();

  friend class ASTStmtReader;
};

}  // end namespace clang

#endif
parallel_iter.c
#include<stdio.h> #include<stdlib.h> #include<time.h> #include<omp.h> #include<math.h> #include<string.h> #define constant 6.28318530718 #define CLK CLOCK_MONOTONIC struct timespec diff(struct timespec start, struct timespec end){ struct timespec temp; if((end.tv_nsec-start.tv_nsec)<0){ temp.tv_sec = end.tv_sec-start.tv_sec-1; temp.tv_nsec = 1000000000+end.tv_nsec-start.tv_nsec; } else{ temp.tv_sec = end.tv_sec-start.tv_sec; temp.tv_nsec = end.tv_nsec-start.tv_nsec; } return temp; } typedef struct { unsigned char gs; } PPMPixelGS; typedef struct { int x, y; PPMPixelGS *data; } PPMImageGS; typedef struct{ double real; double imag; } Complex; #define RGB_COMPONENT_COLOR 255 void writePPMGS(const char *filename, PPMImageGS *img); static PPMImageGS *readPPMGS(const char *filename); /*-----------------------------------convert to image complex arrays------------------------------*/ Complex** convert(PPMImageGS *im, int p) { int rows = im->x; int cols = im->y; int i,j,idx; Complex **arr = (Complex **)malloc(rows * sizeof(Complex *)); for (i=0; i<rows; i++) arr[i] = (Complex *)malloc(cols * sizeof(Complex )); # pragma omp parallel \ shared ( arr,im, rows,cols ) \ private ( j,idx ) num_threads(p) # pragma omp for nowait for(i=0;i<rows;i++) { for(j=0; j<cols; j++) { idx = cols*i + j; PPMPixelGS *temp = im->data + idx; arr[i][j].real=(double)temp->gs; arr[i][j].imag=0.0; } } return arr; } /*------------------------------look up table-----------------------------------------------*/ void twiddle( int n, double w[], int p ) { double arg; double aw; int i; int n2; const double pi = constant/2; n2 = n / 2; aw = 2.0 * pi / ( ( double ) n ); # pragma omp parallel \ shared ( aw, n, w ) \ private ( arg, i ) num_threads(p) # pragma omp for nowait for ( i = 0; i < n2; i++ ) { arg = aw * ( ( double ) i ); w[i*2+0] = cos ( arg ); w[i*2+1] = sin ( arg ); } return; } /*-------------------------------------FFT--------------------------------*/ void FFT(double *x, double *y, int n, double 
w[], int p) { int m,i,j,k,i2; double tx,ty; m=0; i= n; /* m = logN calculation*/ while(i>0) { i/=2; m++; } m-=1; /* Do the bit reversal */ i2 = n >> 1; j = 0; for (i=0;i<n -1;i++) { if (i < j) { tx = x[i]; ty = y[i]; x[i] = x[j]; y[i] = y[j]; x[j] = tx; y[j] = ty; } k = i2; while (k <= j) { j -= k; k >>= 1; } j += k; } /* FFT computation */ int mj, term_i, mi, j2, count2; double u1, u2, t1, t2; mj = 1; //stride of j for(k=0;k<m;k++) { mi = 2*mj; //stride of i term_i = n/mi; #pragma omp parallel \ shared(x,y,mj,mi,n,term_i) \ private(count2,j2,j,t1,t2,u1,u2) num_threads(p) #pragma omp for nowait for(i=0; i<term_i; i++) { count2=0; for(j=i*mi;count2<mj;j++, count2++) { j%=(n-1); j2 = (j+ mj); int twiddle_index = count2*n/mi; u1 = w[twiddle_index*2+0]; u2 = -w[twiddle_index*2+1]; t1 = u1 * x[j2] - u2 * y[j2]; t2 = u1 * y[j2] + u2 * x[j2]; x[j2] = x[j] - t1; y[j2] = y[j] - t2; x[j] += t1; y[j] += t2; } } mj = mj*2; } } /*-----------------------------------2D FFT------------------------------*/ void FFT_2D(Complex **comp_in,int rows, int cols, double *w, int p) { int i,j; for(i=0;i<rows;i++) { double x[rows]; double y[rows]; for(j=0; j<cols; j++) { x[j]=comp_in[i][j].real; y[j]=comp_in[i][j].imag; } FFT(x,y,cols,w,p); for(j=0; j<cols; j++) { comp_in[i][j].real=x[j]; comp_in[i][j].imag=y[j] ; } } } /*-----------------------------------calculate the transpose------------------------------*/ Complex ** transpose(int N,Complex **comp_in,int p){ int blockrow, blockcolumn, i = 0, j = 0; int blocksize; blocksize = 16; Complex **arr = (Complex **)malloc(N * sizeof(Complex *)); for (i=0; i<N; i++) arr[i] = (Complex *)malloc(N * sizeof(Complex )); for (blockrow = 0; blockrow < N; blockrow += blocksize) { #pragma omp parallel \ shared(comp_in,arr,blockrow) \ private(i,j) num_threads(p) #pragma omp for nowait for (blockcolumn = 0; blockcolumn < N; blockcolumn += blocksize) { for (i = blockrow; i < blockrow + blocksize; i++) { for (j = blockcolumn; j < blockcolumn + blocksize; j++) 
{ arr[i][j] = comp_in[j][i]; } } } } return arr; } /*-----------------------------------convert complex arrays to image------------------------------*/ PPMImageGS * convert_comp_img(Complex **comp_in,int rows,int cols, int p) { int i,j; PPMImageGS *im2 = (PPMImageGS *) malloc(sizeof(PPMImageGS)); im2->x = rows; im2->y = cols; im2->data = (PPMPixelGS *) malloc(rows*cols*sizeof(PPMPixelGS)); double temp ; int idx; # pragma omp parallel \ shared (im2, rows,cols ) \ private ( j,idx,temp ) num_threads(p) # pragma omp for nowait for(i=0;i<rows;i++) { for(j=0; j<cols; j++) { idx = cols*i + j; temp = sqrt(comp_in[i][j].real*comp_in[i][j].real + comp_in[i][j].imag*comp_in[i][j].imag); PPMPixelGS *temp2 = im2->data + idx; temp2->gs = floor(temp); } } return im2; } /*-----------------------------------main function------------------------------*/ int main(int argc, char* argv[]) { struct timespec start_e2e, end_e2e, start_alg, end_alg, e2e, alg; clock_gettime(CLK, &start_e2e); int n = atoi(argv[1]); int p = atoi(argv[2]); //int run_id = atoi(argv[3]); char filename[30]; strcpy(filename,"input/"); strcat(filename,"gs_"); strcat(filename,argv[1]); strcat(filename,".ppm"); char *problem_name = "FFT"; char *approach_name = "iterative"; PPMImageGS *image,*transformed_img; Complex **comp_in; image = readPPMGS(filename); int rows= image->x; int cols = image->y; double* w = ( double * ) malloc (rows* sizeof ( double ) ); clock_gettime(CLK, &start_alg); comp_in = convert(image, p); twiddle(rows,w,p); FFT_2D(comp_in, rows, cols,w,p); comp_in = transpose(rows,comp_in,p); FFT_2D(comp_in,rows,cols,w,p); comp_in = transpose(cols,comp_in,p); transformed_img = convert_comp_img(comp_in,rows,cols,p); clock_gettime(CLK, &end_alg); char out_file[30] ; strcpy(out_file,"output/"); strcat(out_file,"gs_"); strcat(out_file,argv[1]); strcat(out_file,"fft_parallel.ppm"); writePPMGS(out_file,transformed_img); free(w); clock_gettime(CLK, &end_e2e); e2e = diff(start_e2e, end_e2e); alg = diff(start_alg, 
end_alg); //printf("%d,%d,%ld,%ld,%ld,%ld\n", n, p, e2e.tv_sec, e2e.tv_nsec, alg.tv_sec, alg.tv_nsec); printf("%s,%s,%d,%d,%ld,%ld,%ld,%ld\n",problem_name, approach_name, n, p, e2e.tv_sec, e2e.tv_nsec, alg.tv_sec, alg.tv_nsec); return 0; } /*-----------------------------------convert to image array to ppm------------------------------*/ void writePPMGS(const char *filename, PPMImageGS *img) { FILE *fp; //open file for output fp = fopen(filename, "wb"); if (!fp) { fprintf(stderr, "Unable to open file '%s'\n", filename); exit(1); } //write the header file //image format fprintf(fp, "P5\n"); //image size fprintf(fp, "%d %d\n",img->x,img->y); // rgb component depth fprintf(fp, "%d\n",RGB_COMPONENT_COLOR); // pixel data fwrite(img->data, img->x, img->y, fp); fclose(fp); } /*-----------------------------------convert image(ppm) to array------------------------------*/ static PPMImageGS *readPPMGS(const char *filename) { char buff[16]; PPMImageGS *img; FILE *fp; int c, rgb_comp_color; //open PPM file for reading fp = fopen(filename, "rb"); if (!fp) { fprintf(stderr, "Unable to open file '%s'\n", filename); exit(1); } //read image format if (!fgets(buff, sizeof(buff), fp)) { perror(filename); exit(1); } //check the image format if (buff[0] != 'P' || buff[1] != '5') { fprintf(stderr, "Invalid image format (must be 'P5')\n"); exit(1); } //alloc memory form image img = (PPMImageGS *)malloc(sizeof(PPMImageGS)); if (!img) { fprintf(stderr, "Unable to allocate memory\n"); exit(1); } //check for comments c = getc(fp); while (c == '#') { while (getc(fp) != '\n') ; c = getc(fp); } ungetc(c, fp); //read image size information if (fscanf(fp, "%d %d", &img->x, &img->y) != 2) { fprintf(stderr, "Invalid image size (error loading '%s')\n", filename); exit(1); } //read rgb component if (fscanf(fp, "%d", &rgb_comp_color) != 1) { fprintf(stderr, "Invalid rgb component (error loading '%s')\n", filename); exit(1); } //check rgb component depth if (rgb_comp_color!= RGB_COMPONENT_COLOR) { 
fprintf(stderr, "'%s' does not have 8-bits components\n", filename); exit(1); } while (fgetc(fp) != '\n') ; //memory allocation for pixel data img->data = (PPMPixelGS*)malloc(img->x * img->y * sizeof(PPMPixelGS)); if (!img) { fprintf(stderr, "Unable to allocate memory\n"); exit(1); } //read pixel data from file if (fread(img->data, img->x, img->y, fp) != img->y) { fprintf(stderr, "Error loading image '%s'\n", filename); exit(1); } fclose(fp); return img; }
TemporalMaxPooling.c
#ifndef TH_GENERIC_FILE
#define TH_GENERIC_FILE "generic/TemporalMaxPooling.c"
#else

/* Validate shapes for temporal max pooling.  input is 2D (nframe x framesize)
 * or 3D (batch x nframe x framesize); kW is the pooling window, dW the stride.
 * gradOutput/indices are checked only when non-NULL. */
static inline void THNN_(TemporalMaxPooling_shapeCheck)(
          THNNState *state,
          THTensor *input,
          THTensor *gradOutput,
          THIndexTensor *indices,
          int kW,
          int dW) {
  long niframe;
  long framesize;
  long noframe;

  int dimS = 0; // sequence dimension
  int dimF = 1; // feature dimension
  int ndims = input->nDimension;

  if (input->nDimension == 3)
  {
    dimS = 1;
    dimF = 2;
  }

  niframe = input->size[dimS];
  framesize = input->size[dimF];
  noframe = (niframe - kW) / dW + 1;

  THArgCheck(kW > 0, 5,
             "kernel size should be greater than zero, but got kW: %d", kW);
  THArgCheck(dW > 0, 6,
             "stride should be greater than zero, but got dW: %d", dW);

  THNN_ARGCHECK(input->nDimension == 2 || input->nDimension == 3, 2, input,
                "2D or 3D (batch mode) tensor expected for input, but got: %s");
  THArgCheck(input->size[dimS] >= kW, 2,
             "input sequence smaller than kernel size. Got: %d, Expected: %d",
             input->size[dimS], kW);

  if (gradOutput != NULL) {
    THNN_CHECK_DIM_SIZE(gradOutput, ndims, dimS, noframe);
    /* FIX: semicolon was missing after this macro invocation */
    THNN_CHECK_DIM_SIZE(gradOutput, ndims, dimF, framesize);
  }
  if (indices != NULL) {
    THNN_CHECK_DIM_SIZE_INDICES(indices, ndims, dimS, noframe);
    THNN_CHECK_DIM_SIZE_INDICES(indices, ndims, dimF, framesize);
  }
}

/* Forward pass: for each output frame, take the per-feature max over a window
 * of kW input frames starting at t*dW, and record the winning window offset
 * in indices. */
void THNN_(TemporalMaxPooling_updateOutput)(
          THNNState *state,
          THTensor *input,
          THTensor *output,
          THIndexTensor *indices,
          int kW,
          int dW)
{
  long niframe;
  long framesize;
  long noframe;

  real *input_data;
  real *output_data;
  THIndex_t *indices_data;

  long t, y;

  int dimS = 0; // sequence dimension
  int dimF = 1; // feature dimension

  THNN_(TemporalMaxPooling_shapeCheck)(state, input, NULL, NULL, kW, dW);

  if (input->nDimension == 3)
  {
    dimS = 1;
    dimF = 2;
  }

  /* sizes */
  niframe = input->size[dimS];
  framesize = input->size[dimF];
  noframe = (niframe - kW) / dW + 1;

  /* get contiguous input */
  input = THTensor_(newContiguous)(input);

  if (input->nDimension == 2)
  {
    /* resize output */
    THTensor_(resize2d)(output, noframe, framesize);

    /* indices will contain index locations for each output point */
    THIndexTensor_(resize2d)(indices, noframe, framesize);

    /* get raw pointers */
    input_data = THTensor_(data)(input);
    output_data = THTensor_(data)(output);
    indices_data = THIndexTensor_(data)(indices);

    for(t = 0; t < noframe; t++)
    {
      real *ip = input_data + t*framesize*dW;
      real *op = output_data + t*framesize;
      THIndex_t *xp = indices_data + t*framesize;
#pragma omp parallel for private(y)
      for(y = 0; y < framesize; y++)
      {
        /* compute local max: */
        long maxindex = -1;
        real maxval = -THInf;
        long x;
        for(x = 0; x < kW; x++)
        {
          real val = ip[x*framesize+y];
          if (val > maxval)
          {
            maxval = val;
            maxindex = x;
          }
        }

        /* set output to local max */
        op[y] = maxval;
        xp[y] = (real)maxindex;
      }
    }
  }
  else
  {
    /* number of batch frames */
    long nbframe = input->size[0];
    long i;

    /* resize output */
    THTensor_(resize3d)(output, nbframe, noframe, framesize);

    /* indices will contain index locations for each output point */
    THIndexTensor_(resize3d)(indices, nbframe, noframe, framesize);

    /* get raw pointers */
    input_data = THTensor_(data)(input);
    output_data = THTensor_(data)(output);
    indices_data = THIndexTensor_(data)(indices);

    for(i = 0; i < nbframe; i++)
    {
      real *inputSample_data = input_data + i*niframe*framesize;
      real *outputSample_data = output_data + i*noframe*framesize;
      THIndex_t *indicesSample_data = indices_data + i*noframe*framesize;

      for(t = 0; t < noframe; t++)
      {
        real *ip = inputSample_data + t*framesize*dW;
        real *op = outputSample_data + t*framesize;
        THIndex_t *xp = indicesSample_data + t*framesize;
#pragma omp parallel for private(y)
        for(y = 0; y < framesize; y++)
        {
          /* compute local max: */
          long maxindex = -1;
          real maxval = -THInf;
          long x;
          for(x = 0; x < kW; x++)
          {
            real val = ip[x*framesize+y];
            if (val > maxval)
            {
              maxval = val;
              maxindex = x;
            }
          }

          /* set output to local max */
          op[y] = maxval;
          xp[y] = (real)maxindex;
        }
      }
    }
  }

  /* cleanup */
  THTensor_(free)(input);
}

/* Backward pass: route each output gradient back to the input frame that won
 * the forward max (read from indices); untouched inputs keep zero gradient. */
void THNN_(TemporalMaxPooling_updateGradInput)(
          THNNState *state,
          THTensor *input,
          THTensor *gradOutput,
          THTensor *gradInput,
          THIndexTensor *indices,
          int kW,
          int dW)
{
  long niframe;
  /* FIX: was 'int'; tensor sizes are long, and the other two functions use
   * long — int silently truncated very long sequences */
  long noframe;
  long framesize;

  real *gradInput_data;
  real *gradOutput_data;
  THIndex_t *indices_data;

  long t, y;

  THNN_(TemporalMaxPooling_shapeCheck)(state, input, gradOutput, indices, kW, dW);

  /* get contiguous gradOutput */
  gradOutput = THTensor_(newContiguous)(gradOutput);

  /* resize and zero */
  THTensor_(resizeAs)(gradInput, input);
  THTensor_(zero)(gradInput);

  int dimS = 0; // sequence dimension
  int dimF = 1; // feature dimension

  if (input->nDimension == 3)
  {
    dimS = 1;
    dimF = 2;
  }

  /* sizes */
  niframe = input->size[dimS];
  noframe = gradOutput->size[dimS];
  framesize = gradOutput->size[dimF];

  /* get raw pointers */
  gradInput_data = THTensor_(data)(gradInput);
  gradOutput_data = THTensor_(data)(gradOutput);
  indices_data = THIndexTensor_(data)(indices);

  if (input->nDimension == 2)
  {
    for(t = 0; t < noframe; t++)
    {
      real *gip = gradInput_data + t*framesize*dW;
      real *gop = gradOutput_data + t*framesize;
      THIndex_t *xp = indices_data + t*framesize;
#pragma omp parallel for private(y)
      for(y = 0; y < framesize; y++)
      {
        /* scatter gradient to the argmax position */
        long maxindex = (long)xp[y];
        if (maxindex != -1)
          gip[maxindex*framesize+y] += gop[y];
      }
    }
  }
  else
  {
    /* number of batch frames */
    long nbframe = input->size[0];
    long i;

    for(i = 0; i < nbframe; i++)
    {
      real *gradInputSample_data = gradInput_data + i*niframe*framesize;
      real *gradOutputSample_data = gradOutput_data + i*noframe*framesize;
      THIndex_t *indicesSample_data = indices_data + i*noframe*framesize;

      for(t = 0; t < noframe; t++)
      {
        real *gip = gradInputSample_data + t*framesize*dW;
        real *gop = gradOutputSample_data + t*framesize;
        THIndex_t *xp = indicesSample_data + t*framesize;
#pragma omp parallel for private(y)
        for(y = 0; y < framesize; y++)
        {
          /* scatter gradient to the argmax position */
          long maxindex = (long)xp[y];
          if (maxindex != -1)
            gip[maxindex*framesize+y] += gop[y];
        }
      }
    }
  }

  /* cleanup */
  THTensor_(free)(gradOutput);
}

#endif
GxB_Scalar_wait.c
//------------------------------------------------------------------------------ // GxB_Scalar_wait: wait for a scalar to complete //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // Finishes all work on a scalar, followed by an OpenMP flush. #include "GB.h" #define GB_FREE_ALL ; GrB_Info GxB_Scalar_wait // finish all work on a scalar ( GxB_Scalar *s ) { //-------------------------------------------------------------------------- // check inputs //-------------------------------------------------------------------------- #pragma omp flush GB_WHERE ((*s), "GxB_Scalar_wait (&s)") ; GB_RETURN_IF_NULL (s) ; GB_RETURN_IF_NULL_OR_FAULTY (*s) ; //-------------------------------------------------------------------------- // finish all pending work on the scalar //-------------------------------------------------------------------------- if (GB_ANY_PENDING_WORK (*s)) { GrB_Info info ; GB_BURBLE_START ("GxB_Scalar_wait") ; GB_OK (GB_Matrix_wait ((GrB_Matrix) (*s), Context)) ; GB_BURBLE_END ; } //-------------------------------------------------------------------------- // return result //-------------------------------------------------------------------------- #pragma omp flush return (GrB_SUCCESS) ; }
loops.h
// Cuda/host loops #pragma once #include "cutil.h" #include "debug.h" #include "preprocessor.h" #include "print.h" #include <optional> #include <type_traits> #ifndef __APPLE__ #include <omp.h> #endif namespace mandelbrot { using std::is_signed_v; using std::optional; using std::tuple; // For now, assume we fit in int32_t template<class I> __device__ static inline int grid_stride_loop_size(const I n) { static_assert(is_signed_v<I>); return int(n); } // Define a grid stride loop #define GRID_STRIDE_LOOP(n, i) \ for (int _n = grid_stride_loop_size(n), \ _stride = blockDim.x * gridDim.x, \ i = blockIdx.x * blockDim.x + threadIdx.x; \ i < _n; i += _stride) // Call a one-dimensional grid-stride loop #define INVOKE_GRID_STRIDE_LOOP(name, n, ...) CUDA_OR_DIE(({ \ const int _n = (n); /* For now, assume we fit in int32_t */ \ name<<<32*num_sms(), 256>>>(_n, __VA_ARGS__); })) // Define 1D loop functions on CPU and GPU #define DEF_LOOP(name, n, i, args, body) \ IF_CUDA(template<class S> __global__ static void name##_device(const int n, UNPAREN args) { \ GRID_STRIDE_LOOP(n, i) { body } \ }) \ template<class S> static void name##_host(const int n, UNPAREN args) { \ for (int i = 0; i < n; i++) { body } \ } \ template<class... Args> static inline void name(const int64_t n, Args&&... xs) { \ if (!n) return; \ if constexpr ((... || is_device<Args>)) \ INVOKE_GRID_STRIDE_LOOP(name##_device, n, undevice(xs)...); \ else \ name##_host(n, std::forward<Args>(xs)...); \ } // Define a serial function on CPU and GPU (not a loop, but meh). // This is for reducing the number of total kernel invocations in base cases. #define DEF_SERIAL(name, args, body) \ IF_CUDA(template<class S> __global__ static void name##_device(UNPAREN args) { body }) \ template<class S> static void name##_host(UNPAREN args) { body } \ template<class... Args> static inline void name(Args&&... xs) { \ if constexpr ((... 
|| is_device<Args>)) \ CUDA_OR_DIE(name##_device<<<1, 1, 0, stream()>>>(undevice(xs)...)); \ else \ name##_host(std::forward<Args>(xs)...); \ } // Chop a loop into [start,end) chunks tuple<int64_t,int64_t> partition_loop(const int64_t steps, const int threads, const int thread); // Parallel reductions that assume only associativity. // Formally, if (reduce(y, a), reduce(y, b)) is equivalent to (reduce(a, b), reduce(y, a)), then // this routine is equivalent to: // for (int64_t i = 0; i < n; i++) // reduce(y, map(i)); template<class Y, class R, class M> void map_reduce(Y& y, R&& reduce, M&& map, const int64_t n) { #if __APPLE__ for (int64_t i = 0; i < n; i++) reduce(y, map(i)); #else vector<optional<Y>> partials; #pragma omp parallel { const int threads = omp_get_num_threads(); const int thread = omp_get_thread_num(); const auto [start, end] = partition_loop(n, threads, thread); if (start < end) { #pragma omp critical { partials.resize(threads); } auto& p = partials[thread]; for (int64_t i = start; i < end; i++) { auto fx = map(i); if (i == start) p = move(fx); else reduce(*p, fx); } } } for (const auto& t : partials) if (t) reduce(y, *t); #endif } } // namespace mandelbrot
yescrypt-opt.c
/*- * Copyright 2009 Colin Percival * Copyright 2013,2014 Alexander Peslyak * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * This file was originally written by Colin Percival as part of the Tarsnap * online backup system. 
 */
#include <errno.h>
#include <stdint.h>
#include <stdlib.h>

#include "sha256_Y.h"
#include "sysendian.h"

#include "yescrypt-platform.c"

/* Decode a 32-bit little-endian integer regardless of host byte order. */
static inline uint32_t
le32dec(const void *pp)
{
	const uint8_t *p = (uint8_t const *)pp;

	return ((uint32_t)(p[0]) + ((uint32_t)(p[1]) << 8) +
	    ((uint32_t)(p[2]) << 16) + ((uint32_t)(p[3]) << 24));
}

/* Encode x as a 32-bit little-endian integer regardless of host byte order. */
static inline void
le32enc(void *pp, uint32_t x)
{
	uint8_t * p = (uint8_t *)pp;

	p[0] = x & 0xff;
	p[1] = (x >> 8) & 0xff;
	p[2] = (x >> 16) & 0xff;
	p[3] = (x >> 24) & 0xff;
}

/* Copy count 64-bit words from src to dest, unrolled by 4.
 * count must be a positive multiple of 4 (callers pass multiples of 8). */
static inline void
blkcpy(uint64_t * dest, const uint64_t * src, size_t count)
{
	do {
		*dest++ = *src++;
		*dest++ = *src++;
		*dest++ = *src++;
		*dest++ = *src++;
	} while (count -= 4);
}

/* XOR count 64-bit words of src into dest, unrolled by 4.
 * count must be a positive multiple of 4. */
static inline void
blkxor(uint64_t * dest, const uint64_t * src, size_t count)
{
	do {
		*dest++ ^= *src++;
		*dest++ ^= *src++;
		*dest++ ^= *src++;
		*dest++ ^= *src++;
	} while (count -= 4);
}

/* One 64-byte Salsa20 block, viewed as 16 32-bit or 8 64-bit words. */
typedef union {
	uint32_t w[16];
	uint64_t d[8];
} salsa20_blk_t;

/* Permute a block from canonical to SIMD-shuffled word order. */
static inline void
salsa20_simd_shuffle(const salsa20_blk_t * Bin, salsa20_blk_t * Bout)
{
#define COMBINE(out, in1, in2) \
	Bout->d[out] = Bin->w[in1 * 2] | ((uint64_t)Bin->w[in2 * 2 + 1] << 32);
	COMBINE(0, 0, 2)
	COMBINE(1, 5, 7)
	COMBINE(2, 2, 4)
	COMBINE(3, 7, 1)
	COMBINE(4, 4, 6)
	COMBINE(5, 1, 3)
	COMBINE(6, 6, 0)
	COMBINE(7, 3, 5)
#undef COMBINE
}

/* Inverse of salsa20_simd_shuffle: back to canonical word order. */
static inline void
salsa20_simd_unshuffle(const salsa20_blk_t * Bin, salsa20_blk_t * Bout)
{
#define COMBINE(out, in1, in2) \
	Bout->w[out * 2] = Bin->d[in1]; \
	Bout->w[out * 2 + 1] = Bin->d[in2] >> 32;
	COMBINE(0, 0, 6)
	COMBINE(1, 5, 3)
	COMBINE(2, 2, 0)
	COMBINE(3, 7, 5)
	COMBINE(4, 4, 2)
	COMBINE(5, 1, 7)
	COMBINE(6, 6, 4)
	COMBINE(7, 3, 1)
#undef COMBINE
}

/**
 * salsa20_8(B):
 * Apply the salsa20/8 core to the provided block.
 */
static void
salsa20_8(uint64_t B[8])
{
	size_t i;
	salsa20_blk_t X;
#define x X.w

	/* B is stored shuffled; unshuffle to canonical order for the rounds */
	salsa20_simd_unshuffle((const salsa20_blk_t *)B, &X);

	/* 8 rounds = 4 double-rounds (column round + row round) */
	for (i = 0; i < 8; i += 2) {
#define R(a,b) (((a) << (b)) | ((a) >> (32 - (b))))
		/* Operate on columns */
		x[ 4] ^= R(x[ 0]+x[12], 7);
		x[ 8] ^= R(x[ 4]+x[ 0], 9);
		x[12] ^= R(x[ 8]+x[ 4],13);
		x[ 0] ^= R(x[12]+x[ 8],18);
		x[ 9] ^= R(x[ 5]+x[ 1], 7);
		x[13] ^= R(x[ 9]+x[ 5], 9);
		x[ 1] ^= R(x[13]+x[ 9],13);
		x[ 5] ^= R(x[ 1]+x[13],18);
		x[14] ^= R(x[10]+x[ 6], 7);
		x[ 2] ^= R(x[14]+x[10], 9);
		x[ 6] ^= R(x[ 2]+x[14],13);
		x[10] ^= R(x[ 6]+x[ 2],18);
		x[ 3] ^= R(x[15]+x[11], 7);
		x[ 7] ^= R(x[ 3]+x[15], 9);
		x[11] ^= R(x[ 7]+x[ 3],13);
		x[15] ^= R(x[11]+x[ 7],18);

		/* Operate on rows */
		x[ 1] ^= R(x[ 0]+x[ 3], 7);
		x[ 2] ^= R(x[ 1]+x[ 0], 9);
		x[ 3] ^= R(x[ 2]+x[ 1],13);
		x[ 0] ^= R(x[ 3]+x[ 2],18);
		x[ 6] ^= R(x[ 5]+x[ 4], 7);
		x[ 7] ^= R(x[ 6]+x[ 5], 9);
		x[ 4] ^= R(x[ 7]+x[ 6],13);
		x[ 5] ^= R(x[ 4]+x[ 7],18);
		x[11] ^= R(x[10]+x[ 9], 7);
		x[ 8] ^= R(x[11]+x[10], 9);
		x[ 9] ^= R(x[ 8]+x[11],13);
		x[10] ^= R(x[ 9]+x[ 8],18);
		x[12] ^= R(x[15]+x[14], 7);
		x[13] ^= R(x[12]+x[15], 9);
		x[14] ^= R(x[13]+x[12],13);
		x[15] ^= R(x[14]+x[13],18);
#undef R
	}
#undef x

	/* Feed-forward: add the rounds' output to the original (shuffled) input */
	{
		salsa20_blk_t Y;
		salsa20_simd_shuffle(&X, &Y);
		for (i = 0; i < 16; i += 4) {
			((salsa20_blk_t *)B)->w[i] += Y.w[i];
			((salsa20_blk_t *)B)->w[i + 1] += Y.w[i + 1];
			((salsa20_blk_t *)B)->w[i + 2] += Y.w[i + 2];
			((salsa20_blk_t *)B)->w[i + 3] += Y.w[i + 3];
		}
	}
}

/**
 * blockmix_salsa8(Bin, Bout, X, r):
 * Compute Bout = BlockMix_{salsa20/8, r}(Bin).  The input Bin must be 128r
 * bytes in length; the output Bout must also be the same size.  The
 * temporary space X must be 64 bytes.
 */
static void
blockmix_salsa8(const uint64_t * Bin, uint64_t * Bout, uint64_t * X, size_t r)
{
	size_t i;

	/* 1: X <-- B_{2r - 1} */
	blkcpy(X, &Bin[(2 * r - 1) * 8], 8);

	/* 2: for i = 0 to 2r - 1 do */
	for (i = 0; i < 2 * r; i += 2) {
		/* 3: X <-- H(X \xor B_i) */
		blkxor(X, &Bin[i * 8], 8);
		salsa20_8(X);

		/* 4: Y_i <-- X */
		/* 6: B' <-- (Y_0, Y_2 ... Y_{2r-2}, Y_1, Y_3 ... Y_{2r-1}) */
		blkcpy(&Bout[i * 4], X, 8);

		/* 3: X <-- H(X \xor B_i) */
		blkxor(X, &Bin[i * 8 + 8], 8);
		salsa20_8(X);

		/* 4: Y_i <-- X */
		/* 6: B' <-- (Y_0, Y_2 ... Y_{2r-2}, Y_1, Y_3 ... Y_{2r-1}) */
		blkcpy(&Bout[i * 4 + r * 8], X, 8);
	}
}

/* These are tunable */
#define S_BITS 8
#define S_SIMD 2
#define S_P 4
#define S_ROUNDS 6

/* Number of S-boxes.  Not tunable, hard-coded in a few places. */
#define S_N 2

/* Derived values.  Not tunable on their own. */
#define S_SIZE1 (1 << S_BITS)
#define S_MASK ((S_SIZE1 - 1) * S_SIMD * 8)
#define S_MASK2 (((uint64_t)S_MASK << 32) | S_MASK)
#define S_SIZE_ALL (S_N * S_SIZE1 * S_SIMD)
#define S_P_SIZE (S_P * S_SIMD)
#define S_MIN_R ((S_P * S_SIMD + 15) / 16)

/**
 * pwxform(B):
 * Transform the provided block using the provided S-boxes.
 */
static void
block_pwxform(uint64_t * B, const uint64_t * S)
{
	uint64_t (*X)[S_SIMD] = (uint64_t (*)[S_SIMD])B;
	const uint8_t *S0 = (const uint8_t *)S;
	const uint8_t *S1 = (const uint8_t *)(S + S_SIZE1 * S_SIMD);
	size_t i, j;
#if S_SIMD > 2
	size_t k;
#endif

	for (j = 0; j < S_P; j++) {
		uint64_t *Xj = X[j];
		uint64_t x0 = Xj[0];
#if S_SIMD > 1
		uint64_t x1 = Xj[1];
#endif

		for (i = 0; i < S_ROUNDS; i++) {
			/* low/high 32 bits of x0 select the S0/S1 table entries */
			uint64_t x = x0 & S_MASK2;
			const uint64_t *p0, *p1;

			p0 = (const uint64_t *)(S0 + (uint32_t)x);
			p1 = (const uint64_t *)(S1 + (x >> 32));

			/* multiply-add-xor lane 0 */
			x0 = (uint64_t)(x0 >> 32) * (uint32_t)x0;
			x0 += p0[0];
			x0 ^= p1[0];

#if S_SIMD > 1
			/* multiply-add-xor lane 1 (same table entries) */
			x1 = (uint64_t)(x1 >> 32) * (uint32_t)x1;
			x1 += p0[1];
			x1 ^= p1[1];
#endif

#if S_SIMD > 2
			for (k = 2; k < S_SIMD; k++) {
				x = Xj[k];

				x = (uint64_t)(x >> 32) * (uint32_t)x;
				x += p0[k];
				x ^= p1[k];

				Xj[k] = x;
			}
#endif
		}

		Xj[0] = x0;
#if S_SIMD > 1
		Xj[1] = x1;
#endif
	}
}

/**
 * blockmix_pwxform(Bin, Bout, S, r):
 * Compute Bout = BlockMix_pwxform{salsa20/8, S, r}(Bin).  The input Bin must
 * be 128r bytes in length; the output Bout must also be the same size.
 *
 * S lacks const qualifier to match blockmix_salsa8()'s prototype, which we
 * need to refer to both functions via the same function pointers.
 */
static void
blockmix_pwxform(const uint64_t * Bin, uint64_t * Bout, uint64_t * S, size_t r)
{
	size_t r1, r2, i;

	/* Convert 128-byte blocks to (S_P_SIZE * 64-bit) blocks */
	r1 = r * 128 / (S_P_SIZE * 8);

	/* X <-- B_{r1 - 1} */
	blkcpy(Bout, &Bin[(r1 - 1) * S_P_SIZE], S_P_SIZE);

	/* X <-- X \xor B_i */
	blkxor(Bout, Bin, S_P_SIZE);

	/* X <-- H'(X) */
	/* B'_i <-- X */
	block_pwxform(Bout, S);

	/* for i = 0 to r1 - 1 do */
	for (i = 1; i < r1; i++) {
		/* X <-- X \xor B_i */
		blkcpy(&Bout[i * S_P_SIZE], &Bout[(i - 1) * S_P_SIZE],
		    S_P_SIZE);
		blkxor(&Bout[i * S_P_SIZE], &Bin[i * S_P_SIZE], S_P_SIZE);

		/* X <-- H'(X) */
		/* B'_i <-- X */
		block_pwxform(&Bout[i * S_P_SIZE], S);
	}

	/* Handle partial blocks */
	if (i * S_P_SIZE < r * 16)
		blkcpy(&Bout[i * S_P_SIZE], &Bin[i * S_P_SIZE],
		    r * 16 - i * S_P_SIZE);

	i = (r1 - 1) * S_P_SIZE / 8;

	/* Convert 128-byte blocks to 64-byte blocks */
	r2 = r * 2;

	/* B'_i <-- H(B'_i) */
	salsa20_8(&Bout[i * 8]);
	i++;

	for (; i < r2; i++) {
		/* B'_i <-- H(B'_i \xor B'_{i-1}) */
		blkxor(&Bout[i * 8], &Bout[(i - 1) * 8], 8);
		salsa20_8(&Bout[i * 8]);
	}
}

/**
 * integerify(B, r):
 * Return the result of parsing B_{2r-1} as a little-endian integer.
 */
static inline uint64_t
integerify(const uint64_t * B, size_t r)
{
/*
 * Our 64-bit words are in host byte order, and word 6 holds the second 32-bit
 * word of B_{2r-1} due to SIMD shuffling.  The 64-bit value we return is also
 * in host byte order, as it should be.
 */
	const uint64_t * X = &B[(2 * r - 1) * 8];
	uint32_t lo = X[0];
	uint32_t hi = X[6] >> 32;
	return ((uint64_t)hi << 32) + lo;
}

/**
 * smix1(B, r, N, flags, V, NROM, shared, XY, S):
 * Compute first loop of B = SMix_r(B, N).  The input B must be 128r bytes in
 * length; the temporary storage V must be 128rN bytes in length; the temporary
 * storage XY must be 256r + 64 bytes in length.  The value N must be even and
 * no smaller than 2.
 */
static void
smix1(uint64_t * B, size_t r, uint64_t N, yescrypt_flags_t flags,
    uint64_t * V, uint64_t NROM, const yescrypt_shared_t * shared,
    uint64_t * XY, uint64_t * S)
{
	/* pwxform variant when S-boxes are supplied, classic scrypt otherwise */
	void (*blockmix)(const uint64_t *, uint64_t *, uint64_t *, size_t) =
	    (S ? blockmix_pwxform : blockmix_salsa8);
	const uint64_t * VROM = shared->shared1.aligned;
	uint32_t VROM_mask = shared->mask1;
	size_t s = 16 * r;
	uint64_t * X = V;
	uint64_t * Y = &XY[s];
	uint64_t * Z = S ? S : &XY[2 * s];
	uint64_t n, i, j;
	size_t k;

	/* 1: X <-- B */
	/* 3: V_i <-- X */
	for (i = 0; i < 2 * r; i++) {
		const salsa20_blk_t *src = (const salsa20_blk_t *)&B[i * 8];
		salsa20_blk_t *tmp = (salsa20_blk_t *)Y;
		salsa20_blk_t *dst = (salsa20_blk_t *)&X[i * 8];
		for (k = 0; k < 16; k++)
			tmp->w[k] = le32dec(&src->w[k]);
		salsa20_simd_shuffle(tmp, dst);
	}

	/* 4: X <-- H(X) */
	/* 3: V_i <-- X */
	blockmix(X, Y, Z, r);
	blkcpy(&V[s], Y, s);

	X = XY;

	if (NROM && (VROM_mask & 1)) {
		/* NOTE(review): this inner test repeats the (VROM_mask & 1)
		 * check of the enclosing if; it is redundant but harmless */
		if ((1 & VROM_mask) == 1) {
			/* j <-- Integerify(X) mod NROM */
			j = integerify(Y, r) & (NROM - 1);

			/* X <-- H(X \xor VROM_j) */
			blkxor(Y, &VROM[j * s], s);
		}

		blockmix(Y, X, Z, r);

		/* 2: for i = 0 to N - 1 do */
		for (n = 1, i = 2; i < N; i += 2) {
			/* 3: V_i <-- X */
			blkcpy(&V[i * s], X, s);

			if ((i & (i - 1)) == 0)
				n <<= 1;

			/* j <-- Wrap(Integerify(X), i) */
			j = integerify(X, r) & (n - 1);
			j += i - n;

			/* X <-- X \xor V_j */
			blkxor(X, &V[j * s], s);

			/* 4: X <-- H(X) */
			blockmix(X, Y, Z, r);

			/* 3: V_i <-- X */
			blkcpy(&V[(i + 1) * s], Y, s);

			j = integerify(Y, r);
			if (((i + 1) & VROM_mask) == 1) {
				/* j <-- Integerify(X) mod NROM */
				j &= NROM - 1;

				/* X <-- H(X \xor VROM_j) */
				blkxor(Y, &VROM[j * s], s);
			} else {
				/* j <-- Wrap(Integerify(X), i) */
				j &= n - 1;
				j += i + 1 - n;

				/* X <-- H(X \xor V_j) */
				blkxor(Y, &V[j * s], s);
			}

			blockmix(Y, X, Z, r);
		}
	} else {
		yescrypt_flags_t rw = flags & YESCRYPT_RW;

		/* 4: X <-- H(X) */
		blockmix(Y, X, Z, r);

		/* 2: for i = 0 to N - 1 do */
		for (n = 1, i = 2; i < N; i += 2) {
			/* 3: V_i <-- X */
			blkcpy(&V[i * s], X, s);

			if (rw) {
				if ((i & (i - 1)) == 0)
					n <<= 1;

				/* j <-- Wrap(Integerify(X), i) */
				j = integerify(X, r) & (n - 1);
				j += i - n;

				/* X <-- X \xor V_j */
				blkxor(X, &V[j * s], s);
			}

			/* 4: X <-- H(X) */
			blockmix(X, Y, Z, r);

			/* 3: V_i <-- X */
			blkcpy(&V[(i + 1) * s], Y, s);

			if (rw) {
				/* j <-- Wrap(Integerify(X), i) */
				j = integerify(Y, r) & (n - 1);
				j += (i + 1) - n;

				/* X <-- X \xor V_j */
				blkxor(Y, &V[j * s], s);
			}

			/* 4: X <-- H(X) */
			blockmix(Y, X, Z, r);
		}
	}

	/* B' <-- X */
	for (i = 0; i < 2 * r; i++) {
		const salsa20_blk_t *src = (const salsa20_blk_t *)&X[i * 8];
		salsa20_blk_t *tmp = (salsa20_blk_t *)Y;
		salsa20_blk_t *dst = (salsa20_blk_t *)&B[i * 8];
		for (k = 0; k < 16; k++)
			le32enc(&tmp->w[k], src->w[k]);
		salsa20_simd_unshuffle(tmp, dst);
	}
}

/**
 * smix2(B, r, N, Nloop, flags, V, NROM, shared, XY, S):
 * Compute second loop of B = SMix_r(B, N).  The input B must be 128r bytes in
 * length; the temporary storage V must be 128rN bytes in length; the temporary
 * storage XY must be 256r + 64 bytes in length.  The value N must be a
 * power of 2 greater than 1.  The value Nloop must be even.
 */
static void
smix2(uint64_t * B, size_t r, uint64_t N, uint64_t Nloop,
    yescrypt_flags_t flags, uint64_t * V, uint64_t NROM,
    const yescrypt_shared_t * shared, uint64_t * XY, uint64_t * S)
{
	/* pwxform variant when S-boxes are supplied, classic scrypt otherwise */
	void (*blockmix)(const uint64_t *, uint64_t *, uint64_t *, size_t) =
	    (S ? blockmix_pwxform : blockmix_salsa8);
	const uint64_t * VROM = shared->shared1.aligned;
	uint32_t VROM_mask = shared->mask1 | 1;
	size_t s = 16 * r;
	yescrypt_flags_t rw = flags & YESCRYPT_RW;
	uint64_t * X = XY;
	uint64_t * Y = &XY[s];
	uint64_t * Z = S ? S : &XY[2 * s];
	uint64_t i, j;
	size_t k;

	if (Nloop == 0)
		return;

	/* X <-- B' */
	for (i = 0; i < 2 * r; i++) {
		const salsa20_blk_t *src = (const salsa20_blk_t *)&B[i * 8];
		salsa20_blk_t *tmp = (salsa20_blk_t *)Y;
		salsa20_blk_t *dst = (salsa20_blk_t *)&X[i * 8];
		for (k = 0; k < 16; k++)
			tmp->w[k] = le32dec(&src->w[k]);
		salsa20_simd_shuffle(tmp, dst);
	}

	if (NROM) {
		/* 6: for i = 0 to N - 1 do */
		for (i = 0; i < Nloop; i += 2) {
			/* 7: j <-- Integerify(X) mod N */
			j = integerify(X, r) & (N - 1);

			/* 8: X <-- H(X \xor V_j) */
			blkxor(X, &V[j * s], s);
			/* V_j <-- Xprev \xor V_j */
			if (rw)
				blkcpy(&V[j * s], X, s);
			blockmix(X, Y, Z, r);

			j = integerify(Y, r);
			if (((i + 1) & VROM_mask) == 1) {
				/* j <-- Integerify(X) mod NROM */
				j &= NROM - 1;

				/* X <-- H(X \xor VROM_j) */
				blkxor(Y, &VROM[j * s], s);
			} else {
				/* 7: j <-- Integerify(X) mod N */
				j &= N - 1;

				/* 8: X <-- H(X \xor V_j) */
				blkxor(Y, &V[j * s], s);
				/* V_j <-- Xprev \xor V_j */
				if (rw)
					blkcpy(&V[j * s], Y, s);
			}

			blockmix(Y, X, Z, r);
		}
	} else {
		/* 6: for i = 0 to N - 1 do */
		i = Nloop / 2;
		do {
			/* 7: j <-- Integerify(X) mod N */
			j = integerify(X, r) & (N - 1);

			/* 8: X <-- H(X \xor V_j) */
			blkxor(X, &V[j * s], s);
			/* V_j <-- Xprev \xor V_j */
			if (rw)
				blkcpy(&V[j * s], X, s);
			blockmix(X, Y, Z, r);

			/* 7: j <-- Integerify(X) mod N */
			j = integerify(Y, r) & (N - 1);

			/* 8: X <-- H(X \xor V_j) */
			blkxor(Y, &V[j * s], s);
			/* V_j <-- Xprev \xor V_j */
			if (rw)
				blkcpy(&V[j * s], Y, s);
			blockmix(Y, X, Z, r);
		} while (--i);
	}

	/* 10: B' <-- X */
	for (i = 0; i < 2 * r; i++) {
		const salsa20_blk_t *src = (const salsa20_blk_t *)&X[i * 8];
		salsa20_blk_t *tmp = (salsa20_blk_t *)Y;
		salsa20_blk_t *dst = (salsa20_blk_t *)&B[i * 8];
		for (k = 0; k < 16; k++)
			le32enc(&tmp->w[k], src->w[k]);
		salsa20_simd_unshuffle(tmp, dst);
	}
}

/**
 * p2floor(x):
 * Largest power of 2 not greater than argument.
*/ static uint64_t p2floor(uint64_t x) { uint64_t y; while ((y = x & (x - 1))) x = y; return x; } /** * smix(B, r, N, p, t, flags, V, NROM, shared, XY, S): * Compute B = SMix_r(B, N). The input B must be 128rp bytes in length; the * temporary storage V must be 128rN bytes in length; the temporary storage * XY must be 256r+64 or (256r+64)*p bytes in length (the larger size is * required with OpenMP-enabled builds). The value N must be a power of 2 * greater than 1. */ static void smix(uint64_t * B, size_t r, uint64_t N, uint32_t p, uint32_t t, yescrypt_flags_t flags, uint64_t * V, uint64_t NROM, const yescrypt_shared_t * shared, uint64_t * XY, uint64_t * S) { size_t s = 16 * r; uint64_t Nchunk = N / p, Nloop_all, Nloop_rw; uint32_t i; Nloop_all = Nchunk; if (flags & YESCRYPT_RW) { if (t <= 1) { if (t) Nloop_all *= 2; /* 2/3 */ Nloop_all = (Nloop_all + 2) / 3; /* 1/3, round up */ } else { Nloop_all *= t - 1; } } else if (t) { if (t == 1) Nloop_all += (Nloop_all + 1) / 2; /* 1.5, round up */ Nloop_all *= t; } Nloop_rw = 0; if (flags & __YESCRYPT_INIT_SHARED) Nloop_rw = Nloop_all; else if (flags & YESCRYPT_RW) Nloop_rw = Nloop_all / p; Nchunk &= ~(uint64_t)1; /* round down to even */ Nloop_all++; Nloop_all &= ~(uint64_t)1; /* round up to even */ Nloop_rw &= ~(uint64_t)1; /* round down to even */ #ifdef _OPENMP #pragma omp parallel if (p > 1) default(none) private(i) shared(B, r, N, p, flags, V, NROM, shared, XY, S, s, Nchunk, Nloop_all, Nloop_rw) { #pragma omp for #endif for (i = 0; i < p; i++) { uint64_t Vchunk = i * Nchunk; uint64_t * Bp = &B[i * s]; uint64_t * Vp = &V[Vchunk * s]; #ifdef _OPENMP uint64_t * XYp = &XY[i * (2 * s + 8)]; #else uint64_t * XYp = XY; #endif uint64_t Np = (i < p - 1) ? Nchunk : (N - Vchunk); uint64_t * Sp = S ? 
&S[i * S_SIZE_ALL] : S; if (Sp) smix1(Bp, 1, S_SIZE_ALL / 16, flags & ~YESCRYPT_PWXFORM, Sp, NROM, shared, XYp, NULL); if (!(flags & __YESCRYPT_INIT_SHARED_2)) smix1(Bp, r, Np, flags, Vp, NROM, shared, XYp, Sp); smix2(Bp, r, p2floor(Np), Nloop_rw, flags, Vp, NROM, shared, XYp, Sp); } if (Nloop_all > Nloop_rw) { #ifdef _OPENMP #pragma omp for #endif for (i = 0; i < p; i++) { uint64_t * Bp = &B[i * s]; #ifdef _OPENMP uint64_t * XYp = &XY[i * (2 * s + 8)]; #else uint64_t * XYp = XY; #endif uint64_t * Sp = S ? &S[i * S_SIZE_ALL] : S; smix2(Bp, r, N, Nloop_all - Nloop_rw, flags & ~YESCRYPT_RW, V, NROM, shared, XYp, Sp); } } #ifdef _OPENMP } #endif } /** * yescrypt_kdf(shared, local, passwd, passwdlen, salt, saltlen, * N, r, p, t, flags, buf, buflen): * Compute scrypt(passwd[0 .. passwdlen - 1], salt[0 .. saltlen - 1], N, r, * p, buflen), or a revision of scrypt as requested by flags and shared, and * write the result into buf. The parameters r, p, and buflen must satisfy * r * p < 2^30 and buflen <= (2^32 - 1) * 32. The parameter N must be a power * of 2 greater than 1. * * t controls computation time while not affecting peak memory usage. shared * and flags may request special modes as described in yescrypt.h. local is * the thread-local data structure, allowing to preserve and reuse a memory * allocation across calls, thereby reducing its overhead. * * Return 0 on success; or -1 on error. */ int yescrypt_kdf(const yescrypt_shared_t * shared, yescrypt_local_t * local, const uint8_t * passwd, size_t passwdlen, const uint8_t * salt, size_t saltlen, uint64_t N, uint32_t r, uint32_t p, uint32_t t, yescrypt_flags_t flags, uint8_t * buf, size_t buflen) { yescrypt_region_t tmp; uint64_t NROM; size_t B_size, V_size, XY_size, need; uint64_t * B, * V, * XY, * S; uint64_t sha256[4]; /* * YESCRYPT_PARALLEL_SMIX is a no-op at p = 1 for its intended purpose, * so don't let it have side-effects. 
Without this adjustment, it'd * enable the SHA-256 password pre-hashing and output post-hashing, * because any deviation from classic scrypt implies those. */ if (p == 1) flags &= ~YESCRYPT_PARALLEL_SMIX; /* Sanity-check parameters */ if (flags & ~YESCRYPT_KNOWN_FLAGS) { errno = EINVAL; return -1; } #if SIZE_MAX > UINT32_MAX if (buflen > (((uint64_t)(1) << 32) - 1) * 32) { errno = EFBIG; return -1; } #endif if ((uint64_t)(r) * (uint64_t)(p) >= (1 << 30)) { errno = EFBIG; return -1; } if (((N & (N - 1)) != 0) || (N <= 1) || (r < 1) || (p < 1)) { errno = EINVAL; return -1; } if ((flags & YESCRYPT_PARALLEL_SMIX) && (N / p <= 1)) { errno = EINVAL; return -1; } #if S_MIN_R > 1 if ((flags & YESCRYPT_PWXFORM) && (r < S_MIN_R)) { errno = EINVAL; return -1; } #endif if ((p > SIZE_MAX / ((size_t)256 * r + 64)) || #if SIZE_MAX / 256 <= UINT32_MAX (r > SIZE_MAX / 256) || #endif (N > SIZE_MAX / 128 / r)) { errno = ENOMEM; return -1; } if (N > UINT64_MAX / ((uint64_t)t + 1)) { errno = EFBIG; return -1; } #ifdef _OPENMP if (!(flags & YESCRYPT_PARALLEL_SMIX) && (N > SIZE_MAX / 128 / (r * p))) { errno = ENOMEM; return -1; } #endif if ((flags & YESCRYPT_PWXFORM) && #ifndef _OPENMP (flags & YESCRYPT_PARALLEL_SMIX) && #endif p > SIZE_MAX / (S_SIZE_ALL * sizeof(*S))) { errno = ENOMEM; return -1; } NROM = 0; if (shared->shared1.aligned) { NROM = shared->shared1.aligned_size / ((size_t)128 * r); if (((NROM & (NROM - 1)) != 0) || (NROM <= 1) || !(flags & YESCRYPT_RW)) { errno = EINVAL; return -1; } } /* Allocate memory */ V = NULL; V_size = (size_t)128 * r * N; #ifdef _OPENMP if (!(flags & YESCRYPT_PARALLEL_SMIX)) V_size *= p; #endif need = V_size; if (flags & __YESCRYPT_INIT_SHARED) { if (local->aligned_size < need) { if (local->base || local->aligned || local->base_size || local->aligned_size) { errno = EINVAL; return -1; } if (!alloc_region(local, need)) return -1; } V = (uint64_t *)local->aligned; need = 0; } B_size = (size_t)128 * r * p; need += B_size; if (need < B_size) { errno = 
ENOMEM; return -1; } XY_size = (size_t)256 * r + 64; #ifdef _OPENMP XY_size *= p; #endif need += XY_size; if (need < XY_size) { errno = ENOMEM; return -1; } if (flags & YESCRYPT_PWXFORM) { size_t S_size = S_SIZE_ALL * sizeof(*S); #ifdef _OPENMP S_size *= p; #else if (flags & YESCRYPT_PARALLEL_SMIX) S_size *= p; #endif need += S_size; if (need < S_size) { errno = ENOMEM; return -1; } } if (flags & __YESCRYPT_INIT_SHARED) { if (!alloc_region(&tmp, need)) return -1; B = (uint64_t *)tmp.aligned; XY = (uint64_t *)((uint8_t *)B + B_size); } else { init_region(&tmp); if (local->aligned_size < need) { if (free_region(local)) return -1; if (!alloc_region(local, need)) return -1; } B = (uint64_t *)local->aligned; V = (uint64_t *)((uint8_t *)B + B_size); XY = (uint64_t *)((uint8_t *)V + V_size); } S = NULL; if (flags & YESCRYPT_PWXFORM) S = (uint64_t *)((uint8_t *)XY + XY_size); if (t || flags) { SHA256_CTX_Y ctx; SHA256_Init_Y(&ctx); SHA256_Update_Y(&ctx, passwd, passwdlen); SHA256_Final_Y((uint8_t *)sha256, &ctx); passwd = (uint8_t *)sha256; passwdlen = sizeof(sha256); } /* 1: (B_0 ... B_{p-1}) <-- PBKDF2(P, S, 1, p * MFLen) */ PBKDF2_SHA256(passwd, passwdlen, salt, saltlen, 1, (uint8_t *)B, B_size); if (t || flags) blkcpy(sha256, B, sizeof(sha256) / sizeof(sha256[0])); if (p == 1 || (flags & YESCRYPT_PARALLEL_SMIX)) { smix(B, r, N, p, t, flags, V, NROM, shared, XY, S); } else { uint32_t i; /* 2: for i = 0 to p - 1 do */ #ifdef _OPENMP #pragma omp parallel for default(none) private(i) shared(B, r, N, p, t, flags, V, NROM, shared, XY, S) #endif for (i = 0; i < p; i++) { /* 3: B_i <-- MF(B_i, N) */ #ifdef _OPENMP smix(&B[(size_t)16 * r * i], r, N, 1, t, flags, &V[(size_t)16 * r * i * N], NROM, shared, &XY[((size_t)32 * r + 8) * i], S ? 
&S[S_SIZE_ALL * i] : S); #else smix(&B[(size_t)16 * r * i], r, N, 1, t, flags, V, NROM, shared, XY, S); #endif } } /* 5: DK <-- PBKDF2(P, B, 1, dkLen) */ PBKDF2_SHA256(passwd, passwdlen, (uint8_t *)B, B_size, 1, buf, buflen); /* * Except when computing classic scrypt, allow all computation so far * to be performed on the client. The final steps below match those of * SCRAM (RFC 5802), so that an extension of SCRAM (with the steps so * far in place of SCRAM's use of PBKDF2 and with SHA-256 in place of * SCRAM's use of SHA-1) would be usable with yescrypt hashes. */ if ((t || flags) && buflen == sizeof(sha256)) { /* Compute ClientKey */ { HMAC_SHA256_CTX_Y ctx; HMAC_SHA256_Init_Y(&ctx, buf, buflen); if (r == 32) { // yescryptR32 HMAC_SHA256_Update_Y(&ctx, "WaviBanana", 10); } else if (r == 16) { // yescryptR16 HMAC_SHA256_Update_Y(&ctx, "Client Key", 10); } else if (r == 8) { // yescryptR8 HMAC_SHA256_Update_Y(&ctx, "Client Key", 10); } else { // yescrypt HMAC_SHA256_Update_Y(&ctx, salt, saltlen); } HMAC_SHA256_Final_Y((uint8_t *)sha256, &ctx); } /* Compute StoredKey */ { SHA256_CTX_Y ctx; SHA256_Init_Y(&ctx); SHA256_Update_Y(&ctx, (uint8_t *)sha256, sizeof(sha256)); SHA256_Final_Y(buf, &ctx); } } if (free_region(&tmp)) return -1; /* Success! */ return 0; }
sxc_fmt_plug.c
/* SXC cracker patch for JtR. Hacked together during Summer of 2012 by * Dhiru Kholia <dhiru.kholia at gmail.com>. * * This software is Copyright (c) 2012, Dhiru Kholia <dhiru.kholia at gmail.com>, * and it is hereby released to the general public under the following terms: * Redistribution and use in source and binary forms, with or without modification, * are permitted. */ #if FMT_EXTERNS_H extern struct fmt_main fmt_sxc; #elif FMT_REGISTERS_H john_register_one(&fmt_sxc); #else #include <string.h> #include <assert.h> #include <errno.h> #include "arch.h" #include "misc.h" #include "common.h" #include "formats.h" #include "params.h" #include "options.h" #include "johnswap.h" #include "sha.h" #include <openssl/blowfish.h> #include "pbkdf2_hmac_sha1.h" #ifdef _OPENMP #include <omp.h> #ifndef OMP_SCALE #define OMP_SCALE 2 // tuned on core i7 #endif #endif #include "memdbg.h" #define FORMAT_LABEL "sxc" #define FORMAT_NAME "StarOffice .sxc" #define FORMAT_TAG "$sxc$*" #define FORMAT_TAG_LEN (sizeof(FORMAT_TAG)-1) #ifdef SIMD_COEF_32 #define ALGORITHM_NAME "SHA1 " SHA1_ALGORITHM_NAME " Blowfish" #else #define ALGORITHM_NAME "SHA1 Blowfish 32/" ARCH_BITS_STR #endif #define BENCHMARK_COMMENT "" #define BENCHMARK_LENGTH -1 #define BINARY_SIZE 20 #define PLAINTEXT_LENGTH 125 #define SALT_SIZE sizeof(struct custom_salt) #define BINARY_ALIGN sizeof(uint32_t) #define SALT_ALIGN sizeof(int) #ifdef SIMD_COEF_32 #define MIN_KEYS_PER_CRYPT SSE_GROUP_SZ_SHA1 #define MAX_KEYS_PER_CRYPT SSE_GROUP_SZ_SHA1 #else #define MIN_KEYS_PER_CRYPT 1 #define MAX_KEYS_PER_CRYPT 1 #endif static struct fmt_tests sxc_tests[] = { 
{"$sxc$*0*0*1024*16*4448359828281a1e6842c31453473abfeae584fb*8*dc0248bea0c7508c*16*1d53770002fe9d8016064e5ef9423174*860*864*f00399ab17b9899cd517758ecf918d4da78099ccd3557aef5e22e137fd5b81f732fc7c167c4de0cf263b4f82b50e3d6abc65da613a36b0025d89e1a09adeb4106da28040d1019bb4b36630fc8bc94fe5b515504bf8a92ea630bb95ace074868e7c10743ec970c89895f44b975a30b6ca032354f3e73ec86b2cc7a4f7a185884026d971b37b1e0e650376a2552e27ba955c700f8903a82a6df11f6cc2ecf63290f02ffdd278f890d1db75f9e8bd0f437c4ec613d3c6dcb421bbd1067be633593ba9bd58f77ef08e0cca64c732f892567d20de8d4c444fa9c1c1adc5e4657ef9740cb69ce55c8f9e6b1cfed0739ef002f1e1c1e54a5df50a759d92354f78eb90a9d9378f36df7d1edd8002ea0d637604fcd2408494c2d42b1771e2a2a20b55044836f76db4ed71e8a53f55a04f9437946603e7246c2d2d70caf6be0de82e8977fab4de84ca3783baedac041195d8b51166b502ff80c17db78f63d3632df1d5ef5b14d8d5553fc40b072030f9e3374c93e929a490c6cfb170f04433fc46f43b9c7d27f3f8c4ed759d4a20c2e53a0701b7c3d9201390a9b5597ce8ba35bd765b662e2242b9821bbb63b6be502d2150fff37e4b7f2a6b592fd0e319a7349df320e7fe7da600a2a05628dc00e04d480c085417f676bd0518bc39d9a9be34fc0cb192d5fa5e0c657cdf7c1ad265a2e81b90ac8b28d326f98b8f33c123df83edc964d2c17a904d0df8bd9ecbf629929d6e48cadc97f49a8941ada3d219e8c0f04f37cecc9a50cc5307fd2a488c34829b05cd1615ae0d1ef0ce450529aa755f9ae38332187ffe4144990de3265afaacb9f0f0fb9c67f6210369f7a0cc5bb346412db08e0f4732f91aa8d4b32fe6eece4fba118f118f6df2fb6c53fa9bc164c9ab7a9d414d33281eb0c3cd02abe0a4dd1c170e41c1c960a8f12a48a7b5e1f748c08e1b150a4e389c110ea3368bc6c6ef2bee98dc92c6825cbf6aee20e690e116c0e6cf48d49b38035f6a9b0cd6053b9f5b9f8360024c9c608cbba3fe5e7966b656fa08dec3e3ce3178a0c0007b7d177c7c44e6a68f4c7325cb98264b1e0f391c75a6a8fd3691581fb68ef459458830f2138d0fd743631efd92b742dfeb62c5ea8502515eb65af414bf805992f9272a7b1b745970fd54e128751f8f6c0a4d5bc7872bc09c04037e1e91dc7192d68f780cdb0f7ef6b282ea883be462ffeffb7b396e30303030", "openwall"}, 
{"$sxc$*0*0*1024*16*64983af0b26a6ee614e6c65b32c1d906f70c6397*8*259cafe530bd09f8*16*8f53ea878d0795cfe05dcc65fb272c20*1024*1024*ffb0f736b69d8433f958e8f475f609948ad7c9dd052f2b92c14cb1b395ffcac043a3def76d58442e131701b3b53d56ea570633bb20c83068542420160f5db3cee5eece05b67b54d0d4cdf3fbfd928d94852924e391aa3f70cad598b48b946399b0cd1e9e7e7d081a888933f8a1973e83166799396e8290771463c623620b51fb5310b9b0e0de3e5597b66a091301ada3ba6a5d7515d1fcf0eff65e543b50f8fe2222619600054eaf69c7aa680c96bc403f115cab32d6d8e8bc0db3904a71ce3eb1283ca73fd6f75845d9b7d0275e4727a0f56bfbf962a9079f51849de2c9dee7f1dadbbae944f442169281004773309b0f8d68f2cf83076fd8b19afbccf5ab7dc58fb9554fee82e2c491d6434a4cef6f3209775343c840bc4bdfab6705000e67412ac74d00f5b6aba1fd21bca5213234a5a1100a9d93daa141a4936723ea77d008a35c9078167a3529706432b36b9ec012c060d093535c85ca6feb75165d620d7d663c3e76b9bf3af614556ed8560b446a8a73649cb935383a30b4fd8fd75522203e4575cf4bc2b7f01a9294310fe021c71acbf68f6f1e95f48c30c14151c51d4fb878a16272ee73753bc750cbd48007c842412ca1dcb6214215b082c00d619a5318e2ebe9149410f501170093784afc2bd71dd9f5a87b349b96661747b1627e8cba8a5c98559fb146fa7e30db4c6f648ce3c2209f84551a7a1cd46d9172ae1354b6d093f89f6f5f58d29c1d7af8830df62e67753caa8166322caa0f8adf4b61d2013d35baa7c002e1d4c83b1cba8aaa57cf4946627fa63ba7a6a5a5c803e8d5a4794845ab670ef950b918a360cd9f12e8f3424ecab1f505cb494ad35f28d12ff183471d0f47bd67e6abd3b8c8e206d11149474a19b5c13d165d8f6dc39cf579fe1000295328aeeb82e0ae8020d2f61e4c3d6e68c25a655ab72aad5e9e74af4cf27c74158fdb1a29a3d76cd658976fa0a30743247408df00a23b593f68861348a6c46af05d21a4b81fedbf5715462ec8ffc5f001a85c43058ac1fab488236588ef0bf08dd8dd7c7fce630a0a996395b503647d9a2f0dd63dd2f939eca8e1849ee4ed41a6d5672d947177e8f890692de879a20dd9e366ec494d270faf0d24fc076172a25998aac218586404687e7c77b55e77e0eff9b1c65c3f8da99deaa86411ab6aca2531d84b364349591bc73e7504163afd23c5208e321883ee611ea7e4e5885086e4fa7196e16b948cb54808b64b94106c74900e3190fd5f6068b490fd0c9c64481771527a0e2d00899fd5b7a9e7f508cc6770018fadf09d965d7a12ad3624d2161d9546d4a793
7b5f961d7f7c4714786380c147e1ec6b0583503bd5a139b892831d1ea925993bb86f12e75d9010ceba230a1c286fa3d1d654a1672313cbf0763c05c622cee452f76957c42ba0e853ecda163d15e8600a702ccdc9e8f88a", "Ghe+t0Blaster"}, {"$sxc$*0*0*1024*16*64983af0b26a6ee614e6c65b32c1d906f70c6397*8*9bb755c8a4fe8c34*16*112b9d41098c8677615755361da473a6*1024*1024*b95f0f2e0e1c7b4ee61168b646804d4b70b615f3c978cec65c9a7ab515417c79625d104373fd5012c3da6b356f8408a3a75edcc8b2aad0aa38bb33edd8933bdadbffde35a350ade73ccb9df29c2996082f5e94e324496835f8dfebe15ca38950e0f435d711ef964aa09915d58287967b5e321ca195a7f90253157afe82329da9a496c97292419b9a94cdb92f919e6d54700466aff61c200c5a355905b5a37c12d77b0e4ffd23f0204cfa664f4c0545f233db8d35af5fe337b459135da398fd23101becb194db305496474ba4179a7355285a9ec935044e1831f290f5f87ed3e00925e7fb4fc6bc38d9f0cfe9abf72560400490d2fd398d2d49516b618f99168602f323dd1786bcca394830341dfbeb377f9b7ef161dc1470f5e92b6152fa7a4f428e8ae40100791491a9e1c9385298522320488f00535866ac6e08354a75b8b2fd293066da7eb6b4ad7f3e13c8dc98cd815b2393f147fdac6279f76fdac9abd0a94131fa84fe4e99634a362a56d60ce588f6e0b66d6f8b6d411511272ffe32181d20e7d2c3d4b680764607afb2c29dcb94a845b920e96f6c27575534f8b7f9ddd93bdcef0d717d0a899fa937e7d2eeeb6d5b0338757f6e69dac72524d4b6f74edce1f937008eb3653bcc31a88712af940cf47ec3f3efd83e4da89d1a6cb7da6cf8d7d41430bc81a4b5d7bb46cad687f2f505e3379143ae274eed6201c3b17c1e05e516a14cbf2351ccf9fdd46e1309afb170bd01eb8f6a1d8e12441525199455fb550e3fc689b1801332b2d985e336b158f846fcbca18fbe6ea21438cf1fb5fdbce8d6350e65d6468342880845675ec721af2fb9df917a3968b4a1a477fc4c74ee38a71a230d77c2a7cf66ae6b83804488cbd25213ebc470cd845a2691b16161a640ebb385aa2381dc91f692f6c4ca2709b5a7e94dfb4548000a29b56f1da08701945d6209fabbd1621b28849fc27810775f1a0e0204d3ae9040a8cfb1386499a39d87149cfc1579de7d059662ad25a67abd42b30bb3608f09142ca030351c3a1e921e4c7bbc11aab846ef42eb5d1418c15ada77539aca096e0678439cd1b60950d2aa0cc4d2004b1ac48dc6a454c5a8e9ea7e910047c7c83895fd614fd9dfd961631eb23757646143c2aeb03c1a6476e78fc4ccf0f02cc1f88ec1b0080a170ac6871dc183939f7
a4376965b0dfa7922012582eec4846ee621edc5547a2b9c4893e7f67f76541a4bd4a91827a57b3db5cdea29a2a3cc20238d89c8145c14b037360ad27f54f87317ef70472d6b1fd9f1168bcf8aba6071257b3adebab8d4e115188ed4af3fc3574fdccb4bc7eeb00a6a442f1b96a989b735f5e6059ec72c1677b77f437dcb93066f8591a11071799c3a0ec3b48f6160976aff1928c375358837e1ef02e20397b2e9d8d9c4bff23172c9b4c0b941cb1b49b5bc070f72a14cd384", "M1racl33"}, {"$sxc$*0*0*1024*16*64983af0b26a6ee614e6c65b32c1d906f70c6397*8*ceb1edb1e3cb72fd*16*f7104c9b2789540f5fd4beef009c0139*1024*1024*709130b940a9663d0a5687133c6f78535d05a72936faed8c2f3c1b4e29423baaabcee4f0d7d57e3ad8d8c090486f974c4d0ce4be5b29ef8e1b02c01b4af1959ed0b277146a45aec35a48997b584b82697803193644eefd88a7eefcae8819839e13702f887278a597dd954babd82bf71bf8ca8559af0e5537be0264e358d36b4f5067960edf608de731e04d117de953386aadee71849edbc494fac3e6b14567a9e9c545a06d402acd3158441829f25478ed0f9086dabd2d3913b123b43c27176f8f08f30312d84e82d47654097a2bce95554357db3ce3d45a7441472067f55e4ea6244a3dedc23db4bea8f549109ffac382cf5b652c5b1ee431bcab1051567c263a9d668c5d6a15a6f8da754914746c1d3c7eb6347bdd8d6a3ac82e4c742fcf8721913c111dfd5398f2698db00f7220d2a3562e02f7f7a6505af3ba1ee10b46f2ab5b5d2f52d288fd12814c6edbcb8d50b6e8716fba0d5962747b971689fe75e94fa36ec39598ea30e15ab2b9c9f22ca04b890a13b18fb3c7a962050426bb2da08c8b993608b9c1ffd0a21e0c74e993242ead8eb30f86d7d2dcdbd4774d85c2e06adbe4b40050ff0ac1a8afe8fbc2175ec4da4676a691b1fce38421175734c20f07a604fea5287e1c33b420aa9db4de9bd97382c161b4ec0818add675e52ebf036aad779f24b824be4b2b013c470ff66cbf44f5800e128a3b328e80a5fd6295b9b3a94e915f9add6710cb9444432751a7a31c3a3422f48a5eabc26d9a52571b8447bdd0a5977ff7153d95337cef7ff2ec29774332fbeed6ee5eed5e12288cc13e14ba9d5ff3dd052e28ba96715f5b95d7ea214ebcd9e60b26308eb11370b824b5cff2644dd2117985b3c25ba8076d4025cf3a3a62da62d5e11d44422a142048e8cd00c7de6a0a55fd5dc09a3ed01dfe35b88268f351b6ff289fee8e52ac29fe32d9990e0d6d87f39727b6a762bac9d509c6ea235fc8bedc3bec2143eae9fd2cb831b798ef8261d72785002638b940947de0aad64f791f9a27e5b091e55adf4aee0649f6785bdd37e02
48fedd1759d771aeacacb3ff6e7cf2d045f791428ab61710b54e869213393caf1b6bc99066678351deafc290cecc1f6b40b5532adbbab9a70408c61a437d4483b6a75cb61a55b20881efc0d849e0f60c1887f0fa091672179a145c4ab1b6487a0e939e0123d5aaffa3aec66ab593f9c25d27f22f4a73a999a4ab45e8bc7d71a85e2d40afadad1a1dc0b8389f96f91614293fa205583ef1c3440e3df50e8aa5f1a13e5929b72cd003461ff03d44d8c84bdada176b24459021d398b2b91b61a9c0b553a8714c703d32452c691a33f1581e98c2439514ca3e7deeef90850f8d6d89bf1d3a5762a56ef769ea588f5c1705bfb7b944cfbbb0632718ee3722f4e1929b35706d6413a315a11bc16349af109a7e675df2ab1eebe93", "excel123"}, {NULL} }; #if defined (_OPENMP) static int omp_t = 1; #endif static char (*saved_key)[PLAINTEXT_LENGTH + 1]; static uint32_t (*crypt_out)[32 / sizeof(uint32_t)]; static struct custom_salt { int cipher_type; // FIXME: cipher_type seems to be ignored int checksum_type; int iterations; int key_size; int iv_length; int salt_length; int original_length; int length; unsigned char iv[16]; unsigned char salt[32]; unsigned char content[1024]; } *cur_salt; static void init(struct fmt_main *self) { #if defined (_OPENMP) omp_t = omp_get_max_threads(); self->params.min_keys_per_crypt *= omp_t; omp_t *= OMP_SCALE; self->params.max_keys_per_crypt *= omp_t; #endif saved_key = mem_calloc(self->params.max_keys_per_crypt, sizeof(*saved_key)); crypt_out = mem_calloc(self->params.max_keys_per_crypt, sizeof(*crypt_out)); } static void done(void) { MEM_FREE(crypt_out); MEM_FREE(saved_key); } static int valid(char *ciphertext, struct fmt_main *self) { char *ctcopy; char *keeptr; char *p; int res, extra; if (strncmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LEN)) return 0; ctcopy = strdup(ciphertext); keeptr = ctcopy; ctcopy += FORMAT_TAG_LEN; if ((p = strtokm(ctcopy, "*")) == NULL) /* cipher type */ goto err; res = atoi(p); if (res != 0 && res != 1) goto err; if ((p = strtokm(NULL, "*")) == NULL) /* checksum type */ goto err; res = atoi(p); if (res != 0 && res != 1) goto err; if ((p = strtokm(NULL, "*")) == NULL) /* iterations */ 
goto err; res = atoi(p); if (res <= 0) goto err; if ((p = strtokm(NULL, "*")) == NULL) /* key size */ goto err; res = atoi(p); if (res != 16 && res != 32) goto err; if ((p = strtokm(NULL, "*")) == NULL) /* checksum field (skipped) */ goto err; if (hexlenl(p, &extra) != BINARY_SIZE * 2 || extra) goto err; if ((p = strtokm(NULL, "*")) == NULL) /* iv length */ goto err; res = atoi(p); if (res <= 0 || res > 16) goto err; if ((p = strtokm(NULL, "*")) == NULL) /* iv */ goto err; if (hexlenl(p, &extra) != res * 2 || extra) goto err; if ((p = strtokm(NULL, "*")) == NULL) /* salt length */ goto err; res = atoi(p); if (res <= 0 || res > 32) goto err; if ((p = strtokm(NULL, "*")) == NULL) /* salt */ goto err; if (hexlenl(p, &extra) != res * 2 || extra) goto err; if ((p = strtokm(NULL, "*")) == NULL) /* original length */ goto err; res = atoi(p); if (res <= 0 || res > 1024) /* 1024 because of "unsigned char output[1024];" in crypt_all */ goto err; if ((p = strtokm(NULL, "*")) == NULL) /* length */ goto err; res = atoi(p); if (res <= 0 || res > 1024) goto err; if ((p = strtokm(NULL, "*")) == NULL) /* content */ goto err; if (hexlenl(p, &extra) != res * 2 || extra) goto err; if (strtokm(NULL, "*") != NULL) /* the end */ goto err; MEM_FREE(keeptr); return 1; err: MEM_FREE(keeptr); return 0; } static void *get_salt(char *ciphertext) { char *ctcopy = strdup(ciphertext); char *keeptr = ctcopy; int i; char *p; static struct custom_salt cs; memset(&cs, 0, sizeof(cs)); ctcopy += FORMAT_TAG_LEN; /* skip over "$sxc$*" */ p = strtokm(ctcopy, "*"); cs.cipher_type = atoi(p); p = strtokm(NULL, "*"); cs.checksum_type = atoi(p); p = strtokm(NULL, "*"); cs.iterations = atoi(p); p = strtokm(NULL, "*"); cs.key_size = atoi(p); strtokm(NULL, "*"); /* skip checksum field */ p = strtokm(NULL, "*"); cs.iv_length = atoi(p); p = strtokm(NULL, "*"); for (i = 0; i < cs.iv_length; i++) cs.iv[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16 + atoi16[ARCH_INDEX(p[i * 2 + 1])]; p = strtokm(NULL, "*"); cs.salt_length = 
atoi(p); p = strtokm(NULL, "*"); for (i = 0; i < cs.salt_length; i++) cs.salt[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16 + atoi16[ARCH_INDEX(p[i * 2 + 1])]; p = strtokm(NULL, "*"); cs.original_length = atoi(p); p = strtokm(NULL, "*"); cs.length = atoi(p); p = strtokm(NULL, "*"); for (i = 0; i < cs.length; i++) cs.content[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16 + atoi16[ARCH_INDEX(p[i * 2 + 1])]; MEM_FREE(keeptr); return (void *)&cs; } static void *get_binary(char *ciphertext) { static union { unsigned char c[BINARY_SIZE+1]; ARCH_WORD dummy; } buf; unsigned char *out = buf.c; char *p; int i; char *ctcopy = strdup(ciphertext); char *keeptr = ctcopy; ctcopy += 6; /* skip over "$sxc$*" */ strtokm(ctcopy, "*"); strtokm(NULL, "*"); strtokm(NULL, "*"); strtokm(NULL, "*"); p = strtokm(NULL, "*"); for (i = 0; i < BINARY_SIZE; i++) { out[i] = (atoi16[ARCH_INDEX(*p)] << 4) | atoi16[ARCH_INDEX(p[1])]; p += 2; } MEM_FREE(keeptr); return out; } static void set_salt(void *salt) { cur_salt = (struct custom_salt *)salt; } static int crypt_all(int *pcount, struct db_salt *salt) { const int count = *pcount; int index = 0; #ifdef _OPENMP #pragma omp parallel for for (index = 0; index < count; index += MAX_KEYS_PER_CRYPT) #endif { unsigned char key[MAX_KEYS_PER_CRYPT][32]; unsigned char hash[MAX_KEYS_PER_CRYPT][32]; BF_KEY bf_key; int bf_ivec_pos; unsigned char ivec[8]; unsigned char output[1024]; int i; SHA_CTX ctx; for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) { SHA1_Init(&ctx); SHA1_Update(&ctx, (unsigned char *)saved_key[index+i], strlen(saved_key[index+i])); SHA1_Final((unsigned char *)hash[i], &ctx); } #ifdef SIMD_COEF_32 { int lens[MAX_KEYS_PER_CRYPT]; unsigned char *pin[MAX_KEYS_PER_CRYPT], *pout[MAX_KEYS_PER_CRYPT]; for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) { lens[i] = 20; pin[i] = (unsigned char*)hash[i]; pout[i] = key[i]; } pbkdf2_sha1_sse((const unsigned char**)pin, lens, cur_salt->salt, cur_salt->salt_length, cur_salt->iterations, pout, cur_salt->key_size, 0); } #else 
pbkdf2_sha1(hash[0], 20, cur_salt->salt, cur_salt->salt_length, cur_salt->iterations, key[0], cur_salt->key_size, 0); #endif for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) { bf_ivec_pos = 0; memcpy(ivec, cur_salt->iv, 8); BF_set_key(&bf_key, cur_salt->key_size, key[i]); BF_cfb64_encrypt(cur_salt->content, output, cur_salt->length, &bf_key, ivec, &bf_ivec_pos, 0); SHA1_Init(&ctx); SHA1_Update(&ctx, output, cur_salt->original_length); SHA1_Final((unsigned char*)crypt_out[index+i], &ctx); } } return count; } static int cmp_all(void *binary, int count) { int index = 0; for (; index < count; index++) if (!memcmp(binary, crypt_out[index], ARCH_SIZE)) return 1; return 0; } static int cmp_one(void *binary, int index) { return !memcmp(binary, crypt_out[index], BINARY_SIZE); } static int cmp_exact(char *source, int index) { return 1; } static void sxc_set_key(char *key, int index) { int saved_len = strlen(key); if (saved_len > PLAINTEXT_LENGTH) saved_len = PLAINTEXT_LENGTH; memcpy(saved_key[index], key, saved_len); saved_key[index][saved_len] = 0; } static char *get_key(int index) { return saved_key[index]; } static unsigned int iteration_count(void *salt) { struct custom_salt *my_salt; my_salt = salt; return (unsigned int) my_salt->iterations; } struct fmt_main fmt_sxc = { { FORMAT_LABEL, FORMAT_NAME, ALGORITHM_NAME, BENCHMARK_COMMENT, BENCHMARK_LENGTH, 0, PLAINTEXT_LENGTH, BINARY_SIZE, BINARY_ALIGN, SALT_SIZE, SALT_ALIGN, MIN_KEYS_PER_CRYPT, MAX_KEYS_PER_CRYPT, FMT_CASE | FMT_8_BIT | FMT_OMP | FMT_HUGE_INPUT, { "iteration count", }, { FORMAT_TAG }, sxc_tests }, { init, done, fmt_default_reset, fmt_default_prepare, valid, fmt_default_split, get_binary, get_salt, { iteration_count, }, fmt_default_source, { fmt_default_binary_hash }, fmt_default_salt_hash, NULL, set_salt, sxc_set_key, get_key, fmt_default_clear_keys, crypt_all, { fmt_default_get_hash }, cmp_all, cmp_one, cmp_exact } }; #endif /* plugin stanza */
@mropes.nim.c
/* Generated by Nim Compiler v1.0.11 */ /* (c) 2019 Andreas Rumpf */ /* The generated code is subject to the original license. */ #define NIM_INTBITS 64 #include "nimbase.h" #include <string.h> #include <stdio.h> #undef LANGUAGE_C #undef MIPSEB #undef MIPSEL #undef PPC #undef R3000 #undef R4000 #undef i386 #undef linux #undef mips #undef near #undef far #undef powerpc #undef unix #define nimfr_(x, y) #define nimln_(x, y) typedef struct tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA; typedef struct TNimType TNimType; typedef struct TNimNode TNimNode; typedef struct RootObj RootObj; typedef struct NimStringDesc NimStringDesc; typedef struct TGenericSeq TGenericSeq; typedef struct tySequence__WwUFq9cJ2xKRlsAWVEHyPRg tySequence__WwUFq9cJ2xKRlsAWVEHyPRg; typedef struct tyObject_Cell__1zcF9cV8XIAtbN8h5HRUB8g tyObject_Cell__1zcF9cV8XIAtbN8h5HRUB8g; typedef struct tyObject_CellSeq__Axo1XVm9aaQueTOldv8le5w tyObject_CellSeq__Axo1XVm9aaQueTOldv8le5w; typedef struct tyObject_GcHeap__1TRH1TZMaVZTnLNcIHuNFQ tyObject_GcHeap__1TRH1TZMaVZTnLNcIHuNFQ; typedef struct tyObject_GcStack__7fytPA5bBsob6See21YMRA tyObject_GcStack__7fytPA5bBsob6See21YMRA; typedef struct tyObject_MemRegion__x81NhDv59b8ercDZ9bi85jyg tyObject_MemRegion__x81NhDv59b8ercDZ9bi85jyg; typedef struct tyObject_SmallChunk__tXn60W2f8h3jgAYdEmy5NQ tyObject_SmallChunk__tXn60W2f8h3jgAYdEmy5NQ; typedef struct tyObject_BigChunk__Rv9c70Uhp2TytkX7eH78qEg tyObject_BigChunk__Rv9c70Uhp2TytkX7eH78qEg; typedef struct tyObject_LLChunk__XsENErzHIZV9bhvyJx56wGw tyObject_LLChunk__XsENErzHIZV9bhvyJx56wGw; typedef struct tyObject_IntSet__EZObFrE3NC9bIb3YMkY9crZA tyObject_IntSet__EZObFrE3NC9bIb3YMkY9crZA; typedef struct tyObject_Trunk__W0r8S0Y3UGke6T9bIUWnnuw tyObject_Trunk__W0r8S0Y3UGke6T9bIUWnnuw; typedef struct tyObject_AvlNode__IaqjtwKhxLEpvDS9bct9blEw tyObject_AvlNode__IaqjtwKhxLEpvDS9bct9blEw; typedef struct tyObject_HeapLinks__PDV1HBZ8CQSQJC9aOBFNRSg tyObject_HeapLinks__PDV1HBZ8CQSQJC9aOBFNRSg; 
typedef struct tyTuple__ujsjpB2O9cjj3uDHsXbnSzg tyTuple__ujsjpB2O9cjj3uDHsXbnSzg; typedef struct tyObject_GcStat__0RwLoVBHZPfUAcLczmfQAg tyObject_GcStat__0RwLoVBHZPfUAcLczmfQAg; typedef struct tyObject_CellSet__jG87P0AI9aZtss9ccTYBIISQ tyObject_CellSet__jG87P0AI9aZtss9ccTYBIISQ; typedef struct tyObject_PageDesc__fublkgIY4LG3mT51LU2WHg tyObject_PageDesc__fublkgIY4LG3mT51LU2WHg; typedef tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* tyArray__USLYl0Lpkimm4FABiJ3ldA[4096]; typedef NU8 tyEnum_TNimKind__jIBKr1ejBgsfM33Kxw4j7A; typedef NU8 tySet_tyEnum_TNimTypeFlag__v8QUszD1sWlSIWZz7mC4bQ; typedef N_NIMCALL_PTR(void, tyProc__ojoeKfW4VYIm36I9cpDTQIg) (void* p, NI op); typedef N_NIMCALL_PTR(void*, tyProc__WSm2xU5ARYv9aAR4l0z9c9auQ) (void* p); struct TNimType { NI size; tyEnum_TNimKind__jIBKr1ejBgsfM33Kxw4j7A kind; tySet_tyEnum_TNimTypeFlag__v8QUszD1sWlSIWZz7mC4bQ flags; TNimType* base; TNimNode* node; void* finalizer; tyProc__ojoeKfW4VYIm36I9cpDTQIg marker; tyProc__WSm2xU5ARYv9aAR4l0z9c9auQ deepcopy; }; typedef NU8 tyEnum_TNimNodeKind__unfNsxrcATrufDZmpBq4HQ; struct TNimNode { tyEnum_TNimNodeKind__unfNsxrcATrufDZmpBq4HQ kind; NI offset; TNimType* typ; NCSTRING name; NI len; TNimNode** sons; }; struct RootObj { TNimType* m_type; }; struct TGenericSeq { NI len; NI reserved; }; struct NimStringDesc { TGenericSeq Sup; NIM_CHAR data[SEQ_DECL_SIZE]; }; struct tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA { RootObj Sup; tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* left; tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* right; NI L; NimStringDesc* data; }; typedef N_NIMCALL_PTR(void, tyProc__T4eqaYlFJYZUv9aG9b1TV0bQ) (void); struct tyObject_Cell__1zcF9cV8XIAtbN8h5HRUB8g { NI refcount; TNimType* typ; }; struct tyObject_GcStack__7fytPA5bBsob6See21YMRA { void* bottom; }; struct tyObject_CellSeq__Axo1XVm9aaQueTOldv8le5w { NI len; NI cap; tyObject_Cell__1zcF9cV8XIAtbN8h5HRUB8g** d; }; typedef tyObject_SmallChunk__tXn60W2f8h3jgAYdEmy5NQ* tyArray__SiRwrEKZdLgxqz9a9aoVBglg[512]; typedef NU32 
tyArray__BHbOSqU1t9b3Gt7K2c6fQig[24]; typedef tyObject_BigChunk__Rv9c70Uhp2TytkX7eH78qEg* tyArray__N1u1nqOgmuJN9cSZrnMHgOQ[32]; typedef tyArray__N1u1nqOgmuJN9cSZrnMHgOQ tyArray__B6durA4ZCi1xjJvRtyYxMg[24]; typedef tyObject_Trunk__W0r8S0Y3UGke6T9bIUWnnuw* tyArray__lh2A89ahMmYg9bCmpVaplLbA[256]; struct tyObject_IntSet__EZObFrE3NC9bIb3YMkY9crZA { tyArray__lh2A89ahMmYg9bCmpVaplLbA data; }; typedef tyObject_AvlNode__IaqjtwKhxLEpvDS9bct9blEw* tyArray__0aOLqZchNi8nWtMTi8ND8w[2]; struct tyObject_AvlNode__IaqjtwKhxLEpvDS9bct9blEw { tyArray__0aOLqZchNi8nWtMTi8ND8w link; NI key; NI upperBound; NI level; }; struct tyTuple__ujsjpB2O9cjj3uDHsXbnSzg { tyObject_BigChunk__Rv9c70Uhp2TytkX7eH78qEg* Field0; NI Field1; }; typedef tyTuple__ujsjpB2O9cjj3uDHsXbnSzg tyArray__LzOv2eCDGiceMKQstCLmhw[30]; struct tyObject_HeapLinks__PDV1HBZ8CQSQJC9aOBFNRSg { NI len; tyArray__LzOv2eCDGiceMKQstCLmhw chunks; tyObject_HeapLinks__PDV1HBZ8CQSQJC9aOBFNRSg* next; }; struct tyObject_MemRegion__x81NhDv59b8ercDZ9bi85jyg { NI minLargeObj; NI maxLargeObj; tyArray__SiRwrEKZdLgxqz9a9aoVBglg freeSmallChunks; NU32 flBitmap; tyArray__BHbOSqU1t9b3Gt7K2c6fQig slBitmap; tyArray__B6durA4ZCi1xjJvRtyYxMg matrix; tyObject_LLChunk__XsENErzHIZV9bhvyJx56wGw* llmem; NI currMem; NI maxMem; NI freeMem; NI occ; NI lastSize; tyObject_IntSet__EZObFrE3NC9bIb3YMkY9crZA chunkStarts; tyObject_AvlNode__IaqjtwKhxLEpvDS9bct9blEw* root; tyObject_AvlNode__IaqjtwKhxLEpvDS9bct9blEw* deleted; tyObject_AvlNode__IaqjtwKhxLEpvDS9bct9blEw* last; tyObject_AvlNode__IaqjtwKhxLEpvDS9bct9blEw* freeAvlNodes; NIM_BOOL locked; NIM_BOOL blockChunkSizeIncrease; NI nextChunkSize; tyObject_AvlNode__IaqjtwKhxLEpvDS9bct9blEw bottomData; tyObject_HeapLinks__PDV1HBZ8CQSQJC9aOBFNRSg heapLinks; }; struct tyObject_GcStat__0RwLoVBHZPfUAcLczmfQAg { NI stackScans; NI cycleCollections; NI maxThreshold; NI maxStackSize; NI maxStackCells; NI cycleTableSize; NI64 maxPause; }; struct tyObject_CellSet__jG87P0AI9aZtss9ccTYBIISQ { NI counter; NI max; 
tyObject_PageDesc__fublkgIY4LG3mT51LU2WHg* head; tyObject_PageDesc__fublkgIY4LG3mT51LU2WHg** data; }; struct tyObject_GcHeap__1TRH1TZMaVZTnLNcIHuNFQ { tyObject_GcStack__7fytPA5bBsob6See21YMRA stack; NI cycleThreshold; NI zctThreshold; tyObject_CellSeq__Axo1XVm9aaQueTOldv8le5w zct; tyObject_CellSeq__Axo1XVm9aaQueTOldv8le5w decStack; tyObject_CellSeq__Axo1XVm9aaQueTOldv8le5w tempStack; NI recGcLock; tyObject_MemRegion__x81NhDv59b8ercDZ9bi85jyg region; tyObject_GcStat__0RwLoVBHZPfUAcLczmfQAg stat; tyObject_CellSet__jG87P0AI9aZtss9ccTYBIISQ marked; tyObject_CellSeq__Axo1XVm9aaQueTOldv8le5w additionalRoots; NI gcThreadId; }; typedef NU8 tyEnum_FileMode__ZJfK20XeZ9bv2j1pZjw9aswg; typedef NIM_CHAR tyArray__9bKy7UA2LOi2vzOViufaW1Q[1024]; struct tySequence__WwUFq9cJ2xKRlsAWVEHyPRg { TGenericSeq Sup; tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* data[SEQ_DECL_SIZE]; }; N_NIMCALL(void, nimGCvisit)(void* d, NI op); static N_NIMCALL(void, Marker_tyRef__4hi0XQqK9aLiPuWT9acsXm9aQ)(void* p, NI op); static N_NIMCALL(void, TM__Vw9cfUOQOae9b9bzZBlucMZQg_3)(void); N_NIMCALL(void, nimRegisterGlobalMarker)(tyProc__T4eqaYlFJYZUv9aG9b1TV0bQ markerProc); N_NIMCALL(NimStringDesc*, mnewString)(NI len); N_LIB_PRIVATE N_NIMCALL(NI, len__9b0YRltzV3kNSE9aQTsG82wg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* a); N_NIMCALL(NimStringDesc*, setLengthStr)(NimStringDesc* s, NI newLen); N_NIMCALL(void*, newSeq)(TNimType* typ, NI len); static N_INLINE(void, asgnRef)(void** dest, void* src); static N_INLINE(void, incRef__AT1eRuflKWyTTBdLjEDZbg_3system)(tyObject_Cell__1zcF9cV8XIAtbN8h5HRUB8g* c); static N_INLINE(tyObject_Cell__1zcF9cV8XIAtbN8h5HRUB8g*, usrToCell__QFQqcLB3lgOdwipkv9a60xwsystem)(void* usr); static N_INLINE(void, decRef__AT1eRuflKWyTTBdLjEDZbgsystem)(tyObject_Cell__1zcF9cV8XIAtbN8h5HRUB8g* c); static N_INLINE(void, rtlAddZCT__AT1eRuflKWyTTBdLjEDZbg_2system)(tyObject_Cell__1zcF9cV8XIAtbN8h5HRUB8g* c); N_LIB_PRIVATE N_NOINLINE(void, 
addZCT__Y66tOYFjgwJ0k4aLz4bc0Q)(tyObject_CellSeq__Axo1XVm9aaQueTOldv8le5w* s, tyObject_Cell__1zcF9cV8XIAtbN8h5HRUB8g* c); static N_INLINE(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, pop__9c4Y4hTtvRqjj2EC8KP9aqDAsystem)(tySequence__WwUFq9cJ2xKRlsAWVEHyPRg** s); N_NIMCALL(TGenericSeq*, setLengthSeqV2)(TGenericSeq* s, TNimType* typ, NI newLen); N_NIMCALL(void, unsureAsgnRef)(void** dest, void* src); N_NIMCALL(TGenericSeq*, incrSeqV3)(TGenericSeq* s, TNimType* typ); static N_INLINE(void, appendString)(NimStringDesc* dest, NimStringDesc* src); static N_INLINE(void, copyMem__i80o3k0SgEI5gTRCzYdyWAsystem)(void* dest, void* source, NI size); static N_INLINE(void, nimCopyMem)(void* dest, void* source, NI size); N_NIMCALL(NimStringDesc*, resizeString)(NimStringDesc* dest, NI addlen); N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA)(NimStringDesc* frmt, tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0); N_LIB_PRIVATE N_NIMCALL(void, add__yG4AKzsBRS1W4MANDlXQeg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** a, NimStringDesc* b); N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, amp___Z7W1o5nPSc3ExfO5f7j1Gg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* a, NimStringDesc* b); N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, amp___ShdZ6VrAQkY0nWR9a39b9bGdQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* a, tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* b); N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, newRope__dBdikNFB2Y7QJ9aVJE7dGHg)(NimStringDesc* data); N_NIMCALL(void*, newObj)(TNimType* typ, NI size); N_NIMCALL(NimStringDesc*, copyStringRC1)(NimStringDesc* src); static N_INLINE(void, nimGCunrefNoCycle)(void* p); N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, rope__yShmEg9cffWxI7s5XzEKBow)(NimStringDesc* s); N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, insertInCache__yShmEg9cffWxI7s5XzEKBow_2)(NimStringDesc* s); N_LIB_PRIVATE 
N_NIMCALL(NI, hash__6PCYkKlCNhq9cnRLnqWKkwQ)(NimStringDesc* x); static N_INLINE(NIM_BOOL, eqStrings)(NimStringDesc* a, NimStringDesc* b); static N_INLINE(NIM_BOOL, equalMem__9bGgqEk7RXXl6eqM9c1HdELAsystem)(void* a, void* b, NI size); static N_INLINE(int, nimCmpMem)(void* a, void* b, NI size); N_LIB_PRIVATE N_NIMCALL(void, add__IM4kcMNkkOLJtqdEqSxR8A)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** a, tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* b); N_LIB_PRIVATE N_NIMCALL(void, failedAssertImpl__W9cjVocn1tjhW7p7xohJj6A)(NimStringDesc* msg); N_NIMCALL(NimStringDesc*, rawNewString)(NI space); N_LIB_PRIVATE N_NIMCALL(NimStringDesc*, substr__2yh9cer0ymNRHlOOg8P7IuA)(NimStringDesc* s, NI first, NI last); N_NIMCALL(NimStringDesc*, nimInt64ToStr)(NI64 x); N_LIB_PRIVATE N_NIMCALL(void, write__PArlm09bKklm2BLsCg6YtaA)(FILE* f, NimStringDesc* s); N_LIB_PRIVATE N_NIMCALL(NIM_BOOL, open__gq12VLhVO0NBzUTnGgz4nw)(FILE** f, NimStringDesc* filename, tyEnum_FileMode__ZJfK20XeZ9bv2j1pZjw9aswg mode, NI bufSize); N_LIB_PRIVATE N_NIMCALL(NIM_BOOL, equalsFile__9bihNFg7Qajcg9arfx5cr9aHA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* r, FILE* f); static N_INLINE(void, nimZeroMem)(void* p, NI size); static N_INLINE(void, nimSetMem__zxfKBYntu9cBapkhrCOk1fgmemory)(void* a, int v, NI size); N_LIB_PRIVATE N_NIMCALL(NI, readBuffer__Y9atVWUcVyKHG9aBP4D0P9czA)(FILE* f, void* buffer, NI len); static N_INLINE(NCSTRING, nimToCStringConv)(NimStringDesc* s); N_LIB_PRIVATE N_NIMCALL(void, close__fU6ZlJAtQ9bre04EDZLdGsA_3)(FILE* f); N_LIB_PRIVATE N_NIMCALL(void, writeRope__FwuzOBq6SLlanVUstm8q9cA)(FILE* f, tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* r); N_LIB_PRIVATE N_NIMCALL(NIM_BOOL, equalsFile__Wiam9c8x73Mtmbj0r4Ppikg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* r, NimStringDesc* filename); N_LIB_PRIVATE N_NIMCALL(NIM_BOOL, writeRope__LLRRC42xWBSkxzV9bsPu7lA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* head, NimStringDesc* filename); tyArray__USLYl0Lpkimm4FABiJ3ldA cache__WGMp5Wo1NlgbAMOysPIfmQ; extern TNimType 
NTI__ytyiCJqK439aF9cIibuRVpAg_; TNimType NTI__OFzf0kSiPTcNreUIeJgWVA_; extern TNimType NTI__rR5Bzr1D5krxoo1NcNyeMA_; extern TNimType NTI__77mFvmsOLKik79ci2hXkHEg_; TNimType NTI__4hi0XQqK9aLiPuWT9acsXm9aQ_; TNimType NTI__USLYl0Lpkimm4FABiJ3ldA_; NI gCacheTries__5GfZTThHPBfB9bjRZdFluBw; NI gCacheMisses__fLRm9am8S0daYBVNK6JKyBg; NI gCacheIntTries__opyfsNv023Md1P05mqsDew; extern TNimType NTI__WwUFq9cJ2xKRlsAWVEHyPRg_; extern tyObject_GcHeap__1TRH1TZMaVZTnLNcIHuNFQ gch__IcYaEuuWivYAS86vFMTS3Q; STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_4, "$", 1); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_5, "ropes.nim(238, 20) `false` invalid format string: ", 50); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_6, "ropes.nim(250, 20) `false` invalid format string: ", 50); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_7, "ropes.nim(253, 20) `false` invalid format string: ", 50); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_8, "\012", 1); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_9, "ropes.nim(263, 18) `false` invalid format string: ", 50); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_10, "[$1, $2, $3]", 12); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_11, "FR_.len-=$1;$n", 14); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_12, "} $1: ;$n", 9); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_13, "}$n", 3); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_14, "FR_.len+=$1;$n", 14); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_15, "void", 4); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_16, ", ", 2); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_17, "$1 $2;$n", 8); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_18, "typedef $1 $2 $2;$n", 19); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_19, "*", 1); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_20, " ", 1); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_21, ", NI $1Len_$2", 13); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_22, " Result", 7); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_23, "$1$2($3, $4)$5", 14); 
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_24, "(*$1)", 5); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_25, "static TNimType* $1;$n", 22); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_26, "\011$1 = (TNimType*)hcrGetGlobal($2, \"$1\");$n", 42); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_27, "extern TNimType $1;$n", 21); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_28, "NTI$1_", 6); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_29, "$1.size = sizeof($2);$n$1.kind = $3;$n$1.base = $4;$n", 53); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_30, "$1.flags = $2;$n", 16); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_31, "$1.name = $2;$n", 15); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_32, "$1.nextType = nimTypeRoot; nimTypeRoot=&$1;$n", 45); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_33, "\011hcrRegisterGlobal($2, \"$1\", sizeof(TNimType), NULL, (void**)&$" "1);$n", 68); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_34, "TNimType $1;$n", 14); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_35, "$1[$2]", 6); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_36, "static TNimNode** $1;$n", 23); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_37, "\011hcrRegisterGlobal($3, \"$1\", sizeof(TNimNode*) * $2, NULL, (voi" "d**)&$1);$n", 74); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_38, "static TNimNode* $1[$2];$n", 26); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_39, "$1[$2] = &$3;$n", 15); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_40, "$1.kind = 1;$n$1.offset = offsetof($2, Field$3);$n$1.typ = $4;$" "n$1.name = \"Field$3\";$n", 86); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_41, "$1.len = $2; $1.kind = 2; $1.sons = &$3[0];$n", 45); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_42, "$1.len = $2; $1.kind = 2;$n", 27); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_43, "$1.node = &$2;$n", 16); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_44, "static N_NIMCALL(void, $1)(void* p, NI op)", 42); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_45, "$1 a;$n", 7); 
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_46, "a = ($1)p;$n", 12); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_47, "for ($1 = 0; $1 < $2; $1++) {$n", 31); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_48, "($1 \? $1->$2 : 0)", 17); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_49, "$1.Sup", 6); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_50, "#pragma pack(push, 1)$nstruct{", 30); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_51, "};$n", 4); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_52, "#pragma pack(pop)$n", 19); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_53, "union{$n$1};$n", 14); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_54, "$1 $2[SEQ_DECL_SIZE];$n", 23); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_55, "$1 $2:$3;$n", 11); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_56, "switch ($1.$2) {$n", 18); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_57, "case $1 ... $2:$n", 17); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_58, "(-2147483647 -1)", 16); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_59, "IL64($1)", 8); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_60, "(IL64(-9223372036854775807) - IL64(1))", 38); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_61, "NIM_TRUE", 8); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_62, "NIM_FALSE", 9); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_63, "(($1) $2)", 9); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_64, "static NIM_CONST $1 $2 = {NIM_NIL,NIM_NIL};$n", 45); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_65, "STRING_LITERAL($1, $2, $3);$n", 29); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_66, "static const struct {$n NI cap; void* allocator; NIM_CHAR data" "[$2+1];$n} $1 = { $2, NIM_NIL, $3 };$n", 101); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_67, "static const NimStringV2 $1 = {$2, (NimStrPayload*)&$3};$n", 58); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_68, "case $1:$n", 10); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_69, "default:$n", 10); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_70, "break;$n", 8); 
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_71, "} $n", 4); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_72, "$1.$2", 5); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_73, "$1$3[$2]", 8); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_74, "$1 {$n$2$3$4}\012", 14); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_75, "$1;\012", 4); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_76, "N_NIMCALL_PTR(void, $1)(void*, NI);\012", 36); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_77, "\011$1 = (N_NIMCALL_PTR(void, )(void*, NI)) hcrRegisterProc($3, \"$" "1\", (void*)$2);\012", 79); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_78, "$1.marker = $2;$n", 17); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_79, "$1.len = $2; $1.kind = 0;$n$3.node = &$1;$n", 43); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_80, "$1.offset = $2;$n", 17); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_81, "NI $1;$n", 8); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_82, "static char* NIM_CONST $1[$2] = {$n$3};$n", 41); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_83, "for ($1 = 0; $1 < $2; $1++) {$n$3[$1+$4].kind = 1;$n$3[$1+$4].o" "ffset = $1;$n$3[$1+$4].name = $5[$1];$n$6[$1] = &$3[$1+$4];$n}$n", 127); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_84, "$1.len = $2; $1.kind = 2; $1.sons = &$3[0];$n$4.node = &$1;$n", 61); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_85, "$1.flags = 1<<2;$n", 18); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_86, "$1.destructor = (void*)$2; $1.size = sizeof($3); $1.name = $4;$" "n", 64); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_87, "NimDT_$1_$2", 11); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_88, "$1.kind = 3;$n$1.offset = offsetof($2, $3);$n$1.typ = $4;$n$1.n" "ame = $5;$n$1.sons = &$6[0];$n$1.len = $7;$n", 107); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_89, "TNimNode* $1[$2];$n", 19); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_90, "$1.kind = 1;$n$1.offset = offsetof($2, $3);$n$1.typ = $4;$n$1.n" "ame = $5;$n", 74); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_91, 
"$1.deepcopy =(void* (N_RAW_NIMCALL*)(void*))$2;$n", 49); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_92, "Result", 6); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_93, "$N#line $2 $1$N", 15); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_94, "struct {$1} GCFRAME_;$n", 23); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_95, "\011}BeforeRet_: ;$n", 17); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_96, "}$N", 3); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_97, "\011$1 = ($3) hcrRegisterProc($4, \"$1\", (void*)$2);$n", 50); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_98, "$1(*)$2", 7); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_99, "static void* $1;$n", 18); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_100, "\011$1 = ($2) ($3$4));$n", 21); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_101, "$2 $1;$n", 8); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_102, "\011$1 = ($2) hcrRegisterProc($3, \"$1\", (void*)$1);$n", 50); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_103, "\011$1 = ($2) hcrGetProc($3, \"$1\");$n", 34); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_104, " $1;$n", 6); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_105, "\011$1 = ($2*)hcrGetGlobal($3, \"$1\");$n", 36); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_106, "NIM_CHECK_SIZE($1, $2);$n", 25); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_107, "typedef NI32 $1;$n", 18); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_108, "typedef NU8 $1;$n", 17); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_109, "typedef NU16 $1;$n", 18); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_110, "typedef NI64 $1;$n", 18); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_111, "typedef $1_PTR($2, $3) $4;$n", 28); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_112, "typedef struct {$nN_NIMCALL_PTR($2, ClP_0) $3;$nvoid* ClE_0;$n}" " $1;$n", 69); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_113, "typedef $1 $2[1];$n", 19); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_114, "typedef $1 $2[$3];$n", 20); 
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_115, " {$n", 4); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_116, "char dummy;$n", 13); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_117, "TY", 2); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_118, "typedef $1 $2;$n", 16); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_119, "$1 $2 {$n", 9); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_120, "$1 Field$2;$n", 13); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_121, "typedef NU$2 $1;$n", 18); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_122, "typedef NU8 $1[$2];$n", 21); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_123, "Field$1", 7); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_124, "NIM_CONST $1 $2 = $3;$n", 23); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_125, ",$n", 3); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_126, "{$1, ($2*)&$3}", 14); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_127, "{{$1, $1 | NIM_STRLIT_FLAG}", 27); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_128, "(($1)&$2)", 9); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_129, "{NIM_NIL,NIM_NIL}", 17); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_130, "{(($1) $2),NIM_NIL}", 19); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_131, "$1,$n", 5); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_132, "$1", 2); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_133, "{{$1}}", 6); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_134, "{$1}$n", 6); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_135, "{$1, (NimStrPayload*)&$2}", 25); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_136, "extern NIM_CONST $1 $2;$n", 25); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_137, "goto NIMSTATE_$#;$n", 19); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_138, "$2* $1;$n", 9); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_139, "\011NimThreadVars* NimTV_;$n", 25); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_140, "static N_NIMCALL(void, $1)(void)", 32); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_141, "$1 {$n$2$3$4}$n", 15); 
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_142, "$1;$n", 5); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_143, "//", 2); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_144, "$#;$n", 5); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_145, "$#($#);$n", 9); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_146, "$# = $#;$n", 10); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_147, "NULL", 4); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_148, "((NU8)($1))", 11); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_149, "($4*)(($1)+($2)), ($3)-($2)+1", 29); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_150, "($5*)($1)+(($2)-($4)), ($3)-($2)+1", 34); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_151, "($4*)($1)+($2), ($3)-($2)+1", 27); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_152, "($5*)(*$1)$4+($2), ($3)-($2)+1", 30); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_153, "($5*)$1$4+($2), ($3)-($2)+1", 27); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_154, "$1, $1Len_0", 11); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_155, "(*$1)$3, $2", 11); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_156, "$1$3, $2", 8); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_157, "$1, $2", 6); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_158, "$1.ClP_0($3$1.ClE_0);$n", 23); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_159, "$1.ClE_0\? $1.ClP_0($3$1.ClE_0):(($4)($1.ClP_0))($2);$n", 54); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_160, "$1.ClP_0($3$1.ClE_0)", 20); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_161, "$1.ClE_0\? 
$1.ClP_0($3$1.ClE_0):(($4)($1.ClP_0))($2)", 51); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_162, "(", 1); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_163, ")", 1); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_164, ";$n", 3); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_165, ");$n", 4); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_166, "[", 1); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_167, ": ", 2); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_168, "Result: ", 8); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_169, "];$n", 4); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_170, "]", 1); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_171, "if ($1) goto $2;$n", 18); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_172, "if (!($1)) goto $2;$n", 21); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_173, "$1: ;$n", 7); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_174, "!($1)", 5); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_175, "($3)((NU$2) ~($1))", 18); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_176, "-($1)", 5); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_177, "((NI$2)-($1))", 13); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_178, "($1 > 0\? 
($1) : -($1))", 22); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_179, "(($4)($1) + ($4)($2))", 21); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_180, "(($4)($1) - ($4)($2))", 21); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_181, "(($4)($1) * ($4)($2))", 21); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_182, "(($4)($1) / ($4)($2))", 21); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_183, "($4)((NU$5)($1) >> (NU$3)($2))", 30); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_184, "($4)((NU$3)($1) << (NU$3)($2))", 30); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_185, "($4)((NI$3)($1) >> (NU$3)($2))", 30); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_186, "($4)($1 & $2)", 13); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_187, "($4)($1 | $2)", 13); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_188, "($4)($1 ^ $2)", 13); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_189, "(($1 <= $2) \? $1 : $2)", 22); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_190, "(($1 >= $2) \? $1 : $2)", 22); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_191, "($4)((NU$3)($1) + (NU$3)($2))", 29); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_192, "($4)((NU$3)($1) - (NU$3)($2))", 29); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_193, "($4)((NU$3)($1) * (NU$3)($2))", 29); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_194, "($4)((NU$3)($1) / (NU$3)($2))", 29); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_195, "($4)((NU$3)($1) % (NU$3)($2))", 29); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_196, "($1 == $2)", 10); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_197, "($1 <= $2)", 10); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_198, "($1 < $2)", 9); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_199, "((NU$3)($1) <= (NU$3)($2))", 26); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_200, "((NU$3)($1) < (NU$3)($2))", 25); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_201, "((NU64)($1) <= (NU64)($2))", 26); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_202, "((NU64)($1) < (NU64)($2))", 25); 
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_203, "((NU8)($1) == (NU8)($2))", 24); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_204, "((NU8)($1) <= (NU8)($2))", 24); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_205, "((NU8)($1) < (NU8)($2))", 23); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_206, "($1 != $2)", 10); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_207, "($1.ClP_0 == $2.ClP_0 && $1.ClE_0 == $2.ClE_0)", 46); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_208, "($1)($2 $3 $4)", 14); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_209, "($#)($#)", 8); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_210, ".Sup", 4); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_211, "$1.m_type == $2", 15); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_212, "static TNimType* $#[2];$n", 25); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_213, "sizeof($1)", 10); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_214, "$1->finalizer = (void*)$2;$n", 28); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_215, "((NI)sizeof($1))", 16); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_216, "((NI)alignof($1))", 17); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_217, "((NI)offsetof($1, $2))", 22); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_218, "(*($1*) ($2))", 13); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_219, "(($1) ($2))", 11); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_220, "(($1) (ptrdiff_t) ($2))", 23); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_221, "(*($1*) (&$2))", 14); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_222, "($1-1)", 6); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_223, "$1 |= ((NU8)1)<<(($2) & 7);$n", 29); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_224, "($1- $2)", 8); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_225, "$1 |= ((NU16)1)<<(($2) & 15);$n", 31); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_226, "$1 |= ((NU32)1)<<(($2) & 31);$n", 31); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_227, "$1 |= ((NU64)1)<<(($2) & 63);$n", 31); 
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_228, "$1 &= ~(((NU8)1) << (($2) & 7));$n", 34); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_229, "$1 &= ~(((NU16)1) << (($2) & 15));$n", 36); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_230, "$1 &= ~(((NU32)1) << (($2) & 31));$n", 36); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_231, "$1 &= ~(((NU64)1) << (($2) & 63));$n", 36); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_232, "$1 >= $2 && $1 <= $3", 20); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_233, "$1 == $2", 8); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_234, "(($1 &((NU8)1<<((NU)($2)&7U)))!=0)", 34); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_235, "(($1 &((NU16)1<<((NU)($2)&15U)))!=0)", 36); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_236, "(($1 &((NU32)1<<((NU)($2)&31U)))!=0)", 36); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_237, "(($1 &((NU64)1<<((NU)($2)&63U)))!=0)", 36); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_238, "(($1[(NU)($2)>>3] &(1U<<((NU)($2)&7U)))!=0)", 43); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_239, "$1[(NU)($2)>>3] |=(1U<<($2&7U));$n", 34); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_240, "$1[(NU)($2)>>3] &= ~(1U<<($2&7U));$n", 36); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_241, "for ($1 = 0; $1 < $2; $1++) $n $3[$1] = $4[$1] $6 $5[$1];$n", 60); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_242, "static NIM_CONST $1 $2 = $3;$n", 30); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_243, "for ($1 = $3; $1 <= $4; $1++) $n$2[(NU)($1)>>3] |=(1U<<((NU)($1" ")&7U));$n", 72); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_244, "$1[(NU)($2)>>3] |=(1U<<((NU)($2)&7U));$n", 40); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_245, "$1 = 0;$n", 9); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_246, "for ($1 = $3; $1 <= $4; $1++) $n$2 |=(($5)(1)<<(($1)%(sizeof($5" ")*8)));$n", 72); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_247, "$1 |=(($3)(1)<<(($2)%(sizeof($3)*8)));$n", 40); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_248, "$1.Field$2", 10); 
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_249, "LOC$1.source", 12); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_250, "LOC$#.dest", 10); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_251, ".Field$1", 8); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_252, ".$1", 3); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_253, "TFrame $1;$n", 12); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_254, "if (!$1) goto $2;$n", 19); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_255, "goto $1;$n", 10); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_256, "TMP$1_", 6); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_257, "static void* $#[$#] = {", 23); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_258, "&&TMP$#_, ", 10); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_259, "&&TMP$#_};$n", 12); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_260, "goto *$#[$#];$n", 15); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_261, "TMP$#_:$n", 9); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_262, "case $1: $n$2break;$n", 21); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_263, "goto LA$1_;$n", 13); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_264, "LA$1_: ;$n", 10); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_265, "NIMSTATE_$#:$n", 14); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_266, "switch ($1) {$n", 15); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_267, "default: __assume(0);$n", 23); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_268, "goto BeforeRet_;$n", 18); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_269, "throw;$n", 8); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_270, "else", 4); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_271, "throw $1;$n", 11); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_272, "$n#pragma omp $4$nfor ($1 = $2; $1 <= $3; ++$1)", 47); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_273, "$n#pragma omp $5$nfor ($1 = $2; $1 <= $3; $1 += $4)", 51); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_274, "case -1:$n", 10); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_275, " goto BeforeRet_;$n", 19); 
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_276, "case $2: goto $1$2;$n", 21); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_277, "(((NI*) $1)[1] < 0)", 19); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_278, "((((NI*) $1.ClE_0)[1]) < 0)", 27); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_279, "$1 N_NIMCALL(void, $2)(void) {$N", 32); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_280, "\011int* nim_hcr_dummy_ = 0;$n\011NIM_BOOL nim_hcr_do_init_ = hcrRegi" "sterGlobal($1, \"module_initialized_\", 1, NULL, (void**)&nim_hcr_" "dummy_);$n", 137); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_281, "{$N", 3); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_282, "\011TFrame FR_; FR_.len = 0;$N", 27); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_283, "}$N$N", 5); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_284, "N_LIB_EXPORT N_NIMCALL(void, $1)(void* handle, N_NIMCALL_PTR(vo" "id*, getProcAddr)(void*, char*)) {$N", 99); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_285, "static $2 $1;$n", 15); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_286, "\011$1 = ($2) $3($4, $5);$n", 24); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_287, "NIM_EXTERNC N_NIMCALL(void, nimLoadProcs$1)(void) {$2}$N$N", 58); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_288, "N_LIB_EXPORT N_NIMCALL(void, HcrCreateTypeInfos)(void) {$N", 58); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_289, "$nN_LIB_PRIVATE const char* hcr_module_list[] = {$n", 51); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_290, "\011$1,$n", 6); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_291, "\011\"\"};$n", 7); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_292, "$nN_LIB_EXPORT N_NIMCALL(void**, HcrGetImportedModules)() { ret" "urn (void**)hcr_module_list; }$n", 95); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_293, "$nN_LIB_EXPORT N_NIMCALL(char*, HcrGetSigHash)() { return \"$1\";" " }$n$n", 69); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_294, "static void* hcr_handle;$N", 26); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_295, "N_LIB_EXPORT 
N_NIMCALL(void, $1)(void);$N", 41); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_296, "N_LIB_EXPORT N_NIMCALL(void, $1)(void*, N_NIMCALL_PTR(void*, ge" "tProcAddr)(void*, char*));$N", 91); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_297, "N_LIB_EXPORT N_NIMCALL(void, HcrCreateTypeInfos)(void);$N", 57); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_298, "\011$1();$N", 8); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_299, "\011hcrInit((void**)hcr_module_list, $1, $2, $3, hcr_handle, nimGe" "tProcAddr);$n", 76); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_300, "\011$1(hcr_handle, nimGetProcAddr);$N", 34); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_301, "\011hcrAddModule($1);\012", 19); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_302, "\011HcrCreateTypeInfos();$N", 24); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_303, "\011hcrRegisterGlobal($1, \"cmdCount\", sizeof(cmd_count), NULL, (vo" "id**)&cmd_count);$N", 82); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_304, "\011hcrRegisterGlobal($1, \"cmdLine\", sizeof(cmd_line), NULL, (void" "**)&cmd_line);$N", 79); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_305, "N_LIB_PRIVATE N_NIMCALL(void, $1)(void);$N", 42); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_306, "$#NI NimThreadVarsSize(){return (NI)sizeof(NimThreadVars);}$n", 61); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_307, "/* Generated by Nim Compiler v$1 */$N/* (c) 2019 Andreas Rump" "f */$N/* The generated code is subject to the original license. " "*/$N", 131); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_308, "/* Generated by Nim Compiler v$1 */$N/* (c) 2019 Andreas Rump" "f */$N/* The generated code is subject to the original license. 
" "*/$N/* Compiled for: $2, $3, $4 */$N/* Command for C compiler:$n" " $5 */$N", 201); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_309, "#define NIM_INTBITS $1\012", 23); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_310, "typedef struct {$1} NimThreadVars;$n", 36); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_311, "#include \"$1\"$N", 15); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_312, "#include $1$N", 13); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_313, "--file:r\"$1\"$N", 14); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_314, "\012[Symbols]$n$1", 14); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_315, "/* Generated by Nim Compiler v$1 */$N/* (c) 2017 Andreas Rump" "f */$N/* The generated code is subject to the original license. " "*/$N", 131); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_316, "__$1__", 6); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_317, "#ifndef $1$n#define $1$n", 24); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_318, "N_CDECL(void, NimMain)(void);$n", 31); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_319, "#endif /* $1 */$n", 17); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_320, "var F={procname:$1,prev:framePtr,filename:$2,line:0};$n", 55); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_321, "framePtr = F;$n", 15); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_322, "var $1;$n", 9); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_323, "if ($1 == undefined) {$n", 24); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_324, "if ($1 === undefined) {$n", 25); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_325, "var $1 = null;$n", 16); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_326, "var $1_Idx = 0;$n", 17); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_327, "[$1]", 4); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_328, "new $1($2)", 10); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_329, "var $# = null;$n", 16); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_330, "var $#_Idx = 0;$n", 17); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_331, "var $# = $#;$n", 
14); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_332, "return [$#, $#];$n", 18); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_333, "return $#;$n", 12); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_334, "BeforeRet: do {$n", 17); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_335, "} while (false);$n", 18); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_336, "try {$n$1} catch (e) {$n alert(\"Unhandled exception:\\n\" + e.mes" "sage + \"\\n\"$n}", 77); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_337, "function $#() { return $#.apply(this, arguments); }$n", 53); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_338, "function $#($#) {$n$#$#$#$#$#", 29); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_339, "arrayConstr($1, $2, $3)", 23); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_340, "NTI$1", 5); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_341, "var $1 = {size: 0,kind: $2,base: null,node: null,finalizer: nul" "l};$n", 68); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_342, "$1.base = $2;$n", 15); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_343, "\"$1\": {kind: 1, offset: $1, typ: $2, name: $3, len: 0, sons: nu" "ll}", 66); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_344, "var NNI$1 = {kind: 2, offset: 0, typ: null, name: null, len: $2" ", sons: {$3}};$n", 79); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_345, "var $1 = {size: 0, kind: $2, base: null, node: null, finalizer:" " null};$n", 72); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_346, "$1.node = NNI$2;$n", 18); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_347, "var NNI$1 = $2;$n", 17); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_348, "{kind: 2, len: $1, offset: 0, typ: null, name: null, sons: [$2]" "}", 64); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_349, "{kind: 1, offset: \"$1\", len: 0, typ: $2, name: $3, sons: null}", 62); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_350, "[$1, $2]", 8); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_351, "[setConstr($1), $2]", 19); 
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_352, "{kind: 3, offset: \"$1\", len: $3, typ: $2, name: $4, sons: [$5]}", 63); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_353, "{kind: 1, offset: \"Field$1\", len: 0, typ: $2, name: \"Field$1\", " "sons: null}", 74); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_354, "Field$1: $2", 11); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_355, "m_type: $1", 10); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_356, "$#: ", 4); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_357, "({$1})", 6); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_358, "nimCopy(null, $1, $2)", 21); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_359, "Tmp$1", 5); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_360, "var $1 = $2, $3 = $1[0], $3_Idx = $1[1];$n", 42); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_361, "$1 = nimCopy(null, $1, $2);$n", 29); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_362, "$1[0][0]", 8); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_363, "$1[0][1]", 8); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_364, "$1[0]", 5); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_365, "$1[1]", 5); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_366, "makeNimstrLit($1)", 17); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_367, "// line $2 \"$1\"$n", 17); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_368, "F.line = $1;$n", 14); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_369, "($1 || $2)", 10); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_370, "if ($1) $2 = true; else {", 25); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_371, "$2 = $1;", 8); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_372, "($1 && $2)", 10); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_373, "if (!$1) $2 = false; else {", 27); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_374, "$1[0][$1[1]]", 12); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_375, "($1 = $2, $1)", 13); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_376, "$1 = (($5 $2 $3) $4)", 20); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_377, "(($1 
$2 $3) $4)", 15); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_378, "addInt($1, $2)", 14); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_379, "($1 + $2)", 9); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_380, "subInt($1, $2)", 14); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_381, "($1 - $2)", 9); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_382, "mulInt($1, $2)", 14); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_383, "($1 * $2)", 9); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_384, "divInt($1, $2)", 14); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_385, "Math.trunc($1 / $2)", 19); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_386, "modInt($1, $2)", 14); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_387, "Math.trunc($1 % $2)", 19); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_388, "($1 / $2)", 9); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_389, "($1 << $2)", 10); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_390, "($1 >> $2)", 10); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_391, "($1 & $2)", 9); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_392, "($1 | $2)", 9); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_393, "($1 ^ $2)", 9); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_394, "nimMin($1, $2)", 14); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_395, "nimMax($1, $2)", 14); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_396, "($1 % $2)", 9); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_397, "negInt($1)", 10); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_398, "negInt64($1)", 12); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_399, "absInt($1)", 10); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_400, "Math.abs($1)", 12); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_401, "+($1)", 5); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_402, "~($1)", 5); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_403, "nimCharToStr($1)", 16); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_404, "nimBoolToStr($1)", 16); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_405, "cstrToNimstr(($1)+\"\")", 21); 
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_406, "cstrToNimstr($1)", 16); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_407, "(($1 $2) >>> $3)", 16); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_408, "($# == $# && $# == $#)", 22); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_409, "var $1 = $2; $2 = $3; $3 = $1;$n", 32); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_410, "var $1 = $2; $2 = $3; $3 = $1;", 30); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_411, "$1 - 1", 6); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_412, "subInt($1, 1)", 13); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_413, "if ($1 != null) { addChar($3, $2); } else { $3 = [$2]; }", 56); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_414, "if ($1 != null) { $4 += $2; } else { $4 = $2$3; }", 49); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_415, ".slice()", 8); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_416, "if ($1 != null) { $4 = ($4).concat($2); } else { $4 = $2$3; }", 61); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_417, "if ($1 != null) { $3.push($2); } else { $3 = [$2]; }", 52); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_418, "var $1 = nimCopy(null, $2, $3);$n", 33); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_419, "[$1].concat(", 12); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_420, "($1 || []).concat(", 18); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_421, "[$1],", 5); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_422, "$1 || [],", 9); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_423, "[$1])", 5); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_424, "$1 || [])", 9); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_425, "eqStrings($1, $2)", 17); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_426, "(cmpStrings($1, $2) <= 0)", 25); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_427, "(cmpStrings($1, $2) < 0)", 24); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_428, "($1 == null)", 12); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_429, "($# == null && $# === 0)", 24); 
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_430, "$1 = $2;$n", 10); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_431, "$1 = [$3]; $2 = 0;$n", 20); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_432, "$1 = [[$2], 0];$n", 17); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_433, "($1 \? 1:0)", 10); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_434, "($1 != null \? $2.length : 0)", 28); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_435, "$1.length", 9); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_436, "($1 != null \? ($2.length-1) : -1)", 33); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_437, "$1 += $2", 8); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_438, "$1 = addInt($3, $2)", 19); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_439, "$1 -= $2", 8); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_440, "$1 = subInt($3, $2)", 19); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_441, "($1 == null \? $3 = mnewString($2) : $3.length = $2)", 51); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_442, "if ($1 === null) $4 = [];\012 if ($4.length < $2) { " "for (var i=$4.length;i<$5;++i) $4.push($3); }\012 els" "e { $4.length = $5; }", 148); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_443, "SetCard($1)", 11); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_444, "SetLt($1, $2)", 13); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_445, "SetLe($1, $2)", 13); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_446, "SetEq($1, $2)", 13); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_447, "SetMul($1, $2)", 14); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_448, "SetPlus($1, $2)", 15); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_449, "SetMinus($1, $2)", 16); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_450, "$1[$2] = true", 13); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_451, "delete $1[$2]", 13); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_452, "($1[$2] != undefined)", 21); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_453, "$1 = new Array($2); for (var i=0;i<$2;++i) {$1[i]=$3;}", 54); 
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_454, "[]", 2); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_455, "($1.m_type == $2)", 17); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_456, "isObj($1.m_type, $2)", 20); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_457, "$1 = null, $2 = 0;$n", 20); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_458, "$1 = genericReset($3, $2);$n", 28); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_459, "($1.slice($2))", 14); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_460, "mnewString($1)", 14); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_461, "mnewString(0)", 13); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_462, "($1 = $2, $1[0]), $1[1]", 23); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_463, "($1 = $2, $1)[0]", 16); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_464, "($1.slice($2, $3+1))", 20); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_465, "var $1 = $2;$n", 14); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_466, "Field$#: [$#, $#]", 17); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_467, "Field$#: $#", 11); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_468, "$#: [$#, $#]", 12); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_469, "$#: $#", 6); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_470, "{$1}", 4); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_471, "(!!($1))", 8); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_472, "(($1)|0)", 8); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_473, "if ($1[$2.$3]$4undefined) { raiseFieldError(makeNimstrLit($5));" " }$n", 67); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_474, "!==", 3); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_475, "===", 3); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_476, "chckIndx($1, $2, ($3 != null \? 
$3.length : 0)+$2-1)-$2", 54); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_477, "($1)-$2", 7); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_478, "$1.charCodeAt($2)", 17); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_479, "($1 $2)", 7); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_480, "($1|0)", 6); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_481, "($1 - ($2 $3))", 14); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_482, "null", 4); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_483, "chckRange($1, $2, $3)", 21); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_484, "toJSStr($1)", 11); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_485, "L$1: do {$n", 11); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_486, "} while(false);$n", 17); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_487, "else {$n", 8); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_488, "if ($1) {$n", 11); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_489, "L$1: while (true) {$n", 21); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_490, "if (!$1) break L$2;$n", 21); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_491, "switch (toJSStr($1)) {$n", 24); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_492, "default: $n", 11); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_493, "break BeforeRet;$n", 18); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_494, "break L$1;$n", 12); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_495, "$1 = nimCopy(null, $2, $3);$n", 29); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_496, "nimCopy($1, $2, $3);$n", 22); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_497, "var $1 = $4; $2 = $1[0]; $3 = $1[1];$n", 38); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_498, "$# = [$#, $#];$n", 16); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_499, "$1 = $2; $3 = $4;$n", 19); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_500, "try {$n", 7); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_501, "--excHandler;$n} catch (EXC) {$n var prevJSError = lastJSError;" "$n lastJSError = EXC;$n --excHandler;$n", 102); 
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_502, "framePtr = $1;$n", 16); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_503, "lastJSError instanceof $1", 25); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_504, "isObj(lastJSError.m_type, $1)", 29); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_505, "if (lastJSError && ($1)) {$n", 28); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_506, "var $1 = lastJSError;$n", 23); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_507, "lastJSError = prevJSError;$n", 28); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_508, "raiseException($1, $2);$n", 25); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_509, "$1 = true;$n", 12); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_510, "/* Generated by the Nim Compiler v$1 */$n/* (c) 2019 Andreas " "Rumpf */$n$nvar framePtr = null;$nvar excHandler = 0;$nvar lastJ" "SError = null;$nif (typeof Int8Array === \'undefined\') Int8Array " "= Array;$nif (typeof Int16Array === \'undefined\') Int16Array = Ar" "ray;$nif (typeof Int32Array === \'undefined\') Int32Array = Array;" "$nif (typeof Uint8Array === \'undefined\') Uint8Array = Array;$nif" " (typeof Uint16Array === \'undefined\') Uint16Array = Array;$nif (" "typeof Uint32Array === \'undefined\') Uint32Array = Array;$nif (ty" "peof Float32Array === \'undefined\') Float32Array = Array;$nif (ty" "peof Float64Array === \'undefined\') Float64Array = Array;$n", 633); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_511, "Deprecated", 10); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_512, "Deprecated:", 11); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_513, "\012<p><strong class=\"examples_text\">$1</strong></p>\012", 50); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_514, "\012\\textbf{$1}\012", 13); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_515, "<span class=\"Comment\">$1</span>", 31); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_516, "\\spanComment{$1}", 16); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_517, "<span class=\"Keyword\">$1</span>", 31); 
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_518, "\\spanKeyword{$1}", 16); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_519, "<span class=\"Operator\">$1</span>", 32); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_520, "\\spanOperator{$1}", 17); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_521, "<span class=\"StringLit\">$1</span>", 33); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_522, "\\spanStringLit{$1}", 18); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_523, "<span class=\"CharLit\">$1</span>", 31); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_524, "\\spanCharLit{$1}", 16); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_525, "<span class=\"DecNumber\">$1</span>", 33); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_526, "\\spanDecNumber{$1}", 18); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_527, "<span class=\"FloatNumber\">$1</span>", 35); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_528, "\\spanFloatNumber{$1}", 20); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_529, "<a href=\"#$2\"><span class=\"Identifier\">$1</span></a>", 52); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_530, "\\spanIdentifier{$1}", 19); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_531, "<a href=\"$1#$2\"><span class=\"Identifier\">$3</span></a>", 54); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_532, "<span class=\"Identifier\">$1</span>", 34); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_533, "<span><span class=\"Other\">{</span><span class=\"Other pragmadots" "\">...</span><span class=\"Other\">}</span></span><span class=\"prag" "mawrap\"><span class=\"Other\">$1</span><span class=\"pragma\">", 185); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_534, "\\spanOther{$1}", 14); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_535, "</span><span class=\"Other\">$1</span></span>", 43); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_536, "<span class=\"Other\">$1</span>", 29); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_537, "<a class=\"reference external\" href=\"$2\">$1</a>", 46); 
STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_538, "<a href=\"$2#$1\"><span class=\"Identifier\">$1</span></a>", 54); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_539, "$1 -> \"$2\";$n", 13); STRING_LITERAL(TM__Vw9cfUOQOae9b9bzZBlucMZQg_540, "digraph $1 {$n$2}$n", 19); static N_NIMCALL(void, Marker_tyRef__4hi0XQqK9aLiPuWT9acsXm9aQ)(void* p, NI op) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* a; a = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)p; nimGCvisit((void*)(*a).left, op); nimGCvisit((void*)(*a).right, op); nimGCvisit((void*)(*a).data, op); } static N_NIMCALL(void, TM__Vw9cfUOQOae9b9bzZBlucMZQg_3)(void) { NI T1_; T1_ = (NI)0; for (T1_ = 0; T1_ < 4096; T1_++) { nimGCvisit((void*)cache__WGMp5Wo1NlgbAMOysPIfmQ[T1_], 0); } } N_LIB_PRIVATE N_NIMCALL(NI, len__9b0YRltzV3kNSE9aQTsG82wg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* a) { NI result; result = (NI)0; { if (!(a == NIM_NIL)) goto LA3_; result = ((NI) 0); } goto LA1_; LA3_: ; { result = ((*a).L > 0? ((*a).L) : -((*a).L)); } LA1_: ; return result; } static N_INLINE(void, incRef__AT1eRuflKWyTTBdLjEDZbg_3system)(tyObject_Cell__1zcF9cV8XIAtbN8h5HRUB8g* c) { (*c).refcount = (NI)((NU64)((*c).refcount) + (NU64)(((NI) 8))); } static N_INLINE(tyObject_Cell__1zcF9cV8XIAtbN8h5HRUB8g*, usrToCell__QFQqcLB3lgOdwipkv9a60xwsystem)(void* usr) { tyObject_Cell__1zcF9cV8XIAtbN8h5HRUB8g* result; result = (tyObject_Cell__1zcF9cV8XIAtbN8h5HRUB8g*)0; result = ((tyObject_Cell__1zcF9cV8XIAtbN8h5HRUB8g*) ((NI)((NU64)(((NI) (ptrdiff_t) (usr))) - (NU64)(((NI) 16))))); return result; } static N_INLINE(void, rtlAddZCT__AT1eRuflKWyTTBdLjEDZbg_2system)(tyObject_Cell__1zcF9cV8XIAtbN8h5HRUB8g* c) { addZCT__Y66tOYFjgwJ0k4aLz4bc0Q((&gch__IcYaEuuWivYAS86vFMTS3Q.zct), c); } static N_INLINE(void, decRef__AT1eRuflKWyTTBdLjEDZbgsystem)(tyObject_Cell__1zcF9cV8XIAtbN8h5HRUB8g* c) { (*c).refcount = (NI)((NU64)((*c).refcount) - (NU64)(((NI) 8))); { if (!((NU64)((*c).refcount) < (NU64)(((NI) 8)))) goto LA3_; rtlAddZCT__AT1eRuflKWyTTBdLjEDZbg_2system(c); } LA3_: 
; } static N_INLINE(void, asgnRef)(void** dest, void* src) { { tyObject_Cell__1zcF9cV8XIAtbN8h5HRUB8g* T5_; if (!!((src == NIM_NIL))) goto LA3_; T5_ = (tyObject_Cell__1zcF9cV8XIAtbN8h5HRUB8g*)0; T5_ = usrToCell__QFQqcLB3lgOdwipkv9a60xwsystem(src); incRef__AT1eRuflKWyTTBdLjEDZbg_3system(T5_); } LA3_: ; { tyObject_Cell__1zcF9cV8XIAtbN8h5HRUB8g* T10_; if (!!(((*dest) == NIM_NIL))) goto LA8_; T10_ = (tyObject_Cell__1zcF9cV8XIAtbN8h5HRUB8g*)0; T10_ = usrToCell__QFQqcLB3lgOdwipkv9a60xwsystem((*dest)); decRef__AT1eRuflKWyTTBdLjEDZbgsystem(T10_); } LA8_: ; (*dest) = src; } static N_INLINE(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, pop__9c4Y4hTtvRqjj2EC8KP9aqDAsystem)(tySequence__WwUFq9cJ2xKRlsAWVEHyPRg** s) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; NI L; NI T1_; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; T1_ = ((*s) ? (*s)->Sup.len : 0); L = (NI)(T1_ - ((NI) 1)); result = (*s)->data[L]; unsureAsgnRef((void**) (&(*s)), (tySequence__WwUFq9cJ2xKRlsAWVEHyPRg*) setLengthSeqV2(&((*s))->Sup, (&NTI__WwUFq9cJ2xKRlsAWVEHyPRg_), ((NI) (L)))); return result; } static N_INLINE(void, nimCopyMem)(void* dest, void* source, NI size) { void* T1_; T1_ = (void*)0; T1_ = memcpy(dest, source, ((size_t) (size))); } static N_INLINE(void, copyMem__i80o3k0SgEI5gTRCzYdyWAsystem)(void* dest, void* source, NI size) { nimCopyMem(dest, source, size); } static N_INLINE(void, appendString)(NimStringDesc* dest, NimStringDesc* src) { { if (!!((src == NIM_NIL))) goto LA3_; copyMem__i80o3k0SgEI5gTRCzYdyWAsystem(((void*) ((&(*dest).data[(*dest).Sup.len]))), ((void*) ((*src).data)), ((NI) ((NI)((*src).Sup.len + ((NI) 1))))); (*dest).Sup.len += (*src).Sup.len; } LA3_: ; } N_LIB_PRIVATE N_NIMCALL(NimStringDesc*, dollar___mZ66tEveFIQokq3arf8Klw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* r) { NimStringDesc* result; NI T1_; result = (NimStringDesc*)0; T1_ = (NI)0; T1_ = len__9b0YRltzV3kNSE9aQTsG82wg(r); result = mnewString(((NI) (T1_))); result = setLengthStr(result, ((NI) 0)); { NimStringDesc* 
s; s = (NimStringDesc*)0; { tySequence__WwUFq9cJ2xKRlsAWVEHyPRg* stack; if (!!((r == NIM_NIL))) goto LA5_; stack = (tySequence__WwUFq9cJ2xKRlsAWVEHyPRg*) newSeq((&NTI__WwUFq9cJ2xKRlsAWVEHyPRg_), 1); asgnRef((void**) (&stack->data[0]), r); { while (1) { NI T9_; tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* it; T9_ = (stack ? stack->Sup.len : 0); if (!(((NI) 0) < T9_)) goto LA8; it = pop__9c4Y4hTtvRqjj2EC8KP9aqDAsystem((&stack)); { while (1) { NI T12_; if (!!(((*it).left == NIM_NIL))) goto LA11; stack = (tySequence__WwUFq9cJ2xKRlsAWVEHyPRg*) incrSeqV3((TGenericSeq*)(stack), (&NTI__WwUFq9cJ2xKRlsAWVEHyPRg_)); T12_ = stack->Sup.len++; asgnRef((void**) (&stack->data[T12_]), (*it).right); it = (*it).left; } LA11: ; } s = (*it).data; result = resizeString(result, (s ? s->Sup.len : 0) + 0); appendString(result, s); } LA8: ; } } LA5_: ; } return result; } static N_INLINE(void, nimGCunrefNoCycle)(void* p) { tyObject_Cell__1zcF9cV8XIAtbN8h5HRUB8g* T1_; T1_ = (tyObject_Cell__1zcF9cV8XIAtbN8h5HRUB8g*)0; T1_ = usrToCell__QFQqcLB3lgOdwipkv9a60xwsystem(p); decRef__AT1eRuflKWyTTBdLjEDZbgsystem(T1_); } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, newRope__dBdikNFB2Y7QJ9aVJE7dGHg)(NimStringDesc* data) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; NimStringDesc* T1_; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*) newObj((&NTI__4hi0XQqK9aLiPuWT9acsXm9aQ_), sizeof(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA)); (*result).Sup.m_type = (&NTI__OFzf0kSiPTcNreUIeJgWVA_); (*result).L = ((NI64)-((data ? 
data->Sup.len : 0))); T1_ = (NimStringDesc*)0; T1_ = (*result).data; (*result).data = copyStringRC1(data); if (T1_) nimGCunrefNoCycle(T1_); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, amp___ShdZ6VrAQkY0nWR9a39b9bGdQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* a, tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* b) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; { if (!(a == NIM_NIL)) goto LA3_; result = b; } goto LA1_; LA3_: ; { if (!(b == NIM_NIL)) goto LA6_; result = a; } goto LA1_; LA6_: ; { result = newRope__dBdikNFB2Y7QJ9aVJE7dGHg(((NimStringDesc*) NIM_NIL)); (*result).L = (NI)(((*a).L > 0? ((*a).L) : -((*a).L)) + ((*b).L > 0? ((*b).L) : -((*b).L))); asgnRef((void**) (&(*result).left), a); asgnRef((void**) (&(*result).right), b); } LA1_: ; return result; } static N_INLINE(int, nimCmpMem)(void* a, void* b, NI size) { int result; result = (int)0; result = memcmp(a, b, ((size_t) (size))); return result; } static N_INLINE(NIM_BOOL, equalMem__9bGgqEk7RXXl6eqM9c1HdELAsystem)(void* a, void* b, NI size) { NIM_BOOL result; int T1_; result = (NIM_BOOL)0; T1_ = (int)0; T1_ = nimCmpMem(a, b, size); result = (T1_ == ((NI32) 0)); return result; } static N_INLINE(NIM_BOOL, eqStrings)(NimStringDesc* a, NimStringDesc* b) { NIM_BOOL result; NI alen; NI blen; { result = (NIM_BOOL)0; alen = (a ? a->Sup.len : 0); blen = (b ? 
b->Sup.len : 0); { if (!(alen == blen)) goto LA3_; { if (!(alen == ((NI) 0))) goto LA7_; result = NIM_TRUE; goto BeforeRet_; } LA7_: ; result = equalMem__9bGgqEk7RXXl6eqM9c1HdELAsystem(((void*) ((&a->data[((NI) 0)]))), ((void*) ((&b->data[((NI) 0)]))), ((NI) (alen))); goto BeforeRet_; } LA3_: ; }BeforeRet_: ; return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, insertInCache__yShmEg9cffWxI7s5XzEKBow_2)(NimStringDesc* s) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; NI h; NI T1_; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; gCacheTries__5GfZTThHPBfB9bjRZdFluBw += ((NI) 1); T1_ = (NI)0; T1_ = hash__6PCYkKlCNhq9cnRLnqWKkwQ(s); h = (NI)(T1_ & ((NI) 4095)); result = cache__WGMp5Wo1NlgbAMOysPIfmQ[(h)- 0]; { NIM_BOOL T4_; T4_ = (NIM_BOOL)0; T4_ = (result == 0); if (T4_) goto LA5_; T4_ = !(eqStrings((*result).data, s)); LA5_: ; if (!T4_) goto LA6_; gCacheMisses__fLRm9am8S0daYBVNK6JKyBg += ((NI) 1); result = newRope__dBdikNFB2Y7QJ9aVJE7dGHg(s); asgnRef((void**) (&cache__WGMp5Wo1NlgbAMOysPIfmQ[(h)- 0]), result); } LA6_: ; return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, rope__yShmEg9cffWxI7s5XzEKBow)(NimStringDesc* s) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; { if (!((s ? 
s->Sup.len : 0) == ((NI) 0))) goto LA3_; result = NIM_NIL; } goto LA1_; LA3_: ; { result = insertInCache__yShmEg9cffWxI7s5XzEKBow_2(s); } LA1_: ; return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, amp___Z7W1o5nPSc3ExfO5f7j1Gg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* a, NimStringDesc* b) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* T1_; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; T1_ = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; T1_ = rope__yShmEg9cffWxI7s5XzEKBow(b); result = amp___ShdZ6VrAQkY0nWR9a39b9bGdQ(a, T1_); return result; } N_LIB_PRIVATE N_NIMCALL(void, add__yG4AKzsBRS1W4MANDlXQeg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** a, NimStringDesc* b) { unsureAsgnRef((void**) (&(*a)), amp___Z7W1o5nPSc3ExfO5f7j1Gg((*a), b)); } N_LIB_PRIVATE N_NIMCALL(void, add__IM4kcMNkkOLJtqdEqSxR8A)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** a, tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* b) { unsureAsgnRef((void**) (&(*a)), amp___ShdZ6VrAQkY0nWR9a39b9bGdQ((*a), b)); } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA)(NimStringDesc* frmt, tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; NI i; NI length; NI num; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; i = ((NI) 0); length = (frmt ? frmt->Sup.len : 0); result = NIM_NIL; num = ((NI) 0); { while (1) { NI start; if (!(i < length)) goto LA2; { if (!((NU8)(frmt->data[i]) == (NU8)(36))) goto LA5_; i += ((NI) 1); switch (((NU8)(frmt->data[i]))) { case 36: { add__yG4AKzsBRS1W4MANDlXQeg(&result, ((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_4)); i += ((NI) 1); } break; case 35: { i += ((NI) 1); add__IM4kcMNkkOLJtqdEqSxR8A(&result, args[num]); num += ((NI) 1); } break; case 48 ... 
57: { NI j; j = ((NI) 0); { while (1) { j = (NI)((NI)((NI)(j * ((NI) 10)) + ((NU8)(frmt->data[i]))) - ((NI) 48)); i += ((NI) 1); { NIM_BOOL T14_; T14_ = (NIM_BOOL)0; T14_ = ((frmt ? frmt->Sup.len : 0) <= i); if (T14_) goto LA15_; T14_ = !((((NU8)(frmt->data[i])) >= ((NU8)(48)) && ((NU8)(frmt->data[i])) <= ((NU8)(57)))); LA15_: ; if (!T14_) goto LA16_; goto LA10; } LA16_: ; } } LA10: ; num = j; { if (!((NI)((argsLen_0-1) + ((NI) 1)) < j)) goto LA20_; { NimStringDesc* T26_; if (!NIM_TRUE) goto LA24_; T26_ = (NimStringDesc*)0; T26_ = rawNewString((frmt ? frmt->Sup.len : 0) + 50); appendString(T26_, ((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_5)); appendString(T26_, frmt); failedAssertImpl__W9cjVocn1tjhW7p7xohJj6A(T26_); } LA24_: ; } goto LA18_; LA20_: ; { add__IM4kcMNkkOLJtqdEqSxR8A(&result, args[(NI)(j - ((NI) 1))]); } LA18_: ; } break; case 123: { NI j_2; i += ((NI) 1); j_2 = ((NI) 0); { while (1) { if (!(((NU8)(frmt->data[i])) >= ((NU8)(48)) && ((NU8)(frmt->data[i])) <= ((NU8)(57)))) goto LA30; j_2 = (NI)((NI)((NI)(j_2 * ((NI) 10)) + ((NU8)(frmt->data[i]))) - ((NI) 48)); i += ((NI) 1); } LA30: ; } num = j_2; { if (!((NU8)(frmt->data[i]) == (NU8)(125))) goto LA33_; i += ((NI) 1); } goto LA31_; LA33_: ; { { NimStringDesc* T40_; if (!NIM_TRUE) goto LA38_; T40_ = (NimStringDesc*)0; T40_ = rawNewString((frmt ? frmt->Sup.len : 0) + 50); appendString(T40_, ((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_6)); appendString(T40_, frmt); failedAssertImpl__W9cjVocn1tjhW7p7xohJj6A(T40_); } LA38_: ; } LA31_: ; { if (!((NI)((argsLen_0-1) + ((NI) 1)) < j_2)) goto LA43_; { NimStringDesc* T49_; if (!NIM_TRUE) goto LA47_; T49_ = (NimStringDesc*)0; T49_ = rawNewString((frmt ? 
frmt->Sup.len : 0) + 50); appendString(T49_, ((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_7)); appendString(T49_, frmt); failedAssertImpl__W9cjVocn1tjhW7p7xohJj6A(T49_); } LA47_: ; } goto LA41_; LA43_: ; { add__IM4kcMNkkOLJtqdEqSxR8A(&result, args[(NI)(j_2 - ((NI) 1))]); } LA41_: ; } break; case 110: { add__yG4AKzsBRS1W4MANDlXQeg(&result, ((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_8)); i += ((NI) 1); } break; case 78: { add__yG4AKzsBRS1W4MANDlXQeg(&result, ((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_8)); i += ((NI) 1); } break; default: { { NimStringDesc* T58_; if (!NIM_TRUE) goto LA56_; T58_ = (NimStringDesc*)0; T58_ = rawNewString((frmt ? frmt->Sup.len : 0) + 50); appendString(T58_, ((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_9)); appendString(T58_, frmt); failedAssertImpl__W9cjVocn1tjhW7p7xohJj6A(T58_); } LA56_: ; } break; } } LA5_: ; start = i; { while (1) { if (!(i < length)) goto LA60; { if (!!(((NU8)(frmt->data[i]) == (NU8)(36)))) goto LA63_; i += ((NI) 1); } goto LA61_; LA63_: ; { goto LA59; } LA61_: ; } LA60: ; } LA59: ; { NimStringDesc* T70_; if (!(start <= (NI)(i - ((NI) 1)))) goto LA68_; T70_ = (NimStringDesc*)0; T70_ = substr__2yh9cer0ymNRHlOOg8P7IuA(frmt, start, (NI)(i - ((NI) 1))); add__yG4AKzsBRS1W4MANDlXQeg(&result, T70_); } LA68_: ; } LA2: ; } return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___UQfMnMPks8jKz20fTXQy9bQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_10), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, rope__KOisMGxcPhz6CcSmxgwEQQ)(NI64 i) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; NimStringDesc* T1_; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; gCacheIntTries__opyfsNv023Md1P05mqsDew 
+= ((NI) 1); T1_ = (NimStringDesc*)0; T1_ = nimInt64ToStr(i); result = rope__yShmEg9cffWxI7s5XzEKBow(T1_); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___KxpxlR6eqq3gRIOYTfR67w)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_11), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___IFeEbVhQpPGgxkLehuSiBA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_12), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___BYiowJAm8zF7RBRISElaLg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_13), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___ZkZcMxwzInnijXy5kz1K3A)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_14), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(void, prepend__IM4kcMNkkOLJtqdEqSxR8A_2)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** a, tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* b) { unsureAsgnRef((void**) (&(*a)), amp___ShdZ6VrAQkY0nWR9a39b9bGdQ(b, (*a))); } 
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, amp___4cYKitaHx6RQ9azRtQsZp6w)(NimStringDesc* a, tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* b) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* T1_; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; T1_ = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; T1_ = rope__yShmEg9cffWxI7s5XzEKBow(a); result = amp___ShdZ6VrAQkY0nWR9a39b9bGdQ(T1_, b); return result; } N_LIB_PRIVATE N_NIMCALL(void, writeRope__FwuzOBq6SLlanVUstm8q9cA)(FILE* f, tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* r) { { NimStringDesc* s; s = (NimStringDesc*)0; { tySequence__WwUFq9cJ2xKRlsAWVEHyPRg* stack; if (!!((r == NIM_NIL))) goto LA4_; stack = (tySequence__WwUFq9cJ2xKRlsAWVEHyPRg*) newSeq((&NTI__WwUFq9cJ2xKRlsAWVEHyPRg_), 1); asgnRef((void**) (&stack->data[0]), r); { while (1) { NI T8_; tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* it; T8_ = (stack ? stack->Sup.len : 0); if (!(((NI) 0) < T8_)) goto LA7; it = pop__9c4Y4hTtvRqjj2EC8KP9aqDAsystem((&stack)); { while (1) { NI T11_; if (!!(((*it).left == NIM_NIL))) goto LA10; stack = (tySequence__WwUFq9cJ2xKRlsAWVEHyPRg*) incrSeqV3((TGenericSeq*)(stack), (&NTI__WwUFq9cJ2xKRlsAWVEHyPRg_)); T11_ = stack->Sup.len++; asgnRef((void**) (&stack->data[T11_]), (*it).right); it = (*it).left; } LA10: ; } s = (*it).data; write__PArlm09bKklm2BLsCg6YtaA(f, s); } LA7: ; } } LA4_: ; } } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___G9aA37gQrW88KHzpCAwhgjQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_15), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___PoDv5ydEvGdd9aiIF9cOiAPw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { 
tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_16), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___vzbf0XksfaFTXNoTT6BCwA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_17), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___lQVSDPkAFXHNoa1N7jYrNw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_18), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___6d8an6hdqiIrRjPW1wEh5Q)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_19), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___gMbiWAc0IjihIq46IYhmAw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_20), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, 
percent___uHsu7fLXac4OhMNd79bSJwA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_21), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___3WM9b4PeyDKoIDFMvYcQX3w)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_22), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___p4LhaCxKpUERrq9cB9b8Mp9cw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_23), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___TbMwXzwNL7txOQADiTjwKA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_24), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___E0nDsXp7tY4mC1BnrrjWmA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_25), args, argsLen_0); return result; } N_LIB_PRIVATE 
N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___mbjeaBETPixw9bUvyk31B6g)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_26), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___AfR9bXoD9bcehKoM7F8O79bYA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_27), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___nlZFDYB4M9bmBbYqEropRVw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_28), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___dwsIkeXQe0E8HKrzN9aRE5A)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_29), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___fIR1FG0QPRsKvEYKq4tJUQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) 
&TM__Vw9cfUOQOae9b9bzZBlucMZQg_30), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___jADQs38xm62v1oxF2cSvEw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_31), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___DZV83DjWnQ9a19atC2oeswXg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_32), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___sfvTjNjtOC86mU9bHczF6ow)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_33), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___9ab1aKSDn70Vte0NcIItnaQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_34), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___jadqNPnY9aM3oxYK6jarLrA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = 
runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_35), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___LvsIDF8olc08xBiqCYIUog)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_36), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___6Tfa1iP1ENVlWbe89cSELSQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_37), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___hKg2Id9cvzE5Dgl9cU31c4Vw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_38), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___H3xXuIFdbz4MNb5T6BSfcQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_39), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___ELXFo0GedkhGYj9bocTHZAg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = 
(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_40), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___9aLrcjgzGJE3f9ab2uR37jog)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_41), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___3Q9c5iS9btBDBXZVoQktb1XQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_42), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___MALQXTKXJv7x9a9c247satLQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_43), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___0nBiBCva6YS9a9bSV2Vr7Zxw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_44), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___yyhPPkMkLJqWG6p8HGn9aoA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { 
tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_45), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___t8gRNGR1flvaCNlBxuLn1A)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_46), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___xQaqlAwFuwxqBFixw7ewLg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_47), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___2SWcbuU7RHQR0b8y9aJ9a5VQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_48), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___gSgutt9b7GMWVGBkCt0UHAQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_49), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, 
percent___Vcuq0AWiVDndx4UH9cJ9cBRg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_50), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___l4wxq9cmPihXoF5xnDVNR1w)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_51), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___zgEKWXsZtT6lqQ6XlgfrsA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_52), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___uXZ30k0oJEqGPZW57O3dwg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_53), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___tTI9aMQiBZdiEeBIVh7QtYA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_54), args, argsLen_0); return result; } N_LIB_PRIVATE 
N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___VJBBlA9aMl5p0yYB1WzSMVg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_55), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___jw4Sb0OSpKH1T5cLz7iyzA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_56), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___0RQ2PINB4t8FjFlNUM6N9cQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_57), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___LQ9bGxpANW8yeg5P9c0UYAaQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_58), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___f8tdlskieCnWysl9c9blzqZg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) 
&TM__Vw9cfUOQOae9b9bzZBlucMZQg_59), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___KbFpNe1pZ7hIuQi7dp1dSQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_60), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___nunbo9aB0HmmYQJ3InIBEzQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_61), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___RBxLok7DyUB0aHl9bxPIl9bQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_62), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___NARRjCd1x5Fr7NTTcoPRrw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_63), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___NlLLwmZHOiJUpZfuk00AWA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = 
runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_64), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___mF9aI9b3hDjj53TD2C2gTrHA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_65), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___PafMws9cJ9arr9a0RVMoIHmAw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_66), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___3lAlmrWiRqEg9a9cd9a8kNhig)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_67), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___f8NIixSwWrk6SXQ3BFamWw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_68), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___TTRh79a14hh1gb0owIP1Y6Q)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = 
(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_69), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___TmeCjGna9cPfiHHcfqmKXjw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_70), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___FsfRVuOOBePjn9cQ9aK7Vh1w)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_71), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___paA0sar8RKZqiwEaDfWo2A)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_72), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___jr9cXNQhhlLDfFJH4RSjeZg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_73), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___EnzikEr9bDhOR6GYxWuYSwQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { 
tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_74), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___QqzUiJcAEZE2azDhIWHrgg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_75), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___20ZujjIFPkyqvS2OmenEAA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_76), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___Vxo9ayk1xB18if39aZ1TBnKA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_77), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___NtQEfuK9bXszNTfYU57z19bw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_78), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, 
percent___AKNexo4CH8G2vDeWW34Vpg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_79), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___LE3oWAmB5YDSDHm3LNHhCg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_80), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___W83I2xs7lC32PrMs9bq4P2w)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_81), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___JKMGBJtXtDvc0NwxujFmZQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_82), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___TA8WFV49atYpIneJatQWALw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_83), args, argsLen_0); return result; } N_LIB_PRIVATE 
N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___nPenDL3j2Q6A1an1Cl3oCA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_84), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___TNkzce2Sd9bck2QRtketc8A)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_85), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___kqRXw2WRJqDnfQK0N30ydw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_86), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___BKnrQUIV2xGn2MO0RK09aUw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_87), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___SCyrk9acEm3vLZhXCV1fGNg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_88), 
args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___erDe9aYc2BNxzH9brKlmtEBg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_89), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___HSAgkeH84eiEd8MfKIuBQA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_90), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___1AD3Wp47Hcdfg6PO2ac0NQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_91), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___T11tCz9bIGT2CcftAwrDXZw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_92), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___lS9bA1j3Ue6pp7sCliDsT8g)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = 
runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_93), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___M3h9cTlVBrj2vakKBqQRlMA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_94), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___BBAyGuVoK6QA7nXfPUIYKA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_95), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___g9b9arp3BWCGRHDe21SJso6w)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_96), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___09aVguRR64dWfw4b6fKBcqg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_97), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___tgUnLdPVK0vRqC0pWxMClQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = 
(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_98), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___FBNsdfF5FNrY4P9cYQIfvZQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_99), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___cB7zULPbG5vWWdCukRjdqg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_100), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___dpzmcz9a6kXbhFacdElIMOw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_101), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___AWFBEodxoi9a61KDUc9aiw1w)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_102), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___vHbYzYlzLPcurSm0Hu8InQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { 
tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_103), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___nzT6Rke9c7tkW9b3XMmld2LA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_104), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___9cCc2iMcL3MEBZTTL3LCW1w)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_105), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___ahBYcGrhpPvM5dTdzCQBrQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_106), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___XI9awM9a9aQ9cB9bcS7uDRsa1Rw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_107), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, 
percent___9cWNaGuyEpBbdBlD9b5nY1ug)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_108), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___6P67I9czJ9aa9aZzVyYWUiGlw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_109), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___S4jE5dFDtcCC8ODzxaJk6A)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_110), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___Msid9awGKVeVe7p3v7WfNQA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_111), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___xyRsdWsGY1DVVispyn0Xeg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_112), args, argsLen_0); return result; } 
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___EPABzhs2B9atAvHV4CUTw2Q)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_113), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___2MhCcipNmSHgcDtN4cr8ng)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_114), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___0ul9cDZYl7YkH1RhZBTd9c6A)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_115), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___QFf4DPoOk6Jy59cL2OASJzw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_116), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___7yDHbEsisDNKcqQHIRgOuQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) 
&TM__Vw9cfUOQOae9b9bzZBlucMZQg_117), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___GwVmUG4AZCEAP8dBk4TGHg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_118), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___q7DaQZqCe0lRO0rhBWzM0w)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_119), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___hGIvKp3CGssDQ2vSvfksxQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_120), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___9c1P82lz6H9anMKDbz1vYNpg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_121), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___dbg9bsMENUwtF9aO45wEGG3Q)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = 
runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_122), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___ym0Pr6z8A9ajyOAgotpd9a9bw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_123), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___izqbVTMtpY7kMiTK4bPJ6Q)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_124), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___rouofEnBX1ok9aMXmOsKdHg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_125), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___C3GQZbey70223GyG307UFg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_126), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___yxmLIVRKySYknm2wSBp9cpg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = 
(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_127), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___8u7UPO7ZpaMkWoJRtZLlYQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_128), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___xXT7cKE1NTiL4U2MdlA2yQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_129), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___44q9ak51X9b9bmuZ9cK4LsFWOg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_130), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___77dMna2dOod5LqwYkRMZGg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_131), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___QXMcmOst45ThYFLo9cOKDiQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { 
tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_132), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___zldA3DCxzpAhONjlfz7iIg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_133), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___dnB3So2xw9c189c09a9cc9b4hxA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_134), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___r2gXVULKoAtQjkgjf0Z4wg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_135), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___VsLzrOz1nS9cRBBz9ccZfETQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_136), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, 
percent___tRSKshYob5uzZE3eBVe59cg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_137), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___vcbf2lEZaiSjbAHwgt9aKXw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_138), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___sb2NV56uvmvOtYkgVsaVQQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_139), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___7STLi75js8HXlmFg7Abt9bQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_140), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___5O50gePV9adn3wgFGWjlOLQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_141), args, argsLen_0); return result; } 
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___9a3Y7eeGNXkOCLUktwxzN9ag)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_142), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___Ng8dczn37bLzoM9bsVdPwjQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_143), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___boICAAvO1zkTlYDOuEaj6g)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_144), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___LeuvM3mIc6pSNktpm9cHSVw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_145), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___mxQQ2vwZhwfDagj5SEXeHA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) 
&TM__Vw9cfUOQOae9b9bzZBlucMZQg_146), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___x2NKZw9brJpylbwEtLfx9a9bg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_147), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___TmT2Gs9cB7RN9cmo9c9cBpfKsA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_148), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___RiPFNabSvay09bAW4Jic2ag)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_149), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___efSHgbCUYoX1lUK7M9aj4Pg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_150), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___Vmgih7rhd9cXUC9cEBz2cwXQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; 
result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_151), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___rB3209aHcqpT39anNUezpSjg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_152), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___x85Q1O2QUnYbstPlxUCyAQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_153), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___L3AeZ1n9aK4C1jsBCeaCmlQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_154), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___ebmRHYtM9cCbYF6WvKDfQ9cg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_155), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___qE1JtEDDOvP6J49a9cv9aK1Dg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* 
result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_156), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___ctvQ2lU9b9bnVVpNP4GhIo2Q)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_157), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___8bHx2qDxS2yWIId1X52mqg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_158), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___kTDR7D9c9aomjcaUQOmKJ9csg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_159), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___1tj59chZC08k4TWYeZiqDnQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_160), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___533QKY9a8quvLM1SsLE1JfQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, 
NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_161), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___uFJUSitn9c1Tw6cF9cZf6x6Q)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_162), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___G8iCcDovsaw25PkF7wHs0g)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_163), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___SY4U2QvmoQxocaG8MOmyHA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_164), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___bhkFYKbURxGcJnKpswdr2Q)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_165), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, 
percent___lTsL0bi6njxzDh9c8A32r2w)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_166), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___k4VEB3kaBL72FRQN8buzSg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_167), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___YbQIA9cHUESCyYT1WEeIVbA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_168), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___66KauNYQRukYNgmb6bVXEA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_169), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___S550SlHmWbDpD7rs0J2lrA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_170), args, argsLen_0); return result; } N_LIB_PRIVATE 
N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___sGnLi1DjaBomQ9c9a6MOCA5g)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_171), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___bEKtSmboScaCP8PPnlOWqw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_172), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___ZpwWwpfBXgcQ6xoLOH4CJw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_173), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___GHW5yjG8N9c2BQBun6aBJzQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_174), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___Yup67SPGRVcwMdmZwc9cSag)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) 
&TM__Vw9cfUOQOae9b9bzZBlucMZQg_175), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___ec65mR1N7BSL9cmUa3z9czvA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_176), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___ibyK70G44kCK9cN8nAkxyGA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_177), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___H9b69aGZGrLOiKWQdd30yQ9bg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_178), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___Te7bvH18PbGe5siNJ9aDTTA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_179), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___MUaBvSw0MHw3qQi9bYavAmg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = 
runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_180), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___bWYxjLMocXEvYgQQcC63rw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_181), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___ZpcNBrQMfioSvQNxKHhu9aw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_182), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___gywCjjjPZobIva6liQWNLQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_183), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___6PDHoyz05lEjxGNE0k0ikw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_184), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___AXGsBlGV5DoEOwPJSl9bdJw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = 
(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_185), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___ygzR9aJ6oM1bZTq4Z2lNO3Q)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_186), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___uYVc6UX8hcaEdrHosUQAOw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_187), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___AlV8xJkjCXujAUesHxezgw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_188), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___L9asecuKwevQN2h9cWzyv6oA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_189), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___nZD9cadh12dcqTFsXBHbCRg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { 
tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_190), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___dz1JHdrf1p9bPB9ad2dZBtYw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_191), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___0MUu7DVBoaLHTVUZe9bKoIA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_192), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___29aIWEGnJW0wnITIeSKWfFg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_193), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___n2CigWG38YNInkiL4n8g7A)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_194), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, 
percent___bb3v9bDRLv9c9bcQzGH9c5H4Gw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_195), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___tkJq8W3gQVDjuu9aT3THC6Q)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_196), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___oyQkqbRkRzo43y6iRevkaA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_197), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___YuphtPwdJHG6BUJOVa9bX3w)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_198), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___EQxs5xa4FNWtMfcvmFZ9cMA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_199), args, argsLen_0); return result; } 
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___5YbjRZxm0g3SrdnL73aQaw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_200), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___MEALpIIbc0cKMcjQ7Xckzg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_201), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___yUc5o9ax9c9asIVNkfprLRPpA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_202), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___4JrnABFfF3UTQ3nO9a6mXzQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_203), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___bkAwkKoaz09cAQo9arQjGA0A)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) 
&TM__Vw9cfUOQOae9b9bzZBlucMZQg_204), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___7N9bV9cjVBHs9ciAhz7vgdI9aw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_205), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___QX9cU2fNK0jJrZNDQKnAycA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_206), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___vTbVjc6faJqdBrTckFLLWQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_207), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___v4k9cDtOUzGyUHJbnJ7kQKg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_208), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___0ym49cR6ES8k9bYWsnh1fELA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = 
runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_209), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___Jx78R9a9anGvjjocCaP8YgIg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_210), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___s0lnM9cZDB9bOREa4Fx1leBw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_211), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___aT7p9bNEmP3LxrK3OhspnSw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_212), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___mV75vMLuQ8rrQEUzNz6llA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_213), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___jhVz7tKuf0heLM2D3nL0gw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = 
(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_214), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___c4YKWXetPKpaUUF7Qft2gA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_215), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___rCIIoKC0OrXhpuTFTIZn0g)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_216), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___lXaYcLcHHuQ46VvpH6Qr2A)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_217), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___duX6hgjmpJtFFdvJVuoafg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_218), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___GNSb4l0oRsR1gu66azz1LQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { 
tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_219), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___LGbUtKnsZL8FcQiQN7sWEA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_220), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___e8Xf9ajw9cRlpuqnFnlEuSpA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_221), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___nVQhtKHyPC8pvPbUAUBU7A)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_222), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___9bI5GhokFUA9bgO9av819cgdBg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_223), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, 
percent___qTicKO8EMC9cWGOyybIz4WQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_224), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___yZHx0qMqBvbhmZ0fMuAP6A)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_225), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___YQzyPnY5vKAqE2RyLX0cew)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_226), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___cIILAsA6BeRrvHfloZIscg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_227), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___IwDTuHqkGn7wW16ga2ktSg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_228), args, argsLen_0); return result; } N_LIB_PRIVATE 
N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___lbkoHJP5AIgE86vP7MmlKw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_229), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___9b84wNYrm79cLYfx9bsPNHjPQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_230), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___K5ihI3kW9cFBh6sKlfEpJwg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_231), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___nEiBK88oEGnvYfkiei9cyJA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_232), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___Psy1qActyEYmIhrRo2KkJA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) 
&TM__Vw9cfUOQOae9b9bzZBlucMZQg_233), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___9cZzkwYphs086zWiuLotXLA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_234), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___kPsYd8d9cco3hhqO7CEAFeg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_235), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___BbOsdTh4ZRNKmiISHDyg3A)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_236), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___Py40oiVtYdIelNuiQQjpjw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_237), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___QzVlk7tEXgagMWC19aLvbkg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = 
runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_238), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___qxufH5vUl9aY2l9cFq39bnVwA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_239), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___jiTCvQQpgMU0bTrdVuECiw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_240), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___n4OrLXC1r9a83k5wz2NoWxQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_241), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___bJpxHYPJaxWBQn6QxwBA4w)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_242), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___fOn9b5Ij3ytw2Ui9a2CPI5zw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result 
= (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_243), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___zJU3FoYOdJ9bmuODPmqtgdQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_244), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___1MXpJAdeOMc2XMg5H7t9aSg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_245), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___VNAv31sqVgxrd9aXeFF5wYw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_246), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___MULS9c8dKz2mJ1U9a9cMyTCYw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_247), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___5TB09c2Iz60T0YagbSbI5RQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { 
tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_248), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___NIzUqj4Mr1E3EKy0AkJaXQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_249), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___yQdCkIARIVr9aqI8oVxi9cQw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_250), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___WYvjnWcyRjjjI0lasIi1YA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_251), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___hR4oq6WdDjEl0JIvQtvUlg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_252), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, 
percent___84GQPNcrIJtbrzuA7JnMPw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_253), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___SqZEI7bxySjmJX4GsXyvKw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_254), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___c1f569aWpTd825BTnv9bq4Xg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_255), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___ibl3qMPOrpGT2x8X7vmbeQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_256), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___bBcuDHMXr6Kz1tr7BzD9aKw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_257), args, argsLen_0); return result; } N_LIB_PRIVATE 
N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___aDvifvZOUmduC6Unfm69bKA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_258), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___5kuxCbMO8PVJc9aJbXScUOQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_259), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___Uu9cBz7dxPVDFhF9aLzWecyQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_260), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___WWt3il4CHPiYP10KdNLrWw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_261), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___hc7hMh137dtaNdd3qw28EQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) 
&TM__Vw9cfUOQOae9b9bzZBlucMZQg_262), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___XWz49cQA2QiZaLkqHBU5L3g)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_263), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___Au81R9a68Rv3gwlPtvDarPg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_264), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___Yw741acxvsUs9cOX9cuiDj9bw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_265), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___T9caGByKkBhaXSZ6fCJLIdQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_266), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___JmTWN8YiVKTZuvCYW2XNZA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = 
runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_267), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___Kbv8OIo8zpawh7SNMbfgkA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_268), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___B0OBOTOJQENvDd71LJ9b19bw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_269), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___22ELRKd9bDuNug6qvIihS3A)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_270), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___ddrHnMlEhcHznkXv27msmQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_271), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___yhJ9aDxHfJqHvWO0i6N9bukQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = 
(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_272), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___MLJpsW0DAZYB8lAgq09cUjg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_273), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___8tWfSjtTOlDafxpQPvChAA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_274), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___xKLwwPkFSVy2Dtn9cuJ78xw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_275), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___hdRijZdoPR3UGq9aUw2zFDQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_276), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___ZjQc8bFVF8ePFYxjN0iVVg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { 
tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_277), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___SiqB8gWmdYKb4vtgqYrrMA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_278), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___2Ixv9aZ9bvpNaVAVzYBJlUPg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_279), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___HoXSbgR7plMG7Fef0fcy9aw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_280), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___H1Ma2EXqegHnMqzJZ4SA1g)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_281), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, 
percent___jpXTCDNVjIi5r4hbHN5SVQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_282), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___4L62Yp9bLO2ZDcvBG9bSvP9bw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_283), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___MCSdS9cTdQvttqiM9azLzkDg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_284), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___E9bSTz8DQ4tgiLV9avQjFgFA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_285), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___3CQpPXVDiNqC3jKO8Juliw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_286), args, argsLen_0); return result; } 
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___w50CkyHBltcyR8rWxttZCg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_287), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___fmEfDTfNDkVDxWi9c0O6D2g)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_288), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___k9bgPIs43oLgxnk1l4TNQaw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_289), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___5MqeIopvDuA9aozxL79cQ88g)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_290), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___Zp9bMZDO5tEkvVLTxiKsBkA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) 
&TM__Vw9cfUOQOae9b9bzZBlucMZQg_291), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___j5FZyaqnqjc2dcsUkAp28Q)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_292), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___EbvvG9awBeRKzx8xuBIb7TA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_293), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___9a8besSQa09cOOt9b9cgdVwY9aQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_294), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___oVKF7oq59cRGAaMpvWzNWbw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_295), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___7ru3bwKuSx4Sc8ilsBmX3g)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = 
runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_296), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___MDIdJXTVckPj57aO7LMVgw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_297), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___vQDE0VOBftnrpkVsM9cme4w)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_298), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___9bmR9bM9b0qqEqU0QJKnmLQnA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_299), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___88tWbH31SmOWJjgJ7RnfHA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_300), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___t1CB59bEwlxfHZhNwNNz1bw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = 
(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_301), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___YbLM7ZajsWOFLl4iSo0Krg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_302), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___rH7Ns9bqAnnfkukwBIlz9bKg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_303), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___zx9ctq3Ffe9aysjoWhZOzevQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_304), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___T9a21DAzFCa3OqRooKKtkqw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_305), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___Y4DThr9bpMbmoKpvgT1rYwg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { 
tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_306), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___811qrD9bMr21weOkImaKvIA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_307), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___YNifhKTQWQRf1atK7E3Qmg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_308), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___YfbBxPLyPvVS6F2y9bSUFIA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_309), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___OBvl4G6evYkvK9b9bClFGqNw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_310), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, 
percent___pHsLkkx9bTDctZjmJqwCYRA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_311), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___ksH6NowTz9bh4eMOdyaiR1w)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_312), args, argsLen_0); return result; } static N_INLINE(void, nimSetMem__zxfKBYntu9cBapkhrCOk1fgmemory)(void* a, int v, NI size) { void* T1_; T1_ = (void*)0; T1_ = memset(a, v, ((size_t) (size))); } static N_INLINE(void, nimZeroMem)(void* p, NI size) { nimSetMem__zxfKBYntu9cBapkhrCOk1fgmemory(p, ((int) 0), size); } static N_INLINE(NCSTRING, nimToCStringConv)(NimStringDesc* s) { NCSTRING result; result = (NCSTRING)0; { NIM_BOOL T3_; T3_ = (NIM_BOOL)0; T3_ = (s == NIM_NIL); if (T3_) goto LA4_; T3_ = ((*s).Sup.len == ((NI) 0)); LA4_: ; if (!T3_) goto LA5_; result = ""; } goto LA1_; LA5_: ; { result = ((NCSTRING) ((*s).data)); } LA1_: ; return result; } N_LIB_PRIVATE N_NIMCALL(NIM_BOOL, equalsFile__9bihNFg7Qajcg9arfx5cr9aHA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* r, FILE* f) { NIM_BOOL result; tyArray__9bKy7UA2LOi2vzOViufaW1Q buf; NI bpos; NI blen; NI btotal; NI rtotal; NIM_BOOL T27_; NI T28_; { result = (NIM_BOOL)0; nimZeroMem((void*)buf, sizeof(tyArray__9bKy7UA2LOi2vzOViufaW1Q)); bpos = ((NI) 1024); blen = ((NI) 1024); btotal = ((NI) 0); rtotal = ((NI) 0); { NimStringDesc* s; s = (NimStringDesc*)0; { tySequence__WwUFq9cJ2xKRlsAWVEHyPRg* stack; if (!!((r == NIM_NIL))) goto LA4_; stack = (tySequence__WwUFq9cJ2xKRlsAWVEHyPRg*) 
newSeq((&NTI__WwUFq9cJ2xKRlsAWVEHyPRg_), 1); asgnRef((void**) (&stack->data[0]), r); { while (1) { NI T8_; tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* it; NI spos; NI slen; T8_ = (stack ? stack->Sup.len : 0); if (!(((NI) 0) < T8_)) goto LA7; it = pop__9c4Y4hTtvRqjj2EC8KP9aqDAsystem((&stack)); { while (1) { NI T11_; if (!!(((*it).left == NIM_NIL))) goto LA10; stack = (tySequence__WwUFq9cJ2xKRlsAWVEHyPRg*) incrSeqV3((TGenericSeq*)(stack), (&NTI__WwUFq9cJ2xKRlsAWVEHyPRg_)); T11_ = stack->Sup.len++; asgnRef((void**) (&stack->data[T11_]), (*it).right); it = (*it).left; } LA10: ; } s = (*it).data; spos = ((NI) 0); slen = (s ? s->Sup.len : 0); rtotal += slen; { while (1) { NI n; if (!(spos < slen)) goto LA13; { if (!(bpos == blen)) goto LA16_; bpos = ((NI) 0); blen = readBuffer__Y9atVWUcVyKHG9aBP4D0P9czA(f, ((void*) ((&buf[(((NI) 0))- 0]))), ((NI) 1024)); btotal += blen; { if (!(blen == ((NI) 0))) goto LA20_; result = NIM_FALSE; goto BeforeRet_; } LA20_: ; } LA16_: ; n = (((NI)(blen - bpos) <= (NI)(slen - spos)) ? 
(NI)(blen - bpos) : (NI)(slen - spos)); { NIM_BOOL T24_; T24_ = (NIM_BOOL)0; T24_ = equalMem__9bGgqEk7RXXl6eqM9c1HdELAsystem(((void*) ((&buf[(bpos)- 0]))), ((void*) ((NI)(((NI) (nimToCStringConv(s))) + spos))), ((NI) (n))); if (!!(T24_)) goto LA25_; result = NIM_FALSE; goto BeforeRet_; } LA25_: ; spos += n; bpos += n; } LA13: ; } } LA7: ; } } LA4_: ; } T27_ = (NIM_BOOL)0; T28_ = (NI)0; T28_ = readBuffer__Y9atVWUcVyKHG9aBP4D0P9czA(f, ((void*) ((&buf[(((NI) 0))- 0]))), ((NI) 1)); T27_ = (T28_ == ((NI) 0)); if (!(T27_)) goto LA29_; T27_ = (btotal == rtotal); LA29_: ; result = T27_; }BeforeRet_: ; return result; } N_LIB_PRIVATE N_NIMCALL(NIM_BOOL, equalsFile__Wiam9c8x73Mtmbj0r4Ppikg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* r, NimStringDesc* filename) { NIM_BOOL result; FILE* f; result = (NIM_BOOL)0; f = (FILE*)0; result = open__gq12VLhVO0NBzUTnGgz4nw(&f, filename, ((tyEnum_FileMode__ZJfK20XeZ9bv2j1pZjw9aswg) 0), ((NI) -1)); { if (!result) goto LA3_; result = equalsFile__9bihNFg7Qajcg9arfx5cr9aHA(r, f); close__fU6ZlJAtQ9bre04EDZLdGsA_3(f); } LA3_: ; return result; } N_LIB_PRIVATE N_NIMCALL(NIM_BOOL, writeRope__LLRRC42xWBSkxzV9bsPu7lA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* head, NimStringDesc* filename) { NIM_BOOL result; FILE* f; result = (NIM_BOOL)0; f = (FILE*)0; { NIM_BOOL T3_; T3_ = (NIM_BOOL)0; T3_ = open__gq12VLhVO0NBzUTnGgz4nw(&f, filename, ((tyEnum_FileMode__ZJfK20XeZ9bv2j1pZjw9aswg) 1), ((NI) -1)); if (!T3_) goto LA4_; { if (!!((head == NIM_NIL))) goto LA8_; writeRope__FwuzOBq6SLlanVUstm8q9cA(f, head); } LA8_: ; close__fU6ZlJAtQ9bre04EDZLdGsA_3(f); result = NIM_TRUE; } goto LA1_; LA4_: ; { result = NIM_FALSE; } LA1_: ; return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___T3CpMgcFHzYracJ80CUZBQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = 
runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_313), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___6wQcdZnh9aH29ay5rwY6M5fA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_314), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___y39ant8iE9bjKB0kbkRCAibQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_315), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___RKXvZR1cmZW5dfjtFQCG3g)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_316), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___nEA33x9cMfuJw3ZiGbn25iw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_317), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___0xK6HolrLvVFWil73hZYbA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = 
(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_318), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___Z2c9cvs0wVVVqTEZ3Qwe9bfw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_319), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___AxDJCYpgPoquRsZtiOnpRw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_320), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___dU9cenGIcVUltUO1088LhYQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_321), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___TLpRy9aDJ1Ni4vccOIoiMbA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_322), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___RzB0z3UV9bb4kXUEGyS9crRA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { 
tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_323), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___Z1QwTAihBHnxe59cytXnhmw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_324), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___XZnCV59at0sqX6ShEjlFLgw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_325), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___YLzwVVtf4fuPYZVeMQOa0Q)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_326), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___CtS8L8cOLTsSuQ10mtHsvw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) NIM_NIL), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, 
percent___mPpmmd13MIZLTbd1oOdSkw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_327), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___Th3qC4WgcAhWPSlLw7vZ9cg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_328), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___3RPy0XXevrEBMts1Mb9arGw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_329), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___gqwqalZtiJtCgAF9bY5S6qQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_330), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___G9bYX9bu7ufcttiARCDUJ0qg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_331), args, argsLen_0); return result; } 
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___W0CV9bE9bNiLgazfFZjoQCBg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_332), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___ecC7jlB6gBWrt0K9byHohPw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_333), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___hFzCKQOJ8Eao2AJk5HOvxA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_334), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___62079cK9bsws1aAJqEmAGo6w)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_335), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___hO1UTpWJhaojnhUyqfmgPQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) 
&TM__Vw9cfUOQOae9b9bzZBlucMZQg_336), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___wlKCT75QSpBNooI9a2xvWeQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_8), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___uD0SC9bUeWpB9cK7V1aBT9aNQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_337), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___Uez7zQbKzeDFToq2Yh43bA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_338), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___JbygmsEkVsyK85BPVFvwbg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_339), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___FLXrAGf7HFTHIGh8Xuickg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = 
runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_340), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___hmfCuT8fgBmRlPR25L7ZOw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_341), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___HUHatwko3S0fuszXQAOSQQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_342), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___gGKEcvCOVzpTQoSXzO01Dw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_343), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___LMnNsJkYlruXHnF5LV9c3pA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_344), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___uJ11bTQ8dBBAX88A2cyICw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = 
(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_345), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___2D3IUNoEAKKLxuRqVNosPQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_346), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___o7SGM9buciKf5BOjTvMKA7w)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_347), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___Ht0mWR3LosfEZ8SopJcmEA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_348), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___GweM9byC8cQI9cehUzlYVs5A)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_349), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___Xnze9a4kYSwHurdPnhyNGzQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { 
tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_350), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___sGaOrvR5YSM9cGUajaqcNOw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_351), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___GF60428RM29aXV0LYutm9aOA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_352), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___ciTj4q9cGhcXiXY9bPemZVvw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_353), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___HLoe040Vi0LPzmTid9aLGdw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_354), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, 
percent___tnP9cO5PduJRSEeqtm9bocEg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_355), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___S6XcU2shl8EfYxL7utXbwg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_356), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___3GvB8fuMNh8BXF8IoORCxw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_357), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___RhAtD9c9aECDorIc8rDhMF9bw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_358), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___CSdlEV0i9aXEHNuC1G9aIEbw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_359), args, argsLen_0); return result; } 
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___4SLS9cx2c8VCFIilepFlOeg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_360), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___amX0pef5rA4JAmWZ6ZB2Nw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_361), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___xAta147ahLKNrJMPPP5B6g)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_362), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___sshAiIx49ba6saVSAWuyFuA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_363), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___TmulmJw2SZspd0rz2PYvQw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) 
&TM__Vw9cfUOQOae9b9bzZBlucMZQg_364), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___UFeu00R8dNoyzL8vy54mnQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_365), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___qYiwFpynEwFeSf3Aa2sS0g)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_366), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___6xseTZmgyslBQb6RMm9b4wA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_367), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___KsZXXO4zKP47iruPcSEryQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_368), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___TUxzei0sBfo3GESRTg1T5w)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = 
runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_369), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___ikDBM4Dyw9c2kuwAAswRyOw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_370), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___ht9cduX4yJQKi2Gi685ag5A)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_371), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___Wsnl5zC9cCEBdwJcHgpLf0g)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_372), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___deWmrKhbFG0MxH9cDr9cnhfQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_373), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___HiCTlq0dXhMZvpDtUGWGQA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = 
(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_374), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___aagcnoz4kFWlzsoVgR9b0NQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_375), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___oYhFcOWR4tEylepRJJLrlA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_376), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___3RBmOS8xzFTxpuGVryQycg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_377), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___apXghcMDCUp9col7jN5spHA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_378), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___9cNvJ1SVovK9b29bKmwKyiijw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { 
tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_379), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___0mbMVYCe5Qwl9aQOKV3sh3w)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_380), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___03lrwELd9clj29bFkdXAVxkw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_381), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___8croAZ6oMdSPXHbIisuppw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_382), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___TDLJ9ciKDBoW4ouZs855Csg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_383), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, 
percent___Mk2KRdMWX4H3L9aBEG2elgQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_384), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___pFXgvxsz2L5f27ImZwJwzQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_385), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___n9aTlv49bCxoRKQNZiWsaW2g)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_386), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___y3oNivo8px1XzxmB9b2OY5g)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_387), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___Pnqkcr360suaX84kwXMuCA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_388), args, argsLen_0); return result; } 
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___FA4ohw0aOufzzLhmw9aUAhA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_389), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___SWZi8EY4Pz39bBPSp9cbtZMg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_390), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___XaBXRInsoVU7DBc2WK8dzg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_391), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___NdMO5d09brFwLfDc8ciTSqQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_392), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___E62TlyqwqpEwqcA0YTjttw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) 
&TM__Vw9cfUOQOae9b9bzZBlucMZQg_393), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___m4T7v0qnGpOgwmMenKcgwg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_394), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___SKTmZPSgcdPr3Du3ia9b9czg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_395), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___ItxAXpnPzfUbYRPsHgKrPw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_396), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___ggqZXIgPaS71ubw22cYODw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_397), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___LLnl4aDVJynim7LQvfJKLQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = 
runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_398), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___Ob6yLhv7QvbU9bdZj8Nw2kA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_399), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___qfsHROU9aHSaYGq3tpw1XDg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_400), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___j9bcJJvtd9bur0VZUQL3ibgA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_401), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___32ITt7hKDrhn9bXvKbmnE9bw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_402), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___ZAOkVi5SmgPcGpCSuSRXVA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result 
= (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_403), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___smDIOmjGgf8ZP9bfDyv43bQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_404), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___1jtIbjhXi2wH1iWPyC9bgAQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_405), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___NPgb4kECDcV8MICSil6Rjw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_406), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___cQHGAtgSLYV7mm9bnVGYGRA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_407), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___Q4LBu2cVl8IcNTrtxd6B6A)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { 
tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_408), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___M36w8F9bFwighD3K39bvtVWw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_409), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___Wm11wQtuJBQgTy9a39apz0eA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_410), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___0bUw514mSumiNnSjkD0bqw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_411), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___6hxDi5nlebu1DFLqpYq5lw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_412), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, 
percent___GkWgkK8SyjrFfWjGRwKWrw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_413), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___oubCLvBtU9aRB9bhG2vbCDeg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_414), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___KTcAQx04UE87HYZ48ZBm2A)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_415), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___Y6zpqvbZwK8tJZiKs9agbGw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_416), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___2OGTIxEeE0xFVRpz5TxKyg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_417), args, argsLen_0); return result; } N_LIB_PRIVATE 
N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___0xZtTB2eXM1dRd9aneL5VPw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_418), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___amO46kEKgIeOmW50ayV6nA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_419), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___3lABfXU9aXZsyfylYizY8KA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_420), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___5JCQx3oDHEcLdsEz6Rx0Rw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_421), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___dTtf7fil83VcW2Mkkr7scw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) 
&TM__Vw9cfUOQOae9b9bzZBlucMZQg_422), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___88NG6Rr5xfTcA6hqLfZ2iw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_423), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___1DWSTPxvqlc4A2xRDmjZDw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_424), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___y5Z6ewsHLxj9ctzxTLPCLmw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_425), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___CHBd5pGE9c8nq4KNqM8K48g)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_426), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___y2h2X887dhz5sEoD4C8ezQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = 
runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_427), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___dQfg2HrsVY6E7P22Nis1MA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_428), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___0b2Bm7vpM8YAMKp9cuAwg3g)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_429), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___1Hh3EN9c4pkzdKB09bo9c9aTBg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_430), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___AOSgPOjXfsLWEICRXv3U2g)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_431), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___gN4yb6p4ql6iVJOPAjLEJQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = 
(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_432), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___WIg2bxfQLkmzIdOv1JkRqw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_433), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___3Klw9agVDELeF44OQ6PnRiA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_434), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___LL6jCaqBGLwC1sCgmCAEhQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_435), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___S9b9bs03lj0NJlhXUmrylsnA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_436), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___fphSfWWyYSWLARtGIpYB9aw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { 
tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_437), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___As9aDT7fkqstj16MQnIGPhA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_438), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___eAZ21NmzzIsugeSSkcxIkQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_439), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___D2dSwFjTnRSmeKOoMm6w0Q)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_440), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___HlU9bV2X0HOPcGJnQlGm9c9aQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_441), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, 
percent___p2lIQAdDBUpuVZML6ecUOg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_442), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___5hzyGWCNjqgqPj0O7sSnkg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_443), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___l1wvVBeU1Nnie8cWddgPCA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_444), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___yVZN2jQzbJwg3E9cehLff9cg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_445), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___E9br9b8BVYaWzg6CXcn9c6EXw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_446), args, argsLen_0); return result; } 
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___qPugJ1Nc2L1EdGwEF0AJ0Q)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_447), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___HzZyrXo2QFynm1T8X76cCw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_448), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___O2nyVw4tGD6MMc6u7I9bH9cA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_449), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___IQYZUimFiAV9axFM9c64hKjA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_450), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___RCJU8UTq9cE0Jsi59anAbTIQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) 
&TM__Vw9cfUOQOae9b9bzZBlucMZQg_451), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___S6vmSaSCgC4V2L5H7OWeZg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_452), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___Eqr9cgWCkrZrUG3sg0CawIQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_453), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___9c1lq60gbfPY9cyjQN4YouTQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_454), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___1nMXoOe6cENU7004pnh6wQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_455), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___ALynLzo8zWvno8ZxASdm4A)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = 
runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_456), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___tlkWMVJPsx9aWUbp8FMjQ4w)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_457), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___xPW5KjObCPL2lJmHFoqfjg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_458), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___mTh2rYVPWUnI8B7kU3NWUg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_459), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___aoMj8hrcFi4HlPDZ9a9alpig)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_460), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___Yj64cHk9ajrzJI39bfpBfOVA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = 
(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_461), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___9bY6R9buTsrqJYQAuD39cegOA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_462), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___U9b6hkqS6N7XIWr0gy8z9bug)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_463), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___2MwhwhkHOiavfXQl9aey8nA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_464), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___JRV6DlpqdegYGLcFjNPv0g)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_465), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___ryMkoQkM4zAjyp0800DrDQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { 
tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_466), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___iW9bjdQoXkul7L0e76qo8XQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_467), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___i3z9am8Hzy69bSo575pRdzGw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_468), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___SkAQPSnCyiRvin57XULW4A)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_469), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___Bym8FwH29aQE8fth9ar38yJQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_470), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, 
percent___CbbQqCp6itJgwKVRfTr69ag)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_471), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___HWgoOloM1oqcI9aZ9bEkoBhg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_472), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___Anf1UHjOzz9aHgMOgtnEPZA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_473), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___tDrtnFWakp63hyE9cfImgZw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_474), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___JwpI2xnYNfR68HstfDi1yQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_475), args, argsLen_0); return result; } 
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___23SvbIxPpf5MIOga79arr6g)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_476), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___uVZXJGmbOGIG9bfkI4ZDwJQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_477), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___UxL9a0Hh7Km0Z0DIk7hp9cBA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_478), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___QxiH9aM0po7vA19b2s1CjdEA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_479), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___FZt89ajG3TKAhfL9aW4s7hcA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) 
&TM__Vw9cfUOQOae9b9bzZBlucMZQg_480), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___5GaE39bOOeQZy3EFOEIy5QA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_481), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___SA9cvbR3uc9cP50nnaEBJctw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_482), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___KweYGQ9bFYg76nmoxpk8ksA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_483), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___AhY63HjLy2bPe9bslUNBuBQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_484), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___3m7YwdrxIvOkmvfnm5JYSA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = 
runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_485), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___TEWiK8QWtRTCIQ9av7sW8LA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_486), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___9an6bUHwpxqyL2kgNHX3MEg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_487), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___kLwAORKb0c4oFgFTN9aEN8Q)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_488), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___Qm29ctdy9c4sqKctTsqiBWIg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_489), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___UyNt2Asj9aa2ScoGVo9cCnNw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result 
= (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_490), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___xXvQyblNYV215UGR9cTka7Q)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_491), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___LYjQOKn1i9ccw8AFlvPGkCg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_492), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___THj0xNXkqJf6reD7exsGbA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_493), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___3oFXAbir9c7XcKzu9bpgAM9bA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_494), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___4sbi76q7ZLqpKbD3pwJ59bQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { 
tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_495), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___Q9cOQGrP4lOdbYHXMQ1yZtg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_496), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___0AX4Q6cA8nOXUagvzFqt0A)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_497), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___qQ3g8SwjZoIFAay85NaiEA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_498), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___M0TByFCTj9bbOkDSRpFz3LA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_499), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, 
percent___OikfyLf8HmjI9auYLFoaVqg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_500), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___3KVF9aLACI1h11BqZrkzjNg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_501), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___ial810twbEzfkHaHMFYNCg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_502), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___Z7wCJf0WipOQOQ4ZZNBIEw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_503), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___Xpm9cGf2grEXdjAQV9arqWBQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_504), args, argsLen_0); return result; } 
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___sqxyWwlLrfrdyc9b3BINcXQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_505), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___ztLQ2Orupb9b9b3KrCvoK9cbQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_506), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___PI6febxsdTbySkLsIEqHKw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_507), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___sGRyuC9caCxfdM1i8W4fjgw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_508), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___vWWA89aSvs5QwAFN4Jdr2IA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) 
&TM__Vw9cfUOQOae9b9bzZBlucMZQg_509), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(NIM_BOOL, writeRopeIfNotEqual__Wiam9c8x73Mtmbj0r4Ppikg_2)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* r, NimStringDesc* filename) { NIM_BOOL result; result = (NIM_BOOL)0; { NIM_BOOL T3_; T3_ = (NIM_BOOL)0; T3_ = equalsFile__Wiam9c8x73Mtmbj0r4Ppikg(r, filename); if (!!(T3_)) goto LA4_; result = writeRope__LLRRC42xWBSkxzV9bsPu7lA(r, filename); } goto LA1_; LA4_: ; { result = NIM_FALSE; } LA1_: ; return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___lQuk161wRVxbYxfH80Iwcw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_510), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___UQrwMIIitnm9cEflSXdCkPg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_511), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___A9aKFJUF6ZjJQfrcPHJigOQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_512), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___8ehuHmXS8omgqFrdYMsPBg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = 
(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_513), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___2Opo6JkHmCRmDA87qcGfvg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_514), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___C7jQ1fH79bR8HRQrbJjFKDg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_515), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___2eu2gmgXiDUZkBgTVqD7pg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_516), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___cCI1wZSoDB14achJW7ZFSQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_517), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___dkLAWa1dMAcGEAyfUZ59bRA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { 
tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_518), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___DuvwOyJJ9b2gpVM9cV7DCFSQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_519), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___4MBgNtJLOyqbjfGytl2OTw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_520), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___336bx9aXX7GZckfWQE5Jy3g)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_521), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___IbsmsXdtDOH7pLpzh9cmAOA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_522), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, 
percent___9cGelOO9b6sliTnobJf6XAsg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_523), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___aNorSJCSJyyDo7w0s6eynA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_524), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___BYRFs7dwiqyMIzbsx9cDq8Q)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_525), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___TavFv5xK0dxxJCk9b4v34zg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_526), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___9aAWQyBOqadJYgBT29bzliAw)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_527), args, argsLen_0); return result; } 
N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___zpFS2Xy9cmoAoqCFSUQj1gg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_528), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___Nz9cwOtMmcX2gklRogKhyEA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_529), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___YGYo0XYmypYw3N26AYh7ug)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_530), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___e8Z4ajz6IErIB0a6mpq4Wg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_531), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___eqn09cqDPu9csxGUOSa2untg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) 
&TM__Vw9cfUOQOae9b9bzZBlucMZQg_532), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___rZ5o6ziDKz4d3bfaN54Dgg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_533), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___YGa4o1aenD9cjoU03CAgtqQ)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_534), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___b2PLtFwpZkVmYhHWvW4i1Q)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_535), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___ctY4Nx9aQFC9bl9c2wbRLoFYA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_536), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___xsFAphqq4CRpmuZ79bXVLrA)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = 
runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_537), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___SSpcZv60d0mAp5H4Mb5hpg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_538), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___TtzOadDB4I9a89cWej19a2PNg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_539), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*, percent___KKiSvh9a121M0uSQjcJhhMg)(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA** args, NI argsLen_0) { tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA* result; result = (tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*)0; result = runtimeFormat__9bvKdnhoYI2ta9agQNm3orMA(((NimStringDesc*) &TM__Vw9cfUOQOae9b9bzZBlucMZQg_540), args, argsLen_0); return result; } N_LIB_PRIVATE N_NIMCALL(void, compiler_ropesInit000)(void) { { nimRegisterGlobalMarker(TM__Vw9cfUOQOae9b9bzZBlucMZQg_3); gCacheTries__5GfZTThHPBfB9bjRZdFluBw = ((NI) 0); gCacheMisses__fLRm9am8S0daYBVNK6JKyBg = ((NI) 0); gCacheIntTries__opyfsNv023Md1P05mqsDew = ((NI) 0); } } N_LIB_PRIVATE N_NIMCALL(void, compiler_ropesDatInit000)(void) { static TNimNode* TM__Vw9cfUOQOae9b9bzZBlucMZQg_2_4[4]; static TNimNode TM__Vw9cfUOQOae9b9bzZBlucMZQg_0[5]; NTI__OFzf0kSiPTcNreUIeJgWVA_.size = sizeof(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA); NTI__OFzf0kSiPTcNreUIeJgWVA_.kind = 17; NTI__OFzf0kSiPTcNreUIeJgWVA_.base = 
(&NTI__ytyiCJqK439aF9cIibuRVpAg_); TM__Vw9cfUOQOae9b9bzZBlucMZQg_2_4[0] = &TM__Vw9cfUOQOae9b9bzZBlucMZQg_0[1]; TM__Vw9cfUOQOae9b9bzZBlucMZQg_0[1].kind = 1; TM__Vw9cfUOQOae9b9bzZBlucMZQg_0[1].offset = offsetof(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA, left); TM__Vw9cfUOQOae9b9bzZBlucMZQg_0[1].typ = (&NTI__4hi0XQqK9aLiPuWT9acsXm9aQ_); TM__Vw9cfUOQOae9b9bzZBlucMZQg_0[1].name = "left"; TM__Vw9cfUOQOae9b9bzZBlucMZQg_2_4[1] = &TM__Vw9cfUOQOae9b9bzZBlucMZQg_0[2]; TM__Vw9cfUOQOae9b9bzZBlucMZQg_0[2].kind = 1; TM__Vw9cfUOQOae9b9bzZBlucMZQg_0[2].offset = offsetof(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA, right); TM__Vw9cfUOQOae9b9bzZBlucMZQg_0[2].typ = (&NTI__4hi0XQqK9aLiPuWT9acsXm9aQ_); TM__Vw9cfUOQOae9b9bzZBlucMZQg_0[2].name = "right"; TM__Vw9cfUOQOae9b9bzZBlucMZQg_2_4[2] = &TM__Vw9cfUOQOae9b9bzZBlucMZQg_0[3]; TM__Vw9cfUOQOae9b9bzZBlucMZQg_0[3].kind = 1; TM__Vw9cfUOQOae9b9bzZBlucMZQg_0[3].offset = offsetof(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA, L); TM__Vw9cfUOQOae9b9bzZBlucMZQg_0[3].typ = (&NTI__rR5Bzr1D5krxoo1NcNyeMA_); TM__Vw9cfUOQOae9b9bzZBlucMZQg_0[3].name = "L"; TM__Vw9cfUOQOae9b9bzZBlucMZQg_2_4[3] = &TM__Vw9cfUOQOae9b9bzZBlucMZQg_0[4]; TM__Vw9cfUOQOae9b9bzZBlucMZQg_0[4].kind = 1; TM__Vw9cfUOQOae9b9bzZBlucMZQg_0[4].offset = offsetof(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA, data); TM__Vw9cfUOQOae9b9bzZBlucMZQg_0[4].typ = (&NTI__77mFvmsOLKik79ci2hXkHEg_); TM__Vw9cfUOQOae9b9bzZBlucMZQg_0[4].name = "data"; TM__Vw9cfUOQOae9b9bzZBlucMZQg_0[0].len = 4; TM__Vw9cfUOQOae9b9bzZBlucMZQg_0[0].kind = 2; TM__Vw9cfUOQOae9b9bzZBlucMZQg_0[0].sons = &TM__Vw9cfUOQOae9b9bzZBlucMZQg_2_4[0]; NTI__OFzf0kSiPTcNreUIeJgWVA_.node = &TM__Vw9cfUOQOae9b9bzZBlucMZQg_0[0]; NTI__4hi0XQqK9aLiPuWT9acsXm9aQ_.size = sizeof(tyObject_RopeObj__OFzf0kSiPTcNreUIeJgWVA*); NTI__4hi0XQqK9aLiPuWT9acsXm9aQ_.kind = 22; NTI__4hi0XQqK9aLiPuWT9acsXm9aQ_.base = (&NTI__OFzf0kSiPTcNreUIeJgWVA_); NTI__4hi0XQqK9aLiPuWT9acsXm9aQ_.marker = Marker_tyRef__4hi0XQqK9aLiPuWT9acsXm9aQ; NTI__USLYl0Lpkimm4FABiJ3ldA_.size = 
sizeof(tyArray__USLYl0Lpkimm4FABiJ3ldA); NTI__USLYl0Lpkimm4FABiJ3ldA_.kind = 16; NTI__USLYl0Lpkimm4FABiJ3ldA_.base = (&NTI__4hi0XQqK9aLiPuWT9acsXm9aQ_); }
pooling_2x2.h
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

#if defined(__ARM_NEON)
// Max pooling, kernel 2x2, stride 2: each output element is the maximum of a
// 2x2 patch of one input channel.  Channels map 1:1 between bottom_blob and
// top_blob and are processed independently (OpenMP over channels).
// NOTE(review): assumes top_blob is pre-allocated with outw/outh matching the
// 2x2/s2 geometry of bottom_blob — confirm against the caller.
static void pooling2x2s2_max_neon(const Mat& bottom_blob, Mat& top_blob)
{
    int w = bottom_blob.w;
    int inch = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;

    // After one output row, r0/r1 have advanced 2*outw floats into their input
    // rows; tailstep skips the unconsumed tail of that row (w - 2*outw) plus
    // one full row (w) so they land on the next pair of input rows.
    const int tailstep = w - 2*outw + w;

    #pragma omp parallel for
    for (int q=0; q<inch; q++)
    {
        const float* img0 = bottom_blob.channel(q);
        float* outptr = top_blob.channel(q);

        // r0/r1 walk the two adjacent input rows that feed one output row.
        const float* r0 = img0;
        const float* r1 = img0 + w;

        for (int i = 0; i < outh; i++)
        {
#if defined(__ARM_NEON)
            // Vector path produces 4 outputs (consumes 8 input columns) per
            // iteration; 'remain' outputs are handled by the scalar tail.
            int nn = outw >> 2;
            int remain = outw - (nn << 2);
#else
            int remain = outw;
#endif // __ARM_NEON

#if defined(__ARM_NEON)
#if __aarch64__
            if (nn > 0)
            {
            // v0,v1 = 8 floats of row0; v2,v3 = 8 floats of row1.
            // fmax   -> vertical max of the two rows
            // fmaxp  -> pairwise (horizontal) max -> 4 pooled results
            asm volatile(
                "0:                                   \n"
                "prfm pldl1keep, [%1, #256]           \n"
                "prfm pldl1keep, [%2, #256]           \n"
                "ld1 {v0.4s, v1.4s}, [%1], #32        \n"
                "ld1 {v2.4s, v3.4s}, [%2], #32        \n"
                "fmax v0.4s, v0.4s, v2.4s             \n"
                "fmax v1.4s, v1.4s, v3.4s             \n"
                "fmaxp v2.4s, v0.4s, v1.4s            \n"
                "subs %w0, %w0, #1                    \n"
                "st1 {v2.4s}, [%3], #16               \n"
                "bne 0b                               \n"
                : "=r"(nn),     // %0
                  "=r"(r0),     // %1
                  "=r"(r1),     // %2
                  "=r"(outptr)  // %3
                : "0"(nn),
                  "1"(r0),
                  "2"(r1),
                  "3"(outptr)
                : "cc", "memory", "v0", "v1", "v2", "v3"
            );
            }
#else
            if (nn > 0)
            {
            // q0,q1 = 8 floats of row0; q2,q3 = 8 floats of row1.
            // vmax   -> vertical max of the two rows
            // vpmax  -> pairwise (horizontal) max -> 4 pooled results
            asm volatile(
                "0:                                   \n"
                "pld [%1, #256]                       \n"
                "pld [%2, #256]                       \n"
                "vld1.f32 {d0-d3}, [%1]!              \n"
                "vld1.f32 {d4-d7}, [%2]!              \n"
                "vmax.f32 q0, q0, q2                  \n"
                "vmax.f32 q1, q1, q3                  \n"
                "vpmax.f32 d4, d0, d1                 \n"
                "vpmax.f32 d5, d2, d3                 \n"
                "subs %0, #1                          \n"
                "vst1.f32 {d4-d5}, [%3]!              \n"
                "bne 0b                               \n"
                : "=r"(nn),     // %0
                  "=r"(r0),     // %1
                  "=r"(r1),     // %2
                  "=r"(outptr)  // %3
                : "0"(nn),
                  "1"(r0),
                  "2"(r1),
                  "3"(outptr)
                : "cc", "memory", "q0", "q1", "q2", "q3"
            );
            }
#endif // __aarch64__
#endif // __ARM_NEON
            // Scalar tail: one 2x2 patch per iteration.
            for (; remain>0; remain--)
            {
                float max0 = std::max(r0[0], r0[1]);
                float max1 = std::max(r1[0], r1[1]);

                *outptr = std::max(max0, max1);

                r0 += 2;
                r1 += 2;
                outptr++;
            }

            // Jump both row pointers to the start of the next input row pair.
            r0 += tailstep;
            r1 += tailstep;
        }
    }
}
#endif // __ARM_NEON
openmp-ex06.c
#include <stdio.h>
#include <unistd.h>
#include <omp.h>

/*
 * OpenMP demo: outside a parallel region the runtime reports a single
 * (serial) thread, while inside the region each thread queries its own
 * identity.  The private() clause gives every thread its own copy of the
 * listed variables, shadowing the serial ones.
 */
int main(void)
{
    int nthreads;
    int tid;

    /* Serial part — queried before any parallel region. */
    nthreads = omp_get_num_threads();
    tid = omp_get_thread_num();
    printf("\"You're all individuals!\" said %d of %d.\n", tid, nthreads);

    /* Each thread gets its own (uninitialized) nthreads/tid inside the
     * region and fills them in itself. */
#pragma omp parallel private(nthreads, tid)
    {
        nthreads = omp_get_num_threads();
        tid = omp_get_thread_num();
        sleep(1);
        printf("\"Yes, we're all individuals!\" replied %d of %d, sleepily.\n", tid, nthreads);
    }

    /* Back in serial code: the outer variables were never touched by the
     * parallel region, but we re-query them anyway. */
    nthreads = omp_get_num_threads();
    tid = omp_get_thread_num();
    printf("\"I'm not,\" said %d of %d.\n", tid, nthreads);

    return 0;
}
GrB_Matrix_wait.c
//------------------------------------------------------------------------------ // GrB_Matrix_wait: wait for a matrix to complete //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // Finishes all work on a matrix, followed by an OpenMP flush. #include "GB.h" #define GB_FREE_ALL ; GrB_Info GrB_Matrix_wait // finish all work on a matrix ( GrB_Matrix A, GrB_WaitMode waitmode ) { //-------------------------------------------------------------------------- // check inputs //-------------------------------------------------------------------------- GB_WHERE (A, "GrB_Matrix_wait (A, waitmode)") ; GB_RETURN_IF_NULL_OR_FAULTY (A) ; //-------------------------------------------------------------------------- // finish all pending work on the matrix //-------------------------------------------------------------------------- if (waitmode != GrB_COMPLETE && GB_ANY_PENDING_WORK (A)) { GrB_Info info ; GB_BURBLE_START ("GrB_Matrix_wait") ; GB_OK (GB_wait (A, "matrix", Context)) ; GB_BURBLE_END ; } //-------------------------------------------------------------------------- // return result //-------------------------------------------------------------------------- #pragma omp flush return (GrB_SUCCESS) ; }
serial_tree_learner.h
/*! * Copyright (c) 2016 Microsoft Corporation. All rights reserved. * Licensed under the MIT License. See LICENSE file in the project root for license information. */ #ifndef LIGHTGBM_TREELEARNER_SERIAL_TREE_LEARNER_H_ #define LIGHTGBM_TREELEARNER_SERIAL_TREE_LEARNER_H_ #include <LightGBM/dataset.h> #include <LightGBM/tree.h> #include <LightGBM/tree_learner.h> #include <LightGBM/cuda/vector_cudahost.h> #include <LightGBM/utils/array_args.h> #include <LightGBM/utils/json11.h> #include <LightGBM/utils/random.h> #include <string> #include <cmath> #include <cstdio> #include <memory> #include <random> #include <vector> #include "col_sampler.hpp" #include "data_partition.hpp" #include "feature_histogram.hpp" #include "leaf_splits.hpp" #include "monotone_constraints.hpp" #include "split_info.hpp" #ifdef USE_GPU // Use 4KBytes aligned allocator for ordered gradients and ordered hessians when GPU is enabled. // This is necessary to pin the two arrays in memory and make transferring faster. #include <boost/align/aligned_allocator.hpp> #endif namespace LightGBM { using json11::Json; /*! \brief forward declaration */ class CostEfficientGradientBoosting; /*! 
 * \brief Used for learning a tree by single machine
 */
class SerialTreeLearner: public TreeLearner {
 public:
  friend CostEfficientGradientBoosting;
  explicit SerialTreeLearner(const Config* config);

  ~SerialTreeLearner();

  void Init(const Dataset* train_data, bool is_constant_hessian) override;

  /*! \brief Replace the training dataset and rebuild all data-dependent
   *  state, including the multi-value bin structures. */
  void ResetTrainingData(const Dataset* train_data,
                         bool is_constant_hessian) override {
    ResetTrainingDataInner(train_data, is_constant_hessian, true);
  }

  /*! \brief Propagate the constant-hessian flag into the shared training state */
  void ResetIsConstantHessian(bool is_constant_hessian) override {
    share_state_->is_constant_hessian = is_constant_hessian;
  }

  virtual void ResetTrainingDataInner(const Dataset* train_data,
                                      bool is_constant_hessian,
                                      bool reset_multi_val_bin);

  void ResetConfig(const Config* config) override;

  /*! \brief Store the forced-split JSON; a null/absent JSON clears it */
  inline void SetForcedSplit(const Json* forced_split_json) override {
    if (forced_split_json != nullptr && !forced_split_json->is_null()) {
      forced_split_json_ = forced_split_json;
    } else {
      forced_split_json_ = nullptr;
    }
  }

  Tree* Train(const score_t* gradients, const score_t *hessians, bool is_first_tree) override;

  Tree* FitByExistingTree(const Tree* old_tree, const score_t* gradients, const score_t* hessians) const override;

  Tree* FitByExistingTree(const Tree* old_tree, const std::vector<int>& leaf_pred,
                          const score_t* gradients, const score_t* hessians) const override;

  /*! \brief Point training at a bagging selection.
   *  With subset == nullptr, only the data-partition indices change;
   *  otherwise training state is rebuilt on the subset dataset and the
   *  bagging indices are recorded in the shared state. */
  void SetBaggingData(const Dataset* subset, const data_size_t* used_indices, data_size_t num_data) override {
    if (subset == nullptr) {
      data_partition_->SetUsedDataIndices(used_indices, num_data);
      share_state_->SetUseSubrow(false);
    } else {
      ResetTrainingDataInner(subset, share_state_->is_constant_hessian, false);
      share_state_->SetUseSubrow(true);
      share_state_->SetSubrowCopied(false);
      share_state_->bagging_use_indices = used_indices;
      share_state_->bagging_indices_cnt = num_data;
    }
  }

  /*! \brief Add each leaf's output to the score of every row on that leaf.
   *  Leaves are disjoint, so they are processed in parallel. */
  void AddPredictionToScore(const Tree* tree, double* out_score) const override {
    CHECK_LE(tree->num_leaves(), data_partition_->num_leaves());
    if (tree->num_leaves() <= 1) {
      return;
    }
#pragma omp parallel for schedule(static, 1)
    for (int i = 0; i < tree->num_leaves(); ++i) {
      double output = static_cast<double>(tree->LeafOutput(i));
      data_size_t cnt_leaf_data = 0;
      auto tmp_idx = data_partition_->GetIndexOnLeaf(i, &cnt_leaf_data);
      for (data_size_t j = 0; j < cnt_leaf_data; ++j) {
        out_score[tmp_idx[j]] += output;
      }
    }
  }

  void RenewTreeOutput(Tree* tree, const ObjectiveFunction* obj,
                       std::function<double(const label_t*, int)> residual_getter,
                       data_size_t total_num_data, const data_size_t* bag_indices,
                       data_size_t bag_cnt) const override;

  /*! \brief Get output of parent node, used for path smoothing */
  double GetParentOutput(const Tree* tree, const LeafSplits* leaf_splits) const;

  void GetDataLeafIndices(Tree* tree, int* data_leaf_index) const override;

 protected:
  void ComputeBestSplitForFeature(FeatureHistogram* histogram_array_, int feature_index,
                                  int real_fidx, int8_t is_feature_used, int num_data,
                                  const LeafSplits* leaf_splits, SplitInfo* best_split,
                                  double parent_output);

  void GetShareStates(const Dataset* dataset, bool is_constant_hessian, bool is_first_time);

  void RecomputeBestSplitForLeaf(int leaf, SplitInfo* split);

  /*!
   * \brief Some initial works before training
   */
  virtual void BeforeTrain();

  /*!
   * \brief Some initial works before FindBestSplit
   */
  virtual bool BeforeFindBestSplit(const Tree* tree, int left_leaf, int right_leaf);

  virtual void FindBestSplits(const Tree* tree);

  virtual void ConstructHistograms(const std::vector<int8_t>& is_feature_used, bool use_subtract);

  virtual void FindBestSplitsFromHistograms(const std::vector<int8_t>& is_feature_used,
                                            bool use_subtract, const Tree*);

  /*!
   * \brief Partition tree and data according best split.
   * \param tree Current tree, will be splitted on this function.
   * \param best_leaf The index of leaf that will be splitted.
   * \param left_leaf The index of left leaf after splitted.
   * \param right_leaf The index of right leaf after splitted.
   */
  inline virtual void Split(Tree* tree, int best_leaf, int* left_leaf, int* right_leaf) {
    SplitInner(tree, best_leaf, left_leaf, right_leaf, true);
  }

  void SplitInner(Tree* tree, int best_leaf, int* left_leaf, int* right_leaf, bool update_cnt);

  /* Force splits with forced_split_json dict and then return num splits forced.*/
  int32_t ForceSplits(Tree* tree, int* left_leaf, int* right_leaf, int* cur_depth);

  /*!
   * \brief Get the number of data in a leaf
   * \param leaf_idx The index of leaf
   * \return The number of data in the leaf_idx leaf
   */
  inline virtual data_size_t GetGlobalDataCountInLeaf(int leaf_idx) const;

  /*! \brief number of data */
  data_size_t num_data_;
  /*! \brief number of features */
  int num_features_;
  /*! \brief training data */
  const Dataset* train_data_;
  /*! \brief gradients of current iteration */
  const score_t* gradients_;
  /*! \brief hessians of current iteration */
  const score_t* hessians_;
  /*! \brief training data partition on leaves */
  std::unique_ptr<DataPartition> data_partition_;
  /*! \brief pointer to histograms array of parent of current leaves */
  FeatureHistogram* parent_leaf_histogram_array_;
  /*! \brief pointer to histograms array of smaller leaf */
  FeatureHistogram* smaller_leaf_histogram_array_;
  /*! \brief pointer to histograms array of larger leaf */
  FeatureHistogram* larger_leaf_histogram_array_;
  /*! \brief store best split points for all leaves */
  std::vector<SplitInfo> best_split_per_leaf_;
  /*! \brief store best split per feature for all leaves */
  std::vector<SplitInfo> splits_per_leaf_;
  /*! \brief stores minimum and maximum constraints for each leaf */
  std::unique_ptr<LeafConstraintsBase> constraints_;
  /*! \brief stores best thresholds for all feature for smaller leaf */
  std::unique_ptr<LeafSplits> smaller_leaf_splits_;
  /*! \brief stores best thresholds for all feature for larger leaf */
  std::unique_ptr<LeafSplits> larger_leaf_splits_;
#ifdef USE_GPU
  /*! \brief gradients of current iteration, ordered for cache optimized, aligned to 4K page */
  std::vector<score_t, boost::alignment::aligned_allocator<score_t, 4096>> ordered_gradients_;
  /*! \brief hessians of current iteration, ordered for cache optimized, aligned to 4K page */
  std::vector<score_t, boost::alignment::aligned_allocator<score_t, 4096>> ordered_hessians_;
#elif USE_CUDA
  /*! \brief gradients of current iteration, ordered for cache optimized */
  std::vector<score_t, CHAllocator<score_t>> ordered_gradients_;
  /*! \brief hessians of current iteration, ordered for cache optimized */
  std::vector<score_t, CHAllocator<score_t>> ordered_hessians_;
#else
  /*! \brief gradients of current iteration, ordered for cache optimized */
  std::vector<score_t, Common::AlignmentAllocator<score_t, kAlignedSize>> ordered_gradients_;
  /*! \brief hessians of current iteration, ordered for cache optimized */
  std::vector<score_t, Common::AlignmentAllocator<score_t, kAlignedSize>> ordered_hessians_;
#endif
  /*! \brief used to cache historical histogram to speed up*/
  HistogramPool histogram_pool_;
  /*! \brief config of tree learner*/
  const Config* config_;
  /*! \brief column (feature) sampling helper */
  ColSampler col_sampler_;
  /*! \brief forced-split spec; owned by the caller, may be nullptr */
  const Json* forced_split_json_;
  /*! \brief state shared between learner components */
  std::unique_ptr<TrainingShareStates> share_state_;
  /*! \brief cost-efficient gradient boosting extension; null when disabled */
  std::unique_ptr<CostEfficientGradientBoosting> cegb_;
};

// Row count of a real leaf; a negative index denotes "no leaf" and yields 0.
inline data_size_t SerialTreeLearner::GetGlobalDataCountInLeaf(int leaf_idx) const {
  if (leaf_idx >= 0) {
    return data_partition_->leaf_count(leaf_idx);
  } else {
    return 0;
  }
}

}  // namespace LightGBM
#endif   // LightGBM_TREELEARNER_SERIAL_TREE_LEARNER_H_
sum_openmp.c
/* Copyright (C) 2021 The Blosc Developers <blosc@blosc.org> https://blosc.org License: BSD 3-Clause (see LICENSE.txt) Example program showing how to operate with compressed buffers. To compile this program for synthetic data (default): $ gcc -fopenmp -O3 sum_openmp.c -o sum_openmp -lblosc2 To run: $ OMP_PROC_BIND=spread OMP_NUM_THREADS=8 ./sum_openmp Blosc version info: 2.0.0a6.dev ($Date:: 2018-05-18 #$) Sum for uncompressed data: 199950000000 Sum time for uncompressed data: 0.0288 s, 26459.3 MB/s Compression ratio: 762.9 MB -> 14.0 MB (54.6x) Compression time: 0.288 s, 2653.5 MB/s Sum for *compressed* data: 199950000000 Sum time for *compressed* data: 0.0188 s, 40653.7 MB/s To use real (rainfall) data: $ gcc -DRAINFALL -fopenmp -Ofast sum_openmp.c -o sum_openmp And running it: $ OMP_PROC_BIND=spread OMP_NUM_THREADS=8 ./sum_openmp Blosc version info: 2.0.0a6.dev ($Date:: 2018-05-18 #$) Sum for uncompressed data: 29741012 Sum time for uncompressed data: 0.0149 s, 25627.4 MB/s Compression ratio: 381.5 MB -> 71.3 MB (5.3x) Compression time: 1.53 s, 249.1 MB/s Sum for *compressed* data: 29741012 Sum time for *compressed* data: 0.0247 s, 15467.5 MB/s */ #include <stdio.h> #include <stdlib.h> #include <stdbool.h> #include <sys/stat.h> #include <errno.h> #include <assert.h> #include "blosc2.h" #define KB 1024. 
#define MB (1024*KB) #define GB (1024*MB) #define N (100 * 1000 * 1000) #define CHUNKSIZE (16 * 1000) #define NCHUNKS (N / CHUNKSIZE) #define NTHREADS 8 #define NITER 5 #ifdef RAINFALL #define SYNTHETIC false #else #define SYNTHETIC true #endif #if SYNTHETIC == true #define DTYPE int64_t #define CLEVEL 3 #define CODEC BLOSC_BLOSCLZ #else #define DTYPE float #define CLEVEL 1 #define CODEC BLOSC_LZ4 #endif int main(void) { static DTYPE udata[N]; DTYPE chunk_buf[CHUNKSIZE]; int32_t isize = CHUNKSIZE * sizeof(DTYPE); DTYPE sum, compressed_sum; int64_t nbytes, cbytes; blosc2_schunk* schunk; int i, j, nchunk; blosc_timestamp_t last, current; double ttotal, itotal; char* envvar = NULL; printf("Blosc version info: %s (%s)\n", BLOSC_VERSION_STRING, BLOSC_VERSION_DATE); // Fill the buffer for a chunk if (SYNTHETIC) { for (j = 0; j < CHUNKSIZE; j++) { chunk_buf[j] = j; } } else { struct stat info; const char *filegrid = "rainfall-grid-150x150.bin"; if (stat(filegrid, &info) != 0) { printf("Grid file %s not found!", filegrid); exit(1); } char *cdata = malloc(info.st_size); FILE *f = fopen(filegrid, "rb"); size_t blocks_read = fread(cdata, info.st_size, 1, f); assert(blocks_read == 1); fclose(f); int dsize = blosc_getitem(cdata, 0, CHUNKSIZE, chunk_buf); if (dsize < 0) { printf("blosc_getitem() error. Error code: %d\n. 
Probably reading too much data?", dsize); exit(1); } free(cdata); } // Fill the uncompressed dataset with data chunks for (i = 0; i < N / CHUNKSIZE; i++) { for (j = 0; j < CHUNKSIZE; j++) { udata[i * CHUNKSIZE + j] = chunk_buf[j]; } } // Reduce uncompressed dataset ttotal = 1e10; sum = 0; for (int n = 0; n < NITER; n++) { sum = 0; blosc_set_timestamp(&last); #pragma omp parallel for reduction (+:sum) for (i = 0; i < N; i++) { sum += udata[i]; } blosc_set_timestamp(&current); itotal = blosc_elapsed_secs(last, current); if (itotal < ttotal) ttotal = itotal; } printf("Sum for uncompressed data: %10.0f\n", (double)sum); printf("Sum time for uncompressed data: %.3g s, %.1f MB/s\n", ttotal, (double)(isize * NCHUNKS) / (double)(ttotal * MB)); // Create a super-chunk container for the compressed container long codec = CODEC; envvar = getenv("SUM_COMPRESSOR"); if (envvar != NULL) { codec = blosc_compname_to_compcode(envvar); if (codec < 0) { printf("Unknown compresssor: %s\n", envvar); return 1; } } blosc2_cparams cparams = BLOSC2_CPARAMS_DEFAULTS; cparams.compcode = (uint8_t)codec; long clevel = CLEVEL; envvar = getenv("SUM_CLEVEL"); if (envvar != NULL) { clevel = strtol(envvar, NULL, 10); } cparams.clevel = (uint8_t)clevel; cparams.typesize = sizeof(DTYPE); cparams.nthreads = 1; blosc2_dparams dparams = BLOSC2_DPARAMS_DEFAULTS; dparams.nthreads = 1; blosc_set_timestamp(&last); blosc2_storage storage = {.cparams=&cparams, .dparams=&dparams}; schunk = blosc2_schunk_new(&storage); for (nchunk = 0; nchunk < NCHUNKS; nchunk++) { for (i = 0; i < CHUNKSIZE; i++) { chunk_buf[i] = udata[i + nchunk * CHUNKSIZE]; } blosc2_schunk_append_buffer(schunk, chunk_buf, isize); } blosc_set_timestamp(&current); ttotal = blosc_elapsed_secs(last, current); nbytes = schunk->nbytes; cbytes = schunk->cbytes; printf("Compression ratio: %.1f MB -> %.1f MB (%.1fx)\n", (double)nbytes / MB, (double)cbytes / MB, (1. 
* (double)nbytes) / (double)cbytes); printf("Compression time: %.3g s, %.1f MB/s\n", ttotal, (double)nbytes / (ttotal * MB)); int nthreads = NTHREADS; envvar = getenv("OMP_NUM_THREADS"); if (envvar != NULL) { long value; value = strtol(envvar, NULL, 10); if ((value != EINVAL) && (value >= 0)) { nthreads = (int)value; } } // Build buffers and contexts for computations int nchunks_thread = NCHUNKS / nthreads; int remaining_chunks = NCHUNKS - nchunks_thread * nthreads; blosc2_context **dctx = malloc(nthreads * sizeof(void*)); DTYPE** chunk = malloc(nthreads * sizeof(void*)); for (j = 0; j < nthreads; j++) { chunk[j] = malloc(CHUNKSIZE * sizeof(DTYPE)); } // Reduce uncompressed dataset blosc_set_timestamp(&last); ttotal = 1e10; compressed_sum = 0; for (int n = 0; n < NITER; n++) { compressed_sum = 0; #pragma omp parallel for private(nchunk) reduction (+:compressed_sum) for (j = 0; j < nthreads; j++) { dctx[j] = blosc2_create_dctx(dparams); for (nchunk = 0; nchunk < nchunks_thread; nchunk++) { blosc2_decompress_ctx(dctx[j], schunk->data[j * nchunks_thread + nchunk], INT32_MAX, (void*)(chunk[j]), isize); for (i = 0; i < CHUNKSIZE; i++) { compressed_sum += chunk[j][i]; //compressed_sum += i + (j * nchunks_thread + nchunk) * CHUNKSIZE; } } } for (nchunk = NCHUNKS - remaining_chunks; nchunk < NCHUNKS; nchunk++) { blosc2_decompress_ctx(dctx[0], schunk->data[nchunk], INT32_MAX, (void*)(chunk[0]), isize); for (i = 0; i < CHUNKSIZE; i++) { compressed_sum += chunk[0][i]; //compressed_sum += i + nchunk * CHUNKSIZE; } } blosc_set_timestamp(&current); itotal = blosc_elapsed_secs(last, current); if (itotal < ttotal) ttotal = itotal; } printf("Sum for *compressed* data: %10.0f\n", (double)compressed_sum); printf("Sum time for *compressed* data: %.3g s, %.1f MB/s\n", ttotal, (double)nbytes / (ttotal * MB)); //printf("sum, csum: %f, %f\n", sum, compressed_sum); if (SYNTHETIC) { // difficult to fulfill for single precision assert(sum == compressed_sum); } /* Free resources */ 
blosc2_schunk_free(schunk); return 0; }
GB_unop__identity_int64_bool.c
//------------------------------------------------------------------------------
// GB_unop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB (_unop_apply__identity_int64_bool)
// op(A') function:  GB (_unop_tran__identity_int64_bool)

// C type:   int64_t
// A type:   bool
// cast:     int64_t cij = (int64_t) aij
// unaryop:  cij = aij

#define GB_ATYPE \
    bool

#define GB_CTYPE \
    int64_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    bool aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = x ;

// casting
#define GB_CAST(z, aij) \
    int64_t z = (int64_t) aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    bool aij = Ax [pA] ;            \
    /* Cx [pC] = op (cast (aij)) */ \
    int64_t z = (int64_t) aij ;     \
    Cx [pC] = z ;                   \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_INT64 || GxB_NO_BOOL)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_apply__identity_int64_bool)
(
    int64_t *Cx,            // Cx and Ax may be aliased
    const bool *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // full/sparse case: every one of the anz entries is present
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            bool aij = Ax [p] ;
            int64_t z = (int64_t) aij ;
            Cx [p] = z ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            // skip entries not present in the bitmap
            if (!Ab [p]) continue ;
            bool aij = Ax [p] ;
            int64_t z = (int64_t) aij ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_tran__identity_int64_bool)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the transpose kernel body is shared by all generated unary ops
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
NETSPLITLM_fmt_plug.c
/*
 * NETHALFLM_fmt.c
 * Written by DSK (Based on NetLM/NetNTLM patch by JoMo-Kun)
 * Performs brute-force cracking of the HalfLM challenge/response pairs.
 *
 * Modified for performance and OMP support by magnum 2011
 *
 * Storage Format:
 * domain\username:::lm response:nt response:challenge
 *
 * NOTE, in loader.c, the format appeared to be domain\username:::lm response:challenge
 * so that format has been built into the 'prepare' function (JimF).
 *
 * Code is in public domain.
 */

#if FMT_EXTERNS_H
extern struct fmt_main fmt_NETHALFLM;
#elif FMT_REGISTERS_H
john_register_one(&fmt_NETHALFLM);
#else

#include <string.h>

#ifdef _OPENMP
#ifdef __MIC__
#ifndef OMP_SCALE
#define OMP_SCALE 2048
#endif
#else
#ifndef OMP_SCALE
#define OMP_SCALE 65536
#endif
#endif // __MIC__
#include <omp.h>
#endif

#include "misc.h"
#include "common.h"
#include "formats.h"
#include "unicode.h"

#include <openssl/des.h>

#include "memdbg.h"

#ifndef uchar
#define uchar unsigned char
#endif

#define FORMAT_LABEL         "nethalflm"
#define FORMAT_NAME          "HalfLM C/R"
#define ALGORITHM_NAME       "DES 32/" ARCH_BITS_STR
#define BENCHMARK_COMMENT    ""
#define BENCHMARK_LENGTH     0
// HalfLM only attacks the first half of the LM hash, hence 7 chars max
#define PLAINTEXT_LENGTH     7
#define BINARY_SIZE          8
#define BINARY_ALIGN         4
#define SALT_SIZE            8
#define SALT_ALIGN           4
#define CIPHERTEXT_LENGTH    48
// "$NETHALFLM$" + 16 hex challenge chars + "$" + 48 hex response chars
#define TOTAL_LENGTH         12 + 2 * SALT_SIZE + CIPHERTEXT_LENGTH

// these may be altered in init() if running OMP
#define MIN_KEYS_PER_CRYPT   1
#define MAX_KEYS_PER_CRYPT   1

static struct fmt_tests tests[] = {
	{"", "G3RG3P00!", {"domain\\username", "", "", "6E1EC36D3417CE9E09A4424309F116C4C991948DAEB4ADAD", "", "1122334455667788"} },
	{"$NETHALFLM$1122334455667788$6E1EC36D3417CE9E09A4424309F116C4C991948DAEB4ADAD", "G3RG3P00!"},
	{"$NETHALFLM$1122334455667788$6E1EC36D3417CE9E09A4424309F116C4C991948DAEB4ADAD", "g3rg3p0"},
	{"$NETHALFLM$1122334455667788$1354FD5ABF3B627B8B49587B8F2BBA0F9F6C5E420824E0A2", "zeeez@1"},
	{"", "G3RG3P0", {"domain\\username", "", "", "6E1EC36D3417CE9E09A4424309F116C4C991948DAEB4ADAD", "", "1122334455667788"} },
	{"", "ZEEEZ@1", {"domain\\username", "", "", "1354FD5ABF3B627B8B49587B8F2BBA0F9F6C5E420824E0A2", "", "1122334455667788"} },
	// repeat last hash in exactly the same format that is used in john.pot
	{"$NETHALFLM$1122334455667788$1354fd5abf3b627b8b49587b8f2bba0f9f6c5e420824e0a2", "ZEEEZ@1"},
	{NULL}
};

// candidate passwords as typed in (upper-cased in set_key)
static uchar (*saved_plain)[PLAINTEXT_LENGTH + 1];
// first 8 bytes of the LM hash, computed once per key in set_key
static uchar (*saved_pre)[8];
// DES(challenge) under the partial LM hash, filled in crypt_all
static uchar (*output)[BINARY_SIZE];
// current 8-byte server challenge (the "salt")
static uchar *challenge;

/* Scale key buffers for OpenMP and allocate the per-key arrays. */
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
	int omp_t = omp_get_max_threads();
	self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;
	self->params.max_keys_per_crypt *= omp_t;
#endif
	saved_plain = mem_calloc(self->params.max_keys_per_crypt,
	                         sizeof(*saved_plain));
	saved_pre = mem_calloc(self->params.max_keys_per_crypt,
	                       sizeof(*saved_pre));
	output = mem_calloc(self->params.max_keys_per_crypt,
	                    sizeof(*output));
}

/* Release the buffers allocated in init(). */
static void done(void)
{
	MEM_FREE(output);
	MEM_FREE(saved_pre);
	MEM_FREE(saved_plain);
}

/* Accept only well-formed "$NETHALFLM$<16 hex>$<48 hex>" strings, and
   reject NTLM ESS responses (LM part all zero). */
static int valid(char *ciphertext, struct fmt_main *self)
{
	char *pos;

	if (strncmp(ciphertext, "$NETHALFLM$", 11)!=0) return 0;
	if (strlen(ciphertext) < TOTAL_LENGTH) return 0;
	// the '$' separating challenge from response
	if (ciphertext[27] != '$') return 0;

	if (strncmp(&ciphertext[28 + 2 * SALT_SIZE],
	            "00000000000000000000000000000000", 32) == 0)
		return 0; // This is NTLM ESS C/R

	// the response must be exactly CIPHERTEXT_LENGTH hex digits
	for (pos = &ciphertext[28]; atoi16[ARCH_INDEX(*pos)] != 0x7F; pos++)
		;
	if (!*pos && pos - ciphertext - 28 == CIPHERTEXT_LENGTH) {
		return 1;
	}
	else
		return 0;
}

/* Build the canonical "$NETHALFLM$challenge$response" string from the
   loader's split fields, when the hash is not already in that form. */
static char *prepare(char *split_fields[10], struct fmt_main *self)
{
	char *tmp;

	if (!strncmp(split_fields[1], "$NETHALFLM$", 11))
		return split_fields[1];
	if (!split_fields[3]||!split_fields[4]||!split_fields[5])
		return split_fields[1];

	if (strlen(split_fields[3]) != CIPHERTEXT_LENGTH)
		return split_fields[1];

	// if LMresp == NTresp then it's NTLM-only, not LM
	if (!strncmp(split_fields[3], split_fields[4], 48))
		return split_fields[1];

	// this string suggests we have an improperly formatted NTLMv2
	if (strlen(split_fields[4]) > 31) {
		if (!strncmp(&split_fields[4][32], "0101000000000000", 16))
			return split_fields[1];
	}

	tmp = (char *) mem_alloc(12 + strlen(split_fields[3]) + strlen(split_fields[5]) + 1);
	sprintf(tmp, "$NETHALFLM$%s$%s", split_fields[5], split_fields[3]);

	if (valid(tmp,self)) {
		char *cp2 = str_alloc_copy(tmp);
		MEM_FREE(tmp);
		return cp2;
	}
	MEM_FREE(tmp);
	return split_fields[1];
}

/* Canonicalize the hash: lower-case everything after the tag so john.pot
   entries compare equal regardless of input case. */
static char *split(char *ciphertext, int index, struct fmt_main *self)
{
	static char out[TOTAL_LENGTH + 1] = {0};

	memcpy(out, ciphertext, TOTAL_LENGTH);
	strlwr(&out[10]); /* Exclude: $NETHALFLM$ */

	return out;
}

/* Decode the 16 hex chars of the stored response into the 8 binary bytes
   that crypt_all produces. */
static void *get_binary(char *ciphertext)
{
	static union {
		unsigned char c[BINARY_SIZE];
		ARCH_WORD_32 dummy;
	} binary;
	int i;

	ciphertext+=28;
	for (i=0; i<BINARY_SIZE; i++) {
		binary.c[i] = (atoi16[ARCH_INDEX(ciphertext[i*2])])<<4;
		binary.c[i] |= (atoi16[ARCH_INDEX(ciphertext[i*2+1])]);
	}
	return binary.c;
}

/* Expand a 7-byte (56-bit) key into the 8-byte parity form DES expects
   and compute its key schedule. */
static inline void setup_des_key(unsigned char key_56[], DES_key_schedule *ks)
{
	DES_cblock key;

	key[0] = key_56[0];
	key[1] = (key_56[0] << 7) | (key_56[1] >> 1);
	key[2] = (key_56[1] << 6) | (key_56[2] >> 2);
	key[3] = (key_56[2] << 5) | (key_56[3] >> 3);
	key[4] = (key_56[3] << 4) | (key_56[4] >> 4);
	key[5] = (key_56[4] << 3) | (key_56[5] >> 5);
	key[6] = (key_56[5] << 2) | (key_56[6] >> 6);
	key[7] = (key_56[6] << 1);

	DES_set_key(&key, ks);
}

/* For each queued key, DES-encrypt the challenge with the precomputed
   partial LM hash (saved_pre, filled by set_key). */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	int count = *pcount;
	DES_key_schedule ks;
	int i;

#ifdef _OPENMP
#pragma omp parallel for default(none) private(i, ks) shared(count, output, challenge, saved_pre)
#endif
	for(i=0; i<count; i++) {
		/* DES-encrypt challenge using the partial LM hash */
		setup_des_key(saved_pre[i], &ks);
		DES_ecb_encrypt((DES_cblock*)challenge, (DES_cblock*)output[i], &ks, DES_ENCRYPT);
	}
	return count;
}

/* Any computed response equal to the target binary? */
static int cmp_all(void *binary, int count)
{
	int index;

	for(index=0; index<count; index++)
		if (!memcmp(output[index], binary, BINARY_SIZE))
			return 1;
	return 0;
}

/* Exact comparison for a single candidate index. */
static int cmp_one(void *binary, int index)
{
	return !memcmp(output[index], binary, BINARY_SIZE);
}

/* Re-derive the binary from the source string and compare. */
static int cmp_exact(char *source, int index)
{
	return !memcmp(output[index], get_binary(source), BINARY_SIZE);
}

/* Decode the 16 hex chars of the challenge into its 8 binary bytes. */
static void *get_salt(char *ciphertext)
{
	static union {
		unsigned char c[SALT_SIZE];
		ARCH_WORD_32 dummy;
	} out;
	int i;

	ciphertext += 11;
	for (i = 0; i < SALT_SIZE; ++i) {
		out.c[i] = (atoi16[ARCH_INDEX(ciphertext[i*2])] << 4) +
			atoi16[ARCH_INDEX(ciphertext[i*2+1])];
	}
	return (void*)out.c;
}

static void set_salt(void *salt)
{
	challenge = salt;
}

/* Store the candidate and precompute the first 8 bytes of its LM hash
   (DES of the LM magic constant under the upper-cased password). */
static void netsplitlm_set_key(char *key, int index)
{
	const unsigned char magic[] = {0x4b, 0x47, 0x53, 0x21, 0x40, 0x23, 0x24, 0x25};
	DES_key_schedule ks;

	strnzcpyn((char *)saved_plain[index], key, PLAINTEXT_LENGTH + 1);

	/* Upper-case password */
	enc_strupper((char *)saved_plain[index]);

	/* Generate first 8-bytes of LM hash */
	setup_des_key(saved_plain[index], &ks);
	DES_ecb_encrypt((DES_cblock*)magic, (DES_cblock*)saved_pre[index], &ks, DES_ENCRYPT);
}

static char *get_key(int index)
{
	return (char *)saved_plain[index];
}

static int salt_hash(void *salt)
{
	return *(ARCH_WORD_32 *)salt & (SALT_HASH_SIZE - 1);
}

// hash-table bucketing functions over the first 32 bits of the response
static int get_hash_0(int index) { return *(ARCH_WORD_32 *)output[index] & PH_MASK_0; }
static int get_hash_1(int index) { return *(ARCH_WORD_32 *)output[index] & PH_MASK_1; }
static int get_hash_2(int index) { return *(ARCH_WORD_32 *)output[index] & PH_MASK_2; }
static int get_hash_3(int index) { return *(ARCH_WORD_32 *)output[index] & PH_MASK_3; }
static int get_hash_4(int index) { return *(ARCH_WORD_32 *)output[index] & PH_MASK_4; }
static int get_hash_5(int index) { return *(ARCH_WORD_32 *)output[index] & PH_MASK_5; }
static int get_hash_6(int index) { return *(ARCH_WORD_32 *)output[index] & PH_MASK_6; }

struct fmt_main fmt_NETHALFLM = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_8_BIT | FMT_TRUNC | FMT_SPLIT_UNIFIES_CASE | FMT_OMP | FMT_OMP_BAD,
		{ NULL },
		tests
	}, {
		init,
		done,
		fmt_default_reset,
		prepare,
		valid,
		split,
		get_binary,
		get_salt,
		{ NULL },
		fmt_default_source,
		{
			fmt_default_binary_hash_0,
			fmt_default_binary_hash_1,
			fmt_default_binary_hash_2,
			fmt_default_binary_hash_3,
			fmt_default_binary_hash_4,
			fmt_default_binary_hash_5,
			fmt_default_binary_hash_6
		},
		salt_hash,
		NULL,
		set_salt,
		netsplitlm_set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			get_hash_0,
			get_hash_1,
			get_hash_2,
			get_hash_3,
			get_hash_4,
			get_hash_5,
			get_hash_6
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};

#endif /* plugin stanza */
serial_tree_learner.h
/*!
 * Copyright (c) 2016 Microsoft Corporation. All rights reserved.
 * Licensed under the MIT License. See LICENSE file in the project root for license information.
 */
#ifndef LIGHTGBM_TREELEARNER_SERIAL_TREE_LEARNER_H_
#define LIGHTGBM_TREELEARNER_SERIAL_TREE_LEARNER_H_

#include <LightGBM/dataset.h>
#include <LightGBM/tree.h>
#include <LightGBM/tree_learner.h>
#include <LightGBM/cuda/vector_cudahost.h>
#include <LightGBM/utils/array_args.h>
#include <LightGBM/utils/json11.h>
#include <LightGBM/utils/random.h>

#include <string>
#include <cmath>
#include <cstdio>
#include <memory>
#include <random>
#include <vector>

#include "col_sampler.hpp"
#include "data_partition.hpp"
#include "feature_histogram.hpp"
#include "leaf_splits.hpp"
#include "monotone_constraints.hpp"
#include "split_info.hpp"

#ifdef USE_GPU
// Use 4KBytes aligned allocator for ordered gradients and ordered Hessians when GPU is enabled.
// This is necessary to pin the two arrays in memory and make transferring faster.
#include <boost/align/aligned_allocator.hpp>
#endif

namespace LightGBM {

using json11::Json;

/*! \brief forward declaration */
class CostEfficientGradientBoosting;

/*!
 * \brief Used for learning a tree by single machine
 */
class SerialTreeLearner: public TreeLearner {
 public:
  friend CostEfficientGradientBoosting;
  explicit SerialTreeLearner(const Config* config);

  ~SerialTreeLearner();

  void Init(const Dataset* train_data, bool is_constant_hessian) override;

  /*! \brief Replace the training dataset and rebuild all data-dependent
   *  state, including the multi-value bin structures. */
  void ResetTrainingData(const Dataset* train_data,
                         bool is_constant_hessian) override {
    ResetTrainingDataInner(train_data, is_constant_hessian, true);
  }

  /*! \brief Propagate the constant-hessian flag into the shared training state */
  void ResetIsConstantHessian(bool is_constant_hessian) override {
    share_state_->is_constant_hessian = is_constant_hessian;
  }

  virtual void ResetTrainingDataInner(const Dataset* train_data,
                                      bool is_constant_hessian,
                                      bool reset_multi_val_bin);

  void ResetConfig(const Config* config) override;

  /*! \brief Store the forced-split JSON; a null/absent JSON clears it */
  inline void SetForcedSplit(const Json* forced_split_json) override {
    if (forced_split_json != nullptr && !forced_split_json->is_null()) {
      forced_split_json_ = forced_split_json;
    } else {
      forced_split_json_ = nullptr;
    }
  }

  Tree* Train(const score_t* gradients, const score_t *hessians, bool is_first_tree) override;

  Tree* FitByExistingTree(const Tree* old_tree, const score_t* gradients, const score_t* hessians) const override;

  Tree* FitByExistingTree(const Tree* old_tree, const std::vector<int>& leaf_pred,
                          const score_t* gradients, const score_t* hessians) const override;

  /*! \brief Point training at a bagging selection.
   *  With subset == nullptr, only the data-partition indices change;
   *  otherwise training state is rebuilt on the subset dataset and the
   *  bagging indices are recorded in the shared state. */
  void SetBaggingData(const Dataset* subset, const data_size_t* used_indices, data_size_t num_data) override {
    if (subset == nullptr) {
      data_partition_->SetUsedDataIndices(used_indices, num_data);
      share_state_->SetUseSubrow(false);
    } else {
      ResetTrainingDataInner(subset, share_state_->is_constant_hessian, false);
      share_state_->SetUseSubrow(true);
      share_state_->SetSubrowCopied(false);
      share_state_->bagging_use_indices = used_indices;
      share_state_->bagging_indices_cnt = num_data;
    }
  }

  /*! \brief Add each leaf's output to the score of every row on that leaf.
   *  Leaves are disjoint, so they are processed in parallel. */
  void AddPredictionToScore(const Tree* tree, double* out_score) const override {
    CHECK_LE(tree->num_leaves(), data_partition_->num_leaves());
    if (tree->num_leaves() <= 1) {
      return;
    }
#pragma omp parallel for schedule(static, 1)
    for (int i = 0; i < tree->num_leaves(); ++i) {
      double output = static_cast<double>(tree->LeafOutput(i));
      data_size_t cnt_leaf_data = 0;
      auto tmp_idx = data_partition_->GetIndexOnLeaf(i, &cnt_leaf_data);
      for (data_size_t j = 0; j < cnt_leaf_data; ++j) {
        out_score[tmp_idx[j]] += output;
      }
    }
  }

  void RenewTreeOutput(Tree* tree, const ObjectiveFunction* obj,
                       std::function<double(const label_t*, int)> residual_getter,
                       data_size_t total_num_data, const data_size_t* bag_indices,
                       data_size_t bag_cnt) const override;

  /*! \brief Get output of parent node, used for path smoothing */
  double GetParentOutput(const Tree* tree, const LeafSplits* leaf_splits) const;

 protected:
  void ComputeBestSplitForFeature(FeatureHistogram* histogram_array_, int feature_index,
                                  int real_fidx, int8_t is_feature_used, int num_data,
                                  const LeafSplits* leaf_splits, SplitInfo* best_split,
                                  double parent_output);

  void GetShareStates(const Dataset* dataset, bool is_constant_hessian, bool is_first_time);

  void RecomputeBestSplitForLeaf(Tree* tree, int leaf, SplitInfo* split);

  /*!
   * \brief Some initial works before training
   */
  virtual void BeforeTrain();

  /*!
   * \brief Some initial works before FindBestSplit
   */
  virtual bool BeforeFindBestSplit(const Tree* tree, int left_leaf, int right_leaf);

  virtual void FindBestSplits(const Tree* tree);

  virtual void ConstructHistograms(const std::vector<int8_t>& is_feature_used, bool use_subtract);

  virtual void FindBestSplitsFromHistograms(const std::vector<int8_t>& is_feature_used,
                                            bool use_subtract, const Tree*);

  /*!
   * \brief Partition tree and data according best split.
   * \param tree Current tree, will be splitted on this function.
   * \param best_leaf The index of leaf that will be splitted.
   * \param left_leaf The index of left leaf after splitted.
   * \param right_leaf The index of right leaf after splitted.
   */
  inline virtual void Split(Tree* tree, int best_leaf, int* left_leaf, int* right_leaf) {
    SplitInner(tree, best_leaf, left_leaf, right_leaf, true);
  }

  void SplitInner(Tree* tree, int best_leaf, int* left_leaf, int* right_leaf, bool update_cnt);

  /* Force splits with forced_split_json dict and then return num splits forced.*/
  int32_t ForceSplits(Tree* tree, int* left_leaf, int* right_leaf, int* cur_depth);

  /*!
   * \brief Get the number of data in a leaf
   * \param leaf_idx The index of leaf
   * \return The number of data in the leaf_idx leaf
   */
  inline virtual data_size_t GetGlobalDataCountInLeaf(int leaf_idx) const;

  /*! \brief number of data */
  data_size_t num_data_;
  /*! \brief number of features */
  int num_features_;
  /*! \brief training data */
  const Dataset* train_data_;
  /*! \brief gradients of current iteration */
  const score_t* gradients_;
  /*! \brief hessians of current iteration */
  const score_t* hessians_;
  /*! \brief training data partition on leaves */
  std::unique_ptr<DataPartition> data_partition_;
  /*! \brief pointer to histograms array of parent of current leaves */
  FeatureHistogram* parent_leaf_histogram_array_;
  /*! \brief pointer to histograms array of smaller leaf */
  FeatureHistogram* smaller_leaf_histogram_array_;
  /*! \brief pointer to histograms array of larger leaf */
  FeatureHistogram* larger_leaf_histogram_array_;
  /*! \brief store best split points for all leaves */
  std::vector<SplitInfo> best_split_per_leaf_;
  /*! \brief store best split per feature for all leaves */
  std::vector<SplitInfo> splits_per_leaf_;
  /*! \brief stores minimum and maximum constraints for each leaf */
  std::unique_ptr<LeafConstraintsBase> constraints_;
  /*! \brief stores best thresholds for all feature for smaller leaf */
  std::unique_ptr<LeafSplits> smaller_leaf_splits_;
  /*! \brief stores best thresholds for all feature for larger leaf */
  std::unique_ptr<LeafSplits> larger_leaf_splits_;
#ifdef USE_GPU
  /*! \brief gradients of current iteration, ordered for cache optimized, aligned to 4K page */
  std::vector<score_t, boost::alignment::aligned_allocator<score_t, 4096>> ordered_gradients_;
  /*! \brief hessians of current iteration, ordered for cache optimized, aligned to 4K page */
  std::vector<score_t, boost::alignment::aligned_allocator<score_t, 4096>> ordered_hessians_;
#elif USE_CUDA
  /*! \brief gradients of current iteration, ordered for cache optimized */
  std::vector<score_t, CHAllocator<score_t>> ordered_gradients_;
  /*! \brief hessians of current iteration, ordered for cache optimized */
  std::vector<score_t, CHAllocator<score_t>> ordered_hessians_;
#else
  /*! \brief gradients of current iteration, ordered for cache optimized */
  std::vector<score_t, Common::AlignmentAllocator<score_t, kAlignedSize>> ordered_gradients_;
  /*! \brief hessians of current iteration, ordered for cache optimized */
  std::vector<score_t, Common::AlignmentAllocator<score_t, kAlignedSize>> ordered_hessians_;
#endif
  /*! \brief used to cache historical histogram to speed up*/
  HistogramPool histogram_pool_;
  /*! \brief config of tree learner*/
  const Config* config_;
  /*! \brief column (feature) sampling helper */
  ColSampler col_sampler_;
  /*! \brief forced-split spec; owned by the caller, may be nullptr */
  const Json* forced_split_json_;
  /*! \brief state shared between learner components */
  std::unique_ptr<TrainingShareStates> share_state_;
  /*! \brief cost-efficient gradient boosting extension; null when disabled */
  std::unique_ptr<CostEfficientGradientBoosting> cegb_;
};

// Row count of a real leaf; a negative index denotes "no leaf" and yields 0.
inline data_size_t SerialTreeLearner::GetGlobalDataCountInLeaf(int leaf_idx) const {
  if (leaf_idx >= 0) {
    return data_partition_->leaf_count(leaf_idx);
  } else {
    return 0;
  }
}

}  // namespace LightGBM
#endif   // LightGBM_TREELEARNER_SERIAL_TREE_LEARNER_H_
GB_binop__first_fp32.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__first_fp32) // A.*B function (eWiseMult): GB (_AemultB_08__first_fp32) // A.*B function (eWiseMult): GB (_AemultB_02__first_fp32) // A.*B function (eWiseMult): GB (_AemultB_04__first_fp32) // A.*B function (eWiseMult): GB (_AemultB_bitmap__first_fp32) // A*D function (colscale): GB (_AxD__first_fp32) // D*A function (rowscale): GB (_DxB__first_fp32) // C+=B function (dense accum): GB (_Cdense_accumB__first_fp32) // C+=b function (dense accum): GB (_Cdense_accumb__first_fp32) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__first_fp32) // C=scalar+B GB ((none)) // C=scalar+B' GB ((none)) // C=A+scalar GB ((none)) // C=A'+scalar GB ((none)) // C type: float // A type: float // B,b type: float // BinaryOp: cij = aij #define GB_ATYPE \ float #define GB_BTYPE \ float #define GB_CTYPE \ float // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ 
float aij = GBX (Ax, pA, A_iso) // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ ; // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ float t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = x ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_FIRST || GxB_NO_FP32 || GxB_NO_FIRST_FP32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__first_fp32) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__first_fp32) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { #include "GB_dense_subassign_23_template.c" } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__first_fp32) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { // get the scalar b for C += b, of type float float bwork = (*((float *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__first_fp32) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool 
D_is_pattern, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else float *restrict Cx = (float *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__first_fp32) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else float *restrict Cx = (float *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__first_fp32) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" GB_FREE_WORK ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__first_fp32) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const 
GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__first_fp32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__first_fp32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__first_fp32) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else float *Cx = (float *) Cx_output ; float x = (*((float *) x_input)) ; float *Bx = (float *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < 
bnz ; p++) { if (!GBB (Bb, p)) continue ; ; ; Cx [p] = x ; } return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; float *Cx = (float *) Cx_output ; float *Ax = (float *) Ax_input ; float y = (*((float *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; float aij = GBX (Ax, p, false) ; Cx [p] = aij ; } return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ #if 0 // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ ; ; \ Cx [pC] = x ; \ } GrB_Info GB ((none)) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ float #if GB_DISABLE return (GrB_NO_VALUE) ; #else float x = (*((const float *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ float } #endif //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ #if 0 // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ float aij = GBX (Ax, pA, false) ; \ Cx [pC] = aij ; \ } GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else float y = (*((const float *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif #endif
django_fmt_plug.c
/* Django 1.4 patch for JtR. Hacked together during May of 2012 by
 * Dhiru Kholia <dhiru.kholia at gmail.com>.
 *
 * This software is Copyright (c) 2012, Dhiru Kholia <dhiru.kholia at gmail.com>,
 * and it is hereby released to the general public under the following terms:
 * Redistribution and use in source and binary forms, with or without modification,
 * are permitted.
 *
 * Input Format => user:$django$*type*django-hash
 *
 * Where,
 *
 * type => 1, for Django 1.4 pbkdf2_sha256 hashes and
 *
 * django-hash => Second column of "SELECT username, password FROM auth_user"
 *
 * July, 2012, the oSSL PKCS5_PBKDF2_HMAC function was replaced with a much faster
 * function pbkdf2() designed by JimF. Originally this function was designed for
 * the mscash2 (DCC2) format. The same pbkdf2 function is used, and it only required
 * small changes to use SHA256.
 *
 * This new code is 3x to 4x FASTER than the original oSSL code, even though it
 * still uses only oSSL primitives. A lot of the high-level machinery in oSSL is
 * slow.
 */

#if FMT_EXTERNS_H
extern struct fmt_main fmt_django;
#elif FMT_REGISTERS_H
john_register_one(&fmt_django);
#else

// uncomment this header to use the slower PKCS5_PBKDF2_HMAC function.
// Note, PKCS5_PBKDF2_HMAC is ONLY available in oSSL 1.00 + (1.0c I think to be exact) //#include <openssl/evp.h> #include <string.h> #include <assert.h> #include <errno.h> #include "arch.h" #include "sha2.h" #include "misc.h" #include "common.h" #include "formats.h" #include "params.h" #include "options.h" #include "johnswap.h" #include "base64.h" #include "base64_convert.h" #include "pbkdf2_hmac_sha256.h" #ifdef _OPENMP #include <omp.h> #ifndef OMP_SCALE #define OMP_SCALE 4 // tuned on core i7 #endif static int omp_t = 1; #endif #include "memdbg.h" #define FORMAT_LABEL "Django" #define FORMAT_NAME "" #ifdef SIMD_COEF_32 #define ALGORITHM_NAME "PBKDF2-SHA256 " SHA256_ALGORITHM_NAME #else #define ALGORITHM_NAME "PBKDF2-SHA256 32/" ARCH_BITS_STR #endif #define BENCHMARK_COMMENT " (x10000)" #define BENCHMARK_LENGTH -1 #define PLAINTEXT_LENGTH 125 #define HASH_LENGTH 44 #define BINARY_SIZE 32 #define SALT_SIZE sizeof(struct custom_salt) #define BINARY_ALIGN sizeof(ARCH_WORD_32) #define SALT_ALIGN sizeof(int) #ifdef SIMD_COEF_32 #define MIN_KEYS_PER_CRYPT SSE_GROUP_SZ_SHA256 #define MAX_KEYS_PER_CRYPT SSE_GROUP_SZ_SHA256 #else #define MIN_KEYS_PER_CRYPT 1 #define MAX_KEYS_PER_CRYPT 1 #endif static struct fmt_tests django_tests[] = { {"$django$*1*pbkdf2_sha256$10000$qPmFbibfAY06$x/geVEkdZSlJMqvIYJ7G6i5l/6KJ0UpvLUU6cfj83VM=", "openwall"}, {"$django$*1*pbkdf2_sha256$10000$BVmpZMBhRSd7$2nTDwPhSsDKOwpKiV04teVtf+a14Rs7na/lIB3KnHkM=", "123"}, {"$django$*1*pbkdf2_sha256$10000$BVmpZMBhRSd1$bkdQo9RoatRomupPFP+XEo+Guuirq4mi+R1cFcV0U3M=", "openwall"}, {"$django$*1*pbkdf2_sha256$10000$BVmpZMBhRSd6$Uq33DAHOFHUED+32IIqCqm+ITU1mhsGOJ7YwFf6h+6k=", "password"}, {"$django$*1*pbkdf2_sha256$10000$34L3roCQ6ZfN$R21tJK1sIDfmj9BfBocefFfuGVwE3pXcLEhChNjc+pU=", "0123456789012345678901234567890123456789012345678901234567890123"}, {"$django$*1*pbkdf2_sha256$10000$7qPqyUDw8kZV$pFmVRjlHvayoWEy8ZWXkHgfmgImUKLmkmruclpYVAxM=", 
"12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345"}, {NULL} }; static char (*saved_key)[PLAINTEXT_LENGTH + 1]; static ARCH_WORD_32 (*crypt_out)[BINARY_SIZE / sizeof(ARCH_WORD_32)]; static struct custom_salt { int type; int iterations; unsigned char salt[32]; } *cur_salt; static void init(struct fmt_main *self) { #ifdef _OPENMP omp_t = omp_get_max_threads(); self->params.min_keys_per_crypt *= omp_t; omp_t *= OMP_SCALE; self->params.max_keys_per_crypt *= omp_t; #endif saved_key = mem_calloc_align(sizeof(*saved_key), self->params.max_keys_per_crypt, MEM_ALIGN_WORD); crypt_out = mem_calloc_align(sizeof(*crypt_out), self->params.max_keys_per_crypt, MEM_ALIGN_WORD); } static void done(void) { MEM_FREE(crypt_out); MEM_FREE(saved_key); } static int valid(char *ciphertext, struct fmt_main *self) { char *ctcopy, *keeptr, *p; if (strncmp(ciphertext, "$django$*", 9) != 0) return 0; ctcopy = strdup(ciphertext); keeptr = ctcopy;; ctcopy += 9; if ((p = strtokm(ctcopy, "*")) == NULL) /* type */ goto err; /* type must be 1 */ if (!isdec(p)) goto err; if (atoi(p) != 1) goto err; if ((p = strtokm(NULL, "$")) == NULL) /* algorithm */ goto err; if (strcmp(p, "pbkdf2_sha256") != 0) goto err; if ((p = strtokm(NULL, "$")) == NULL) /* iterations */ goto err; if (!isdec(p)) // FIXME: what about iterations == 0? 
goto err; if ((p = strtokm(NULL, "$")) == NULL) /* salt */ goto err; if (strlen(p) > sizeof(cur_salt->salt)-1) goto err; if ((p = strtokm(NULL, "")) == NULL) /* hash */ goto err; if (strlen(p)-1 != base64_valid_length(p,e_b64_mime,flg_Base64_MIME_TRAIL_EQ) || strlen(p)-1 > HASH_LENGTH-1) { goto err; } MEM_FREE(keeptr); return 1; err: MEM_FREE(keeptr); return 0; } static void *get_salt(char *ciphertext) { char Buf[120], *ctcopy=Buf; char *p, *t; static struct custom_salt cs; memset(&cs, 0, sizeof(cs)); strncpy(Buf, ciphertext, 119); Buf[119] = 0; ctcopy += 9; /* skip over "$django$*" */ p = strtokm(ctcopy, "*"); cs.type = atoi(p); strtokm(NULL, "$"); t = strtokm(NULL, "$"); cs.iterations = atoi(t); t = strtokm(NULL, "$"); strcpy((char*)cs.salt, t); return (void *)&cs; } static void *get_binary(char *ciphertext) { static union { unsigned char c[BINARY_SIZE+1]; ARCH_WORD dummy; } buf; unsigned char *out = buf.c; char *p; p = strrchr(ciphertext, '$') + 1; base64_decode(p, strlen(p), (char*)out); return out; } static int get_hash_0(int index) { return crypt_out[index][0] & PH_MASK_0; } static int get_hash_1(int index) { return crypt_out[index][0] & PH_MASK_1; } static int get_hash_2(int index) { return crypt_out[index][0] & PH_MASK_2; } static int get_hash_3(int index) { return crypt_out[index][0] & PH_MASK_3; } static int get_hash_4(int index) { return crypt_out[index][0] & PH_MASK_4; } static int get_hash_5(int index) { return crypt_out[index][0] & PH_MASK_5; } static int get_hash_6(int index) { return crypt_out[index][0] & PH_MASK_6; } static void set_salt(void *salt) { cur_salt = (struct custom_salt *)salt; } static int crypt_all(int *pcount, struct db_salt *salt) { const int count = *pcount; int index = 0; #ifdef _OPENMP #pragma omp parallel for for (index = 0; index < count; index += MAX_KEYS_PER_CRYPT) #endif { #ifdef SIMD_COEF_32 int lens[MAX_KEYS_PER_CRYPT], i; unsigned char *pin[MAX_KEYS_PER_CRYPT]; union { ARCH_WORD_32 *pout[MAX_KEYS_PER_CRYPT]; unsigned char 
*poutc; } x; for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) { lens[i] = strlen(saved_key[i+index]); pin[i] = (unsigned char*)saved_key[i+index]; x.pout[i] = crypt_out[i+index]; } pbkdf2_sha256_sse((const unsigned char **)pin, lens, cur_salt->salt, strlen((char*)cur_salt->salt), cur_salt->iterations, &(x.poutc), 32, 0); #else // PKCS5_PBKDF2_HMAC(saved_key[index], strlen(saved_key[index]), // cur_salt->salt, strlen((char*)cur_salt->salt), // cur_salt->iterations, EVP_sha256(), 32, (unsigned char*)crypt_out[index]); pbkdf2_sha256((unsigned char *)saved_key[index], strlen(saved_key[index]), cur_salt->salt, strlen((char*)cur_salt->salt), cur_salt->iterations, (unsigned char*)crypt_out[index], 32, 0); #endif } return count; } static int cmp_all(void *binary, int count) { int index = 0; for (; index < count; index++) if (!memcmp(binary, crypt_out[index], ARCH_SIZE)) return 1; return 0; } static int cmp_one(void *binary, int index) { return !memcmp(binary, crypt_out[index], BINARY_SIZE); } static int cmp_exact(char *source, int index) { return 1; } static void django_set_key(char *key, int index) { strcpy(saved_key[index], key); } static char *get_key(int index) { return saved_key[index]; } static unsigned int iteration_count(void *salt) { struct custom_salt *my_salt; my_salt = salt; return (unsigned int)my_salt->iterations; } struct fmt_main fmt_django = { { FORMAT_LABEL, FORMAT_NAME, ALGORITHM_NAME, BENCHMARK_COMMENT, BENCHMARK_LENGTH, 0, PLAINTEXT_LENGTH, BINARY_SIZE, BINARY_ALIGN, SALT_SIZE, SALT_ALIGN, MIN_KEYS_PER_CRYPT, MAX_KEYS_PER_CRYPT, FMT_CASE | FMT_8_BIT | FMT_OMP, { "iteration count", }, django_tests }, { init, done, fmt_default_reset, fmt_default_prepare, valid, fmt_default_split, get_binary, get_salt, { iteration_count, }, fmt_default_source, { fmt_default_binary_hash_0, fmt_default_binary_hash_1, fmt_default_binary_hash_2, fmt_default_binary_hash_3, fmt_default_binary_hash_4, fmt_default_binary_hash_5, fmt_default_binary_hash_6 }, fmt_default_salt_hash, NULL, 
set_salt, django_set_key, get_key, fmt_default_clear_keys, crypt_all, { get_hash_0, get_hash_1, get_hash_2, get_hash_3, get_hash_4, get_hash_5, get_hash_6 }, cmp_all, cmp_one, cmp_exact } }; #endif /* plugin stanza */
rose_v1_accumulateForce.c
#include <omp.h>

/*
 * AccumulateForce: for each of the `len` output entries, gather the values
 * tmp[idx] for every idx in its index-list segment and add their sum into
 * force[ii].
 *
 * idxBound : len+1 offsets into idxList; segment ii is
 *            idxList[idxBound[ii] .. idxBound[ii+1]-1]
 * idxList  : concatenated gather indices into tmp
 * len      : number of output entries (and segments)
 * tmp      : values to gather (read-only here)
 * force    : accumulator array of length len (updated in place)
 *
 * Changes vs. the previous version:
 *  - dropped `register` (no effect in C, ill-formed in C++17);
 *  - parallelized the independent OUTER loop instead of running a
 *    fork/join + reduction for every single output entry, so the whole
 *    call uses one parallel region.
 */
void AccumulateForce(int *idxBound, int *idxList, int len, double *tmp, double *force)
{
#pragma omp parallel for schedule(static)
  for (int ii = 0; ii < len; ii++) {
    const int count = idxBound[ii + 1] - idxBound[ii];
    const int *list = &idxList[idxBound[ii]];
    double sum = 0.0;
    for (int jj = 0; jj < count; jj++)
      sum += tmp[list[jj]];
    force[ii] += sum;  /* each ii is written by exactly one iteration */
  }
}
GB_binop__lor_int64.c
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd): GB (_AaddB__lor_int64)
// A.*B function (eWiseMult): GB (_AemultB_08__lor_int64)
// A.*B function (eWiseMult): GB (_AemultB_02__lor_int64)
// A.*B function (eWiseMult): GB (_AemultB_04__lor_int64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__lor_int64)
// A*D function (colscale): GB (_AxD__lor_int64)
// D*A function (rowscale): GB (_DxB__lor_int64)
// C+=B function (dense accum): GB (_Cdense_accumB__lor_int64)
// C+=b function (dense accum): GB (_Cdense_accumb__lor_int64)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__lor_int64)
// C=scalar+B GB (_bind1st__lor_int64)
// C=scalar+B' GB (_bind1st_tran__lor_int64)
// C=A+scalar GB (_bind2nd__lor_int64)
// C=A'+scalar GB (_bind2nd_tran__lor_int64)

// C type: int64_t
// A type: int64_t
// B,b type: int64_t

// BinaryOp: cij = ((aij != 0) || (bij != 0))

// NOTE: this file instantiates logical OR on int64_t operands: any nonzero
// input is treated as true, and the result stored in C is 0 or 1.

#define GB_ATYPE \
int64_t

#define GB_BTYPE \
int64_t

#define GB_CTYPE \
int64_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int64_t aij = GBX (Ax, pA, A_iso)

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int64_t bij = GBX (Bx, pB, B_iso)

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int64_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator
// LOR: z = (x OR y), computed on "nonzero means true"; result is 0 or 1
#define GB_BINOP(z,x,y,i,j) \
z = ((x != 0) || (y != 0)) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
0

// op is second
#define GB_OP_IS_SECOND \
0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
// (the GxB_NO_* symbols are compile-time switches from GB_control.h)
#define GB_DISABLE \
(GxB_NO_LOR || GxB_NO_INT64 || GxB_NO_LOR_INT64)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__lor_int64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__lor_int64) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__lor_int64) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int64_t int64_t bwork = (*((int64_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__lor_int64) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t 
// NOTE(review): auto-generated SuiteSparse:GraphBLAS factory kernels for the
// LOR (logical OR) binary operator with int64 operands: z = (x != 0) || (y != 0).
// Each function body is expanded from a shared template via #include, driven by
// the GB_* macros defined earlier in this file.  Do not hand-edit the logic here;
// regenerate from the GraphBLAS kernel generator instead.
    *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *restrict Cx = (int64_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__lor_int64)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *restrict Cx = (int64_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__lor_int64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // workspaces for slicing M, A, and B by entry; released by GB_FREE_WORK
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__lor_int64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__lor_int64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        { 
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        { 
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__lor_int64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__lor_int64)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__lor_int64)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *Cx = (int64_t *) Cx_output ;
    int64_t x = (*((int64_t *) x_input)) ;
    int64_t *Bx = (int64_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    { 
        // skip entries not present in the bitmap
        if (!GBB (Bb, p)) continue ;
        int64_t bij = GBX (Bx, p, false) ;
        Cx [p] = ((x != 0) || (bij != 0)) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__lor_int64)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    int64_t *Cx = (int64_t *) Cx_output ;
    int64_t *Ax = (int64_t *) Ax_input ;
    int64_t y = (*((int64_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    { 
        if (!GBB (Ab, p)) continue ;
        int64_t aij = GBX (Ax, p, false) ;
        Cx [p] = ((aij != 0) || (y != 0)) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    int64_t aij = GBX (Ax, pA, false) ;         \
    Cx [pC] = ((x != 0) || (aij != 0)) ;        \
}

GrB_Info GB (_bind1st_tran__lor_int64)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{ 
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        int64_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t x = (*((const int64_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code that follows
    #undef  GB_ATYPE
    #define GB_ATYPE \
        int64_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    int64_t aij = GBX (Ax, pA, false) ;         \
    Cx [pC] = ((aij != 0) || (y != 0)) ;        \
}

GrB_Info GB (_bind2nd_tran__lor_int64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t y = (*((const int64_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
/* ===== file: pooling_hcl_arm.h ===== */
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * License); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*
 * Copyright (c) 2020, OPEN AI LAB
 * Author: qtang@openailab.com
 */

#include <assert.h>
#include <arm_neon.h>

#include "pooling_param.h"

/* dispatch tags for the specialized pooling kernels in this file */
#define POOL_GENERIC 0
#define POOL_K2S2 1
#define POOL_K3S2 2
#define POOL_K3S1 3

/* scalar max/min helpers for the tail elements the NEON loops leave over */
static inline float arm64_max(float a, float b)
{
    if (a > b)
        return a;
    else
        return b;
}

static inline float arm64_min(float a, float b)
{
    if (a > b)
        return b;
    else
        return a;
}

/* common shape of every pooling kernel below; the data pointers are
 * void* here, so kernels taking float* are cast at the dispatch site */
typedef void (*pooling_kernel_t)(const void* input, void* output, int inc, int inh, int inw, int outh,
                                 int outw, int, int, int, int, int, int, int pad_h1, int pad_w1, int);

/* 2x2 stride-2 average pooling, no left/top padding.
 * pad_w1/pad_h1 flag one partially-covered extra output column/row:
 * those outputs average 2 samples (edge, *0.5f) or copy 1 (corner).
 * Layout is CHW; k_h..pad_w0 and is_caffe are unused but keep the
 * signature uniform with the other kernels. */
static void avg_2x2s2(const float* input, float* output, int inc, int inh, int inw, int outh, int outw,
                      int k_h, int k_w, int s_h, int s_w, int pad_h0, int pad_w0, int pad_h1, int pad_w1,
                      int is_caffe)
{
    int in_hw = inw * inh;
    int out_hw = outh * outw;

    /* peel the partially-covered last column/row off the main loops */
    if (pad_w1 > 0)
    {
        outw--;
    }
    if (pad_h1 > 0)
    {
        outh--;
    }

    int block_w = outw >> 2;          /* 4 outputs (8 input floats) per NEON step */
    int remain_w = inw - outw * 2;    /* input columns left after the w loop */

    for (int c = 0; c < inc; c++)
    {
        const float* line0 = input + c * in_hw;
        const float* line1 = line0 + inw;
        float* out_ptr = output + c * out_hw;
        for (int i = 0; i < outh; i++)
        {
            for (int j = 0; j < block_w; j++)
            {
                /* vertical add, then pairwise-add across lanes -> 4 window sums */
                float32x4_t p00 = vld1q_f32(line0);
                float32x4_t p10 = vld1q_f32(line1);
                float32x4_t sum0 = vaddq_f32(p00, p10);
                float32x4_t p01 = vld1q_f32(line0 + 4);
                float32x4_t p11 = vld1q_f32(line1 + 4);
                float32x4_t sum1 = vaddq_f32(p01, p11);
#ifdef __aarch64__
                sum0 = vpaddq_f32(sum0, sum1);
#else
                float32x2_t sum0_1 = vpadd_f32(vget_low_f32(sum0), vget_high_f32(sum0));
                float32x2_t sum0_2 = vpadd_f32(vget_low_f32(sum1), vget_high_f32(sum1));
                sum0 = vcombine_f32(sum0_1, sum0_2);
#endif
                sum0 = vmulq_n_f32(sum0, 0.25f);
                vst1q_f32(out_ptr, sum0);
                line0 += 8;
                line1 += 8;
                out_ptr += 4;
            }
            for (int j = block_w * 4; j < outw; j++)
            {
                float32x2_t p1 = vld1_f32(line0);
                float32x2_t p2 = vld1_f32(line1);
                float32x2_t sum = vadd_f32(p1, p2);
                *out_ptr = (sum[0] + sum[1]) * 0.25f;
                out_ptr++;
                line0 += 2;
                line1 += 2;
            }
            if (pad_w1)
            {
                /* last column covers only one input column */
                *out_ptr = (line0[0] + line1[0]) * 0.5f;
                out_ptr++;
            }
            line0 += remain_w + inw;
            line1 += remain_w + inw;
        }
        if (pad_h1)
        {
            /* last row covers only one input row */
            for (int j = 0; j < block_w; j++)
            {
                float32x4_t p00 = vld1q_f32(line0);
                float32x4_t p01 = vld1q_f32(line0 + 4);
#ifdef __aarch64__
                p00 = vpaddq_f32(p00, p01);
#else
                float32x2_t sum0_1 = vpadd_f32(vget_low_f32(p00), vget_high_f32(p00));
                float32x2_t sum0_2 = vpadd_f32(vget_low_f32(p01), vget_high_f32(p01));
                p00 = vcombine_f32(sum0_1, sum0_2);
#endif
                p00 = vmulq_n_f32(p00, 0.5f);
                vst1q_f32(out_ptr, p00);
                line0 += 8;
                out_ptr += 4;
            }
            for (int j = block_w * 4; j < outw; j++)
            {
                float32x2_t p1 = vld1_f32(line0);
                *out_ptr = (p1[0] + p1[1]) * 0.5f;
                out_ptr++;
                line0 += 2;
            }
            if (pad_w1)
            {
                /* bottom-right corner: a single sample */
                *out_ptr = line0[0];
                out_ptr++;
            }
        }
    }
}

/* 2x2 stride-2 max pooling, no left/top padding; same layout and
 * pad_w1/pad_h1 convention as avg_2x2s2 above. */
static void max_2x2s2(const float* input, float* output, int inc, int inh, int inw, int outh, int outw,
                      int k_h, int k_w, int s_h, int s_w, int pad_h0, int pad_w0, int pad_h1, int pad_w1,
                      int is_caffe)
{
    int in_hw = inw * inh;
    int out_hw = outh * outw;

    if (pad_w1 > 0)
    {
        outw--;
    }
    if (pad_h1 > 0)
    {
        outh--;
    }

    int block_w = outw >> 2;
    int remain_w = inw - outw * 2;

    for (int c = 0; c < inc; c++)
    {
        const float* line0 = input + c * in_hw;
        const float* line1 = line0 + inw;
        float* out_ptr = output +
c * out_hw;
        for (int i = 0; i < outh; i++)
        {
            for (int j = 0; j < block_w; j++)
            {
                /* vertical max, then pairwise max across lanes -> 4 window maxima */
                float32x4_t p00 = vld1q_f32(line0);
                float32x4_t p10 = vld1q_f32(line1);
                float32x4_t p01 = vld1q_f32(line0 + 4);
                float32x4_t p11 = vld1q_f32(line1 + 4);
#ifdef __aarch64__
                float32x4_t max0 = vmaxq_f32(p00, p10);
                float32x4_t max1 = vmaxq_f32(p01, p11);
                /* pairwise max */
                float32x4_t _max = vpmaxq_f32(max0, max1);
#else
                float32x2_t max0_1 = vpmax_f32(vget_low_f32(p00), vget_low_f32(p10));
                float32x2_t max0_2 = vpmax_f32(vget_high_f32(p00), vget_high_f32(p10));
                max0_1 = vpmax_f32(max0_1, max0_2);
                float32x2_t max1_1 = vpmax_f32(vget_low_f32(p01), vget_low_f32(p11));
                float32x2_t max1_2 = vpmax_f32(vget_high_f32(p01), vget_high_f32(p11));
                max1_1 = vpmax_f32(max1_1, max1_2);
                float32x4_t _max = vcombine_f32(max0_1, max1_1);
#endif
                vst1q_f32(out_ptr, _max);
                line0 += 8;
                line1 += 8;
                out_ptr += 4;
            }
            for (int j = block_w * 4; j < outw; j++)
            {
                float32x2_t p1 = vld1_f32(line0);
                float32x2_t p2 = vld1_f32(line1);
                float32x2_t _max = vmax_f32(p1, p2);
                *out_ptr = arm64_max(_max[0], _max[1]);
                out_ptr++;
                line0 += 2;
                line1 += 2;
            }
            if (pad_w1 > 0)
            {
                /* last column covers only one input column */
                *out_ptr = arm64_max(line0[0], line1[0]);
                out_ptr++;
            }
            line0 += remain_w + inw;
            line1 += remain_w + inw;
        }
        if (pad_h1 > 0)
        {
            /* last row covers only one input row */
            for (int j = 0; j < block_w; j++)
            {
                float32x4_t p00 = vld1q_f32(line0);
                float32x4_t p01 = vld1q_f32(line0 + 4);
#ifdef __aarch64__
                p00 = vpmaxq_f32(p00, p01);
#else
                float32x2_t max0_1 = vpmax_f32(vget_low_f32(p00), vget_high_f32(p00));
                float32x2_t max0_2 = vpmax_f32(vget_low_f32(p01), vget_high_f32(p01));
                p00 = vcombine_f32(max0_1, max0_2);
#endif
                vst1q_f32(out_ptr, p00);
                line0 += 8;
                out_ptr += 4;
            }
            for (int j = block_w * 4; j < outw; j++)
            {
                float32x2_t p1 = vld1_f32(line0);
                *out_ptr = arm64_max(p1[0], p1[1]);
                out_ptr++;
                line0 += 2;
            }
            if (pad_w1 > 0)
            {
                *out_ptr = line0[0];
                out_ptr++;
            }
        }
    }
}

/* 3x3 stride-2 average pooling, no left/top padding.
 * pad_w1/pad_h1 give the number of missing input columns/rows (0, 1 or 2)
 * for one extra partially-covered output column/row; the divisor shrinks
 * accordingly (1/9 full window, 1/6 edge, 1/4, 1/3, 1/2 or copy at corners). */
static void avg_3x3s2(const float* input, float* output, int inc, int inh, int inw, int outh, int outw,
                      int k_h, int k_w, int s_h, int s_w, int pad_h0, int pad_w0, int pad_h1, int pad_w1,
                      int is_caffe)
{
    int in_hw = inw * inh;
    int out_hw = outh * outw;

    if (pad_w1 > 0)
    {
        outw--;
    }
    if (pad_h1 > 0)
    {
        outh--;
    }

    int block_w = outw >> 2;
    int remain_w = inw - outw * 2;

    for (int c = 0; c < inc; c++)
    {
        const float* line0 = input + c * in_hw;
        const float* line1 = line0 + inw;
        const float* line2 = line1 + inw;
        float* out_ptr = output + c * out_hw;
        for (int i = 0; i < outh; i++)
        {
            /* vld2q de-interleaves: val[0] = even columns, val[1] = odd columns */
            float32x4x2_t p00 = vld2q_f32(line0);
            float32x4x2_t p10 = vld2q_f32(line1);
            float32x4x2_t p20 = vld2q_f32(line2);
            for (int j = 0; j < block_w; j++)
            {
                /* per row: even + odd + next-even (via vext) = 3-wide window sum */
                float32x4x2_t p00_new = vld2q_f32(line0 + 8);
                float32x4_t sum0 = vaddq_f32(p00.val[0], p00.val[1]);
                float32x4_t p01 = vextq_f32(p00.val[0], p00_new.val[0], 1);
                sum0 = vaddq_f32(sum0, p01);
                float32x4x2_t p10_new = vld2q_f32(line1 + 8);
                float32x4_t sum1 = vaddq_f32(p10.val[0], p10.val[1]);
                float32x4_t p11 = vextq_f32(p10.val[0], p10_new.val[0], 1);
                sum1 = vaddq_f32(sum1, p11);
                float32x4x2_t p20_new = vld2q_f32(line2 + 8);
                float32x4_t sum2 = vaddq_f32(p20.val[0], p20.val[1]);
                float32x4_t p21 = vextq_f32(p20.val[0], p20_new.val[0], 1);
                sum2 = vaddq_f32(sum2, p21);
                sum0 = vaddq_f32(vaddq_f32(sum0, sum1), sum2);
                sum0 = vmulq_n_f32(sum0, 0.11111111f);
                vst1q_f32(out_ptr, sum0);
                p00 = p00_new;
                p10 = p10_new;
                p20 = p20_new;
                line0 += 8;
                line1 += 8;
                line2 += 8;
                out_ptr += 4;
            }
            for (int j = block_w * 4; j < outw; j++)
            {
                *out_ptr = (line0[0] + line0[1] + line0[2] + line1[0] + line1[1] + line1[2] + line2[0] +
                            line2[1] + line2[2]) *
                           0.11111111f;
                out_ptr++;
                line0 += 2;
                line1 += 2;
                line2 += 2;
            }
            if (pad_w1 == 1)
            {
                /* 3x2 window on the right edge */
                *out_ptr = (line0[0] + line0[1] + line1[0] + line1[1] + line2[0] + line2[1]) * 0.16666667f;
                out_ptr++;
            }
            line0 += remain_w + inw;
            line1 += remain_w + inw;
            line2 += remain_w + inw;
        }
        if (pad_h1 == 1)
        {
            /* 2x3 windows on the bottom edge */
            float32x4x2_t p00 = vld2q_f32(line0);
            float32x4x2_t p10 = vld2q_f32(line1);
            for (int j = 0; j < block_w; j++)
            {
                float32x4x2_t p00_new = vld2q_f32(line0 + 8);
                float32x4_t sum0 = vaddq_f32(p00.val[0], p00.val[1]);
float32x4_t p01 = vextq_f32(p00.val[0], p00_new.val[0], 1);
                sum0 = vaddq_f32(sum0, p01);
                float32x4x2_t p10_new = vld2q_f32(line1 + 8);
                float32x4_t sum1 = vaddq_f32(p10.val[0], p10.val[1]);
                float32x4_t p11 = vextq_f32(p10.val[0], p10_new.val[0], 1);
                sum1 = vaddq_f32(sum1, p11);
                sum0 = vaddq_f32(sum0, sum1);
                sum0 = vmulq_n_f32(sum0, 0.16666667f);
                vst1q_f32(out_ptr, sum0);
                p00 = p00_new;
                p10 = p10_new;
                line0 += 8;
                line1 += 8;
                out_ptr += 4;
            }
            for (int j = block_w * 4; j < outw; j++)
            {
                *out_ptr = (line0[0] + line0[1] + line0[2] + line1[0] + line1[1] + line1[2]) * 0.16666667f;
                out_ptr++;
                line0 += 2;
                line1 += 2;
            }
            if (pad_w1 == 1)
            {
                /* 2x2 bottom-right corner */
                *out_ptr = (line0[0] + line0[1] + line1[0] + line1[1]) * 0.25f;
                out_ptr++;
            }
            else if (pad_w1 == 2)
            {
                /* 2x1 bottom-right corner */
                *out_ptr = (line0[0] + line1[0]) * 0.5f;
                out_ptr++;
            }
        }
        else if (pad_h1 == 2)
        {
            /* 1x3 windows on the bottom edge */
            float32x4x2_t p00 = vld2q_f32(line0);
            for (int j = 0; j < block_w; j++)
            {
                float32x4x2_t p00_new = vld2q_f32(line0 + 8);
                float32x4_t sum0 = vaddq_f32(p00.val[0], p00.val[1]);
                float32x4_t p01 = vextq_f32(p00.val[0], p00_new.val[0], 1);
                sum0 = vaddq_f32(sum0, p01);
                sum0 = vmulq_n_f32(sum0, 0.3333333f);
                vst1q_f32(out_ptr, sum0);
                p00 = p00_new;
                line0 += 8;
                out_ptr += 4;
            }
            for (int j = block_w * 4; j < outw; j++)
            {
                *out_ptr = (line0[0] + line0[1] + line0[2]) * 0.3333333f;
                out_ptr++;
                line0 += 2;
            }
            if (pad_w1 == 1)
            {
                *out_ptr = (line0[0] + line0[1]) * 0.5f;
                out_ptr++;
            }
            else if (pad_w1 == 2)
            {
                *out_ptr = line0[0];
                out_ptr++;
            }
        }
    }
}

/* 3x3 stride-2 max pooling, no left/top padding; pad_w1/pad_h1 mark one
 * partially-covered extra output column/row (2-wide/2-high windows there). */
static void max_3x3s2(const float* input, float* output, int inc, int inh, int inw, int outh, int outw,
                      int k_h, int k_w, int s_h, int s_w, int pad_h0, int pad_w0, int pad_h1, int pad_w1,
                      int is_caffe)
{
    int in_hw = inw * inh;
    int out_hw = outh * outw;

    if (pad_w1 > 0)
    {
        outw--;
    }
    if (pad_h1 > 0)
    {
        outh--;
    }

    int block_w = outw >> 2;
    int remain_w = inw - outw * 2;

    for (int c = 0; c < inc; c++)
    {
        const float* line0 = input + c * in_hw;
        const float* line1 = line0 + inw;
        const float* line2 = line1 + inw;
        float* out_ptr = output + c * out_hw;
        for (int i = 0; i < outh; i++)
        {
            float32x4x2_t p00 = vld2q_f32(line0);
            float32x4x2_t p10 = vld2q_f32(line1);
            float32x4x2_t p20 = vld2q_f32(line2);
            for (int j = 0; j < block_w; j++)
            {
                /*
                   p00 = [1,2,3,4,5,6,7,8]
                   p00.val[0]=[1,3,5,7]
                   max0 = [2,4,6,8]
                   p00_new = [9,10,11,12,13,14,15,16]
                   p01 = [3,5,7,9]
                   max0=max(max0,p01)=[3,5,7,9]
                */
                float32x4x2_t p00_new = vld2q_f32(line0 + 8);
                float32x4_t max0 = vmaxq_f32(p00.val[0], p00.val[1]);
                float32x4_t p01 = vextq_f32(p00.val[0], p00_new.val[0], 1);
                max0 = vmaxq_f32(max0, p01);
                float32x4x2_t p10_new = vld2q_f32(line1 + 8);
                float32x4_t max1 = vmaxq_f32(p10.val[0], p10.val[1]);
                float32x4_t p11 = vextq_f32(p10.val[0], p10_new.val[0], 1);
                max1 = vmaxq_f32(max1, p11);
                float32x4x2_t p20_new = vld2q_f32(line2 + 8);
                float32x4_t max2 = vmaxq_f32(p20.val[0], p20.val[1]);
                float32x4_t p21 = vextq_f32(p20.val[0], p20_new.val[0], 1);
                max2 = vmaxq_f32(max2, p21);
                max0 = vmaxq_f32(vmaxq_f32(max0, max1), max2);
                vst1q_f32(out_ptr, max0);
                p00 = p00_new;
                p10 = p10_new;
                p20 = p20_new;
                line0 += 8;
                line1 += 8;
                line2 += 8;
                out_ptr += 4;
            }
            for (int j = block_w * 4; j < outw; j++)
            {
                float max0 = arm64_max(arm64_max(line0[0], line0[1]), line0[2]);
                float max1 = arm64_max(arm64_max(line1[0], line1[1]), line1[2]);
                float max2 = arm64_max(arm64_max(line2[0], line2[1]), line2[2]);
                *out_ptr = arm64_max(arm64_max(max0, max1), max2);
                out_ptr++;
                line0 += 2;
                line1 += 2;
                line2 += 2;
            }
            if (pad_w1 == 1)
            {
                /* 3x2 window on the right edge */
                float max0 = arm64_max(arm64_max(line0[0], line0[1]), arm64_max(line1[0], line1[1]));
                *out_ptr = arm64_max(arm64_max(line2[0], line2[1]), max0);
                out_ptr++;
            }
            line0 += remain_w + inw;
            line1 += remain_w + inw;
            line2 += remain_w + inw;
        }
        if (pad_h1 == 1)
        {
            /* 2x3 windows on the bottom edge */
            float32x4x2_t p00 = vld2q_f32(line0);
            float32x4x2_t p10 = vld2q_f32(line1);
            for (int j = 0; j < block_w; j++)
            {
                float32x4x2_t p00_new = vld2q_f32(line0 + 8);
                float32x4_t max0 = vmaxq_f32(p00.val[0], p00.val[1]);
                float32x4_t p01 = vextq_f32(p00.val[0], p00_new.val[0], 1);
                max0 = vmaxq_f32(max0, p01);
                float32x4x2_t p10_new =
vld2q_f32(line1 + 8);
                float32x4_t max1 = vmaxq_f32(p10.val[0], p10.val[1]);
                float32x4_t p11 = vextq_f32(p10.val[0], p10_new.val[0], 1);
                max1 = vmaxq_f32(max1, p11);
                vst1q_f32(out_ptr, vmaxq_f32(max0, max1));
                p00 = p00_new;
                p10 = p10_new;
                line0 += 8;
                line1 += 8;
                out_ptr += 4;
            }
            for (int j = block_w * 4; j < outw; j++)
            {
                float max0 = arm64_max(arm64_max(line0[0], line0[1]), line0[2]);
                float max1 = arm64_max(arm64_max(line1[0], line1[1]), line1[2]);
                *out_ptr = arm64_max(max0, max1);
                out_ptr++;
                line0 += 2;
                line1 += 2;
            }
            if (pad_w1 == 1)
            {
                /* 2x2 bottom-right corner */
                *out_ptr = arm64_max(arm64_max(line0[0], line0[1]), arm64_max(line1[0], line1[1]));
                out_ptr++;
            }
        }
    }
}

/* 2x2 stride-2 average pooling with one-pixel left/top padding.
 * The first output row/column only overlaps a single input row/column;
 * is_caffe selects the divisor convention for those border outputs
 * (is_caffe != 0 always divides by the full window size, 0.25f). */
static void avg_2x2s2_p1(const float* input, float* output, int inc, int inh, int inw, int outh, int outw,
                         int k_h, int k_w, int s_h, int s_w, int pad_h0, int pad_w0, int pad_h1, int pad_w1,
                         int is_caffe)
{
    int in_hw = inw * inh;
    int out_hw = outh * outw;

    /* with p1 padding an even input size also yields a partial last column/row */
    if (inw % 2 == 0)
        outw--;
    if (inh % 2 == 0)
        outh--;

    int block_w = (outw - 1) >> 2;
    int remain_w = inw - outw * 2 + 1;

    for (int c = 0; c < inc; c++)
    {
        const float* line00 = input + c * in_hw;
        float* out_ptr = output + c * out_hw;
        // h begin: first output row sees only input row 0
        if (is_caffe == 0)
            *out_ptr = line00[0];
        else
            *out_ptr = line00[0] * 0.25f;
        out_ptr++;
        line00++;
        for (int j = 0; j < block_w; j++)
        {
            float32x4_t p00 = vld1q_f32(line00);
            float32x4_t p01 = vld1q_f32(line00 + 4);
#ifdef __aarch64__
            float32x4_t sum0 = vpaddq_f32(p00, p01);
#else
            float32x2_t sum0_1 = vpadd_f32(vget_low_f32(p00), vget_high_f32(p00));
            float32x2_t sum0_2 = vpadd_f32(vget_low_f32(p01), vget_high_f32(p01));
            float32x4_t sum0 = vcombine_f32(sum0_1, sum0_2);
#endif
            if (is_caffe == 0)
                sum0 = vmulq_n_f32(sum0, 0.5f);
            else
                sum0 = vmulq_n_f32(sum0, 0.25f);
            vst1q_f32(out_ptr, sum0);
            line00 += 8;
            out_ptr += 4;
        }
        for (int j = block_w * 4 + 1; j < outw; j++)
        {
            if (is_caffe == 0)
                *out_ptr = (line00[0] + line00[1]) * 0.5f;
            else
                *out_ptr = (line00[0] + line00[1]) * 0.25f;
            out_ptr++;
            line00 += 2;
        }
        if (inw % 2 == 0)
        {
            /* partial last column of the first row */
            if (is_caffe == 0)
                *out_ptr = line00[0];
            else
                *out_ptr = line00[0] * 0.25f;
            out_ptr++;
        }
        line00 += remain_w;

        // h center: full 2x2 windows (left column still partial)
        const float* line0 = line00;
        const float* line1 = line0 + inw;
        for (int i = 1; i < outh; i++)
        {
            // w begin
            if (is_caffe == 0)
                *out_ptr = (line0[0] + line1[0]) * 0.5f;
            else
                *out_ptr = (line0[0] + line1[0]) * 0.25f;
            out_ptr++;
            line0++;
            line1++;
            // w center
            for (int j = 0; j < block_w; j++)
            {
                float32x4_t p00 = vld1q_f32(line0);
                float32x4_t p10 = vld1q_f32(line1);
                float32x4_t sum0 = vaddq_f32(p00, p10);
                float32x4_t p01 = vld1q_f32(line0 + 4);
                float32x4_t p11 = vld1q_f32(line1 + 4);
                float32x4_t sum1 = vaddq_f32(p01, p11);
#ifdef __aarch64__
                float32x4_t _sum = vpaddq_f32(sum0, sum1);
#else
                float32x2_t sum0_1 = vpadd_f32(vget_low_f32(sum0), vget_high_f32(sum0));
                float32x2_t sum0_2 = vpadd_f32(vget_low_f32(sum1), vget_high_f32(sum1));
                float32x4_t _sum = vcombine_f32(sum0_1, sum0_2);
#endif
                _sum = vmulq_n_f32(_sum, 0.25f);
                vst1q_f32(out_ptr, _sum);
                out_ptr += 4;
                line0 += 8;
                line1 += 8;
            }
            for (int j = block_w * 4 + 1; j < outw; j++)
            {
                *out_ptr = (line0[0] + line0[1] + line1[0] + line1[1]) * 0.25f;
                out_ptr++;
                line0 += 2;
                line1 += 2;
            }
            // w end
            if (inw % 2 == 0)
            {
                if (is_caffe == 0)
                    *out_ptr = (line0[0] + line1[0]) * 0.5f;
                else
                    *out_ptr = (line0[0] + line1[0]) * 0.25f;
                out_ptr++;
            }
            line0 += remain_w + inw;
            line1 += remain_w + inw;
        }

        // h end: partial last row when the input height is even
        if (inh % 2 == 0)
        {
            if (is_caffe == 0)
                *out_ptr = line0[0];
            else
                *out_ptr = line0[0] * 0.25f;
            out_ptr++;
            line0++;
            for (int j = 0; j < block_w; j++)
            {
                float32x4_t p00 = vld1q_f32(line0);
                float32x4_t p01 = vld1q_f32(line0 + 4);
#ifdef __aarch64__
                float32x4_t _sum = vpaddq_f32(p00, p01);
#else
                float32x2_t sum0_1 = vpadd_f32(vget_low_f32(p00), vget_high_f32(p00));
                float32x2_t sum0_2 = vpadd_f32(vget_low_f32(p01), vget_high_f32(p01));
                float32x4_t _sum = vcombine_f32(sum0_1, sum0_2);
#endif
                if (is_caffe == 0)
                    _sum = vmulq_n_f32(_sum, 0.5f);
                else
                    _sum = vmulq_n_f32(_sum, 0.25f);
                vst1q_f32(out_ptr, _sum);
                out_ptr += 4;
                line0 += 8;
            }
            for (int j = block_w * 4
+ 1; j < outw; j++)
            {
                if (is_caffe == 0)
                    *out_ptr = (line0[0] + line0[1]) * 0.5f;
                else
                    *out_ptr = (line0[0] + line0[1]) * 0.25f;
                out_ptr++;
                line0 += 2;
            }
            if (inw % 2 == 0)
            {
                /* bottom-right corner */
                if (is_caffe == 0)
                    *out_ptr = line0[0];
                else
                    *out_ptr = line0[0] * 0.25f;
            }
        }
    }
}

/* 2x2 stride-2 max pooling with one-pixel left/top padding; border
 * outputs take the max over the samples that actually exist, so no
 * is_caffe divisor distinction is needed here. */
static void max_2x2s2_p1(const float* input, float* output, int inc, int inh, int inw, int outh, int outw,
                         int k_h, int k_w, int s_h, int s_w, int pad_h0, int pad_w0, int pad_h1, int pad_w1,
                         int is_caffe)
{
    int in_hw = inw * inh;
    int out_hw = outh * outw;

    if (inw % 2 == 0)
        outw--;
    if (inh % 2 == 0)
        outh--;

    int block_w = (outw - 1) >> 2;
    int remain_w = inw - outw * 2 + 1;

    for (int c = 0; c < inc; c++)
    {
        const float* line00 = input + c * in_hw;
        float* out_ptr = output + c * out_hw;
        // h begin: first output row sees only input row 0
        *out_ptr = line00[0];
        out_ptr++;
        line00++;
        for (int j = 0; j < block_w; j++)
        {
            float32x4_t p00 = vld1q_f32(line00);
            float32x4_t p01 = vld1q_f32(line00 + 4);
#ifdef __aarch64__
            float32x4_t _max = vpmaxq_f32(p00, p01);
#else
            float32x2_t max0_1 = vpmax_f32(vget_low_f32(p00), vget_high_f32(p00));
            float32x2_t max0_2 = vpmax_f32(vget_low_f32(p01), vget_high_f32(p01));
            float32x4_t _max = vcombine_f32(max0_1, max0_2);
#endif
            vst1q_f32(out_ptr, _max);
            out_ptr += 4;
            line00 += 8;
        }
        for (int j = block_w * 4 + 1; j < outw; j++)
        {
            *out_ptr = arm64_max(line00[0], line00[1]);
            out_ptr++;
            line00 += 2;
        }
        if (inw % 2 == 0)
        {
            *out_ptr = line00[0];
            out_ptr++;
        }
        line00 += remain_w;

        // h center: full 2x2 windows (left column still partial)
        const float* line0 = line00;
        const float* line1 = line0 + inw;
        for (int i = 1; i < outh; i++)
        {
            // w begin
            *out_ptr = arm64_max(line0[0], line1[0]);
            out_ptr++;
            line0++;
            line1++;
            // w center
            for (int j = 0; j < block_w; j++)
            {
                float32x4_t p00 = vld1q_f32(line0);
                float32x4_t p10 = vld1q_f32(line1);
                float32x4_t p01 = vld1q_f32(line0 + 4);
                float32x4_t p11 = vld1q_f32(line1 + 4);
#ifdef __aarch64__
                float32x4_t max0 = vmaxq_f32(p00, p10);
                float32x4_t max1 = vmaxq_f32(p01, p11);
                float32x4_t _max = vpmaxq_f32(max0, max1);
#else
                float32x2_t max0_1 = vpmax_f32(vget_low_f32(p00), vget_low_f32(p10));
                float32x2_t max0_2 = vpmax_f32(vget_high_f32(p00), vget_high_f32(p10));
                max0_1 = vpmax_f32(max0_1, max0_2);
                float32x2_t max1_1 = vpmax_f32(vget_low_f32(p01), vget_low_f32(p11));
                float32x2_t max1_2 = vpmax_f32(vget_high_f32(p01), vget_high_f32(p11));
                max1_1 = vpmax_f32(max1_1, max1_2);
                float32x4_t _max = vcombine_f32(max0_1, max1_1);
#endif
                vst1q_f32(out_ptr, _max);
                out_ptr += 4;
                line0 += 8;
                line1 += 8;
            }
            for (int j = block_w * 4 + 1; j < outw; j++)
            {
                float32x2_t p1 = vld1_f32(line0);
                float32x2_t p2 = vld1_f32(line1);
                float32x2_t _max = vmax_f32(p1, p2);
                *out_ptr = arm64_max(_max[0], _max[1]);
                out_ptr++;
                line0 += 2;
                line1 += 2;
            }
            // w end
            if (inw % 2 == 0)
            {
                *out_ptr = arm64_max(line0[0], line1[0]);
                out_ptr++;
            }
            line0 += remain_w + inw;
            line1 += remain_w + inw;
        }

        // h end: partial last row when the input height is even
        if (inh % 2 == 0)
        {
            *out_ptr = line0[0];
            out_ptr++;
            line0++;
            for (int j = 0; j < block_w; j++)
            {
                float32x4_t p00 = vld1q_f32(line0);
                float32x4_t p01 = vld1q_f32(line0 + 4);
#ifdef __aarch64__
                float32x4_t _max = vpmaxq_f32(p00, p01);
#else
                float32x2_t max0_1 = vpmax_f32(vget_low_f32(p00), vget_high_f32(p00));
                float32x2_t max0_2 = vpmax_f32(vget_low_f32(p01), vget_high_f32(p01));
                float32x4_t _max = vcombine_f32(max0_1, max0_2);
#endif
                vst1q_f32(out_ptr, _max);
                out_ptr += 4;
                line0 += 8;
            }
            for (int j = block_w * 4 + 1; j < outw; j++)
            {
                *out_ptr = arm64_max(line0[0], line0[1]);
                out_ptr++;
                line0 += 2;
            }
            if (inw % 2 == 0)
            {
                *out_ptr = line0[0];
            }
        }
    }
}

/* 3x3 stride-2 max pooling with one-pixel left/top padding.
 * The first output row/column sees 2x2 / 2x3 windows; is_caffe == 1
 * additionally forces a trailing partial output row/column. */
static void max_3x3s2_p1(const float* input, float* output, int inc, int inh, int inw, int outh, int outw,
                         int k_h, int k_w, int s_h, int s_w, int pad_h0, int pad_w0, int pad_h1, int pad_w1,
                         int is_caffe)
{
    // fprintf(stderr, "max_3x3s2_p1\n");
    int in_hw = inw * inh;
    int out_hw = outh * outw;

    if (is_caffe == 1 || inw % 2 == 1)
        outw--;
    if (is_caffe == 1 || inh % 2 == 1)
        outh--;

    int block_w = (outw - 1) >> 2;
    int remain_w = inw - outw * 2 + 1;

    for (int c = 0; c < inc; c++)
    {
        /* the first window row is padding, so the kernel starts on
           input rows 0 and 1, named line1/line2 here */
        const float* line1 = input + c *
in_hw;
        const float* line2 = line1 + inw;
        float* out_ptr = output + c * out_hw;
        // h begin ---------------------------------------
        *out_ptr = arm64_max(arm64_max(line1[0], line1[1]), arm64_max(line2[0], line2[1]));
        out_ptr++;
        line1 += 1;
        line2 += 1;
        float32x4x2_t p10 = vld2q_f32(line1);
        float32x4x2_t p20 = vld2q_f32(line2);
        for (int j = 0; j < block_w; j++)
        {
            float32x4x2_t p10_new = vld2q_f32(line1 + 8);
            float32x4_t max1 = vmaxq_f32(p10.val[0], p10.val[1]);
            float32x4_t p11 = vextq_f32(p10.val[0], p10_new.val[0], 1);
            max1 = vmaxq_f32(max1, p11);
            float32x4x2_t p20_new = vld2q_f32(line2 + 8);
            float32x4_t max2 = vmaxq_f32(p20.val[0], p20.val[1]);
            float32x4_t p21 = vextq_f32(p20.val[0], p20_new.val[0], 1);
            max2 = vmaxq_f32(max2, p21);
            max1 = vmaxq_f32(max1, max2);
            vst1q_f32(out_ptr, max1);
            p10 = p10_new;
            p20 = p20_new;
            line1 += 8;
            line2 += 8;
            out_ptr += 4;
        }
        for (int j = block_w * 4 + 1; j < outw; j++)
        {
            float max1 = arm64_max(arm64_max(line1[0], line1[1]), line1[2]);
            float max2 = arm64_max(arm64_max(line2[0], line2[1]), line2[2]);
            *out_ptr = arm64_max(max1, max2);
            out_ptr++;
            line1 += 2;
            line2 += 2;
        }
        if (inw % 2 == 1)
        {
            *out_ptr = arm64_max(arm64_max(line1[0], line1[1]), arm64_max(line2[0], line2[1]));
            out_ptr++;
        }
        else if (is_caffe == 1 && inw % 2 == 0)
        {
            *out_ptr = arm64_max(line1[0], line2[0]);
            out_ptr++;
        }
        line1 += remain_w;
        line2 += remain_w;

        // h center ---------------------------------------
        const float* line0 = line1;
        line1 = line2;
        line2 = line1 + inw;
        for (int i = 1; i < outh; i++)
        {
            // left
            float max0 = arm64_max(arm64_max(line1[0], line1[1]), arm64_max(line2[0], line2[1]));
            *out_ptr = arm64_max(arm64_max(line0[0], line0[1]), max0);
            out_ptr++;
            line0 += 1;
            line1 += 1;
            line2 += 1;
            // mid
            float32x4x2_t p00 = vld2q_f32(line0);
            float32x4x2_t p10 = vld2q_f32(line1);
            float32x4x2_t p20 = vld2q_f32(line2);
            for (int j = 0; j < block_w; j++)
            {
                float32x4x2_t p00_new = vld2q_f32(line0 + 8);
                float32x4_t max0 = vmaxq_f32(p00.val[0], p00.val[1]);
                float32x4_t p01 = vextq_f32(p00.val[0], p00_new.val[0], 1);
                max0 = vmaxq_f32(max0, p01);
                float32x4x2_t p10_new = vld2q_f32(line1 + 8);
                float32x4_t max1 = vmaxq_f32(p10.val[0], p10.val[1]);
                float32x4_t p11 = vextq_f32(p10.val[0], p10_new.val[0], 1);
                max1 = vmaxq_f32(max1, p11);
                float32x4x2_t p20_new = vld2q_f32(line2 + 8);
                float32x4_t max2 = vmaxq_f32(p20.val[0], p20.val[1]);
                float32x4_t p21 = vextq_f32(p20.val[0], p20_new.val[0], 1);
                max2 = vmaxq_f32(max2, p21);
                max0 = vmaxq_f32(vmaxq_f32(max0, max1), max2);
                vst1q_f32(out_ptr, max0);
                p00 = p00_new;
                p10 = p10_new;
                p20 = p20_new;
                line0 += 8;
                line1 += 8;
                line2 += 8;
                out_ptr += 4;
            }
            for (int j = block_w * 4 + 1; j < outw; j++)
            {
                float max0 = arm64_max(arm64_max(line0[0], line0[1]), line0[2]);
                float max1 = arm64_max(arm64_max(line1[0], line1[1]), line1[2]);
                float max2 = arm64_max(arm64_max(line2[0], line2[1]), line2[2]);
                *out_ptr = arm64_max(arm64_max(max0, max1), max2);
                out_ptr++;
                line0 += 2;
                line1 += 2;
                line2 += 2;
            }
            if (inw % 2 == 1)
            {
                /* reuses max0 declared at the row's "left" step */
                max0 = arm64_max(arm64_max(line1[0], line1[1]), arm64_max(line2[0], line2[1]));
                *out_ptr = arm64_max(arm64_max(line0[0], line0[1]), max0);
                out_ptr++;
            }
            else if (inw % 2 == 0 && is_caffe == 1)
            {
                *out_ptr = arm64_max(arm64_max(line0[0], line1[0]), line2[0]);
                out_ptr++;
            }
            line0 += inw + remain_w;
            line1 += inw + remain_w;
            line2 += inw + remain_w;
        }

        // h end ------------------------------------------
        if (inh % 2 == 1)
        {
            /* bottom row: 2x* windows over line0/line1 */
            *out_ptr = arm64_max(arm64_max(line1[0], line1[1]), arm64_max(line0[0], line0[1]));
            out_ptr++;
            line0 += 1;
            line1 += 1;
            float32x4x2_t p00 = vld2q_f32(line0);
            float32x4x2_t p10 = vld2q_f32(line1);
            for (int j = 0; j < block_w; j++)
            {
                float32x4x2_t p00_new = vld2q_f32(line0 + 8);
                float32x4_t max0 = vmaxq_f32(p00.val[0], p00.val[1]);
                float32x4_t p01 = vextq_f32(p00.val[0], p00_new.val[0], 1);
                max0 = vmaxq_f32(max0, p01);
                float32x4x2_t p10_new = vld2q_f32(line1 + 8);
                float32x4_t max1 = vmaxq_f32(p10.val[0], p10.val[1]);
                float32x4_t p11 = vextq_f32(p10.val[0], p10_new.val[0], 1);
                max1 =
vmaxq_f32(max1, p11); max0 = vmaxq_f32(max0, max1); vst1q_f32(out_ptr, max0); p00 = p00_new; p10 = p10_new; line0 += 8; line1 += 8; out_ptr += 4; } for (int j = block_w * 4 + 1; j < outw; j++) { float max0 = arm64_max(arm64_max(line0[0], line0[1]), line0[2]); float max1 = arm64_max(arm64_max(line1[0], line1[1]), line1[2]); *out_ptr = arm64_max(max0, max1); out_ptr++; line0 += 2; line1 += 2; } if (inw % 2 == 1) { *out_ptr = arm64_max(arm64_max(line1[0], line1[1]), arm64_max(line0[0], line0[1])); out_ptr++; } else if (inw % 2 == 0 && is_caffe == 1) { *out_ptr = arm64_max(line0[0], line1[0]); out_ptr++; } } else if (inh % 2 == 0 && is_caffe == 1) { *out_ptr = arm64_max(line0[0], line0[1]); out_ptr++; line0 += 1; float32x4x2_t p00 = vld2q_f32(line0); for (int j = 0; j < block_w; j++) { float32x4x2_t p00_new = vld2q_f32(line0 + 8); float32x4_t max0 = vmaxq_f32(p00.val[0], p00.val[1]); float32x4_t p01 = vextq_f32(p00.val[0], p00_new.val[0], 1); max0 = vmaxq_f32(max0, p01); vst1q_f32(out_ptr, max0); p00 = p00_new; line0 += 8; out_ptr += 4; } for (int j = block_w * 4 + 1; j < outw; j++) { *out_ptr = arm64_max(arm64_max(line0[0], line0[1]), line0[2]); out_ptr++; line0 += 2; } if (inw % 2 == 1) { *out_ptr = arm64_max(line0[0], line0[1]); out_ptr++; } else if (inw % 2 == 0) { *out_ptr = line0[0]; } } } } static void avg_3x3s2_p1(const float* input, float* output, int inc, int inh, int inw, int outh, int outw, int k_h, int k_w, int s_h, int s_w, int pad_h0, int pad_w0, int pad_h1, int pad_w1, int is_caffe) { int in_hw = inw * inh; int out_hw = outh * outw; if (is_caffe == 1 || inw % 2 == 1) outw--; if (is_caffe == 1 || inh % 2 == 1) outh--; int block_w = (outw - 1) >> 2; int remain_w = inw - outw * 2 + 1; for (int c = 0; c < inc; c++) { const float* line1 = input + c * in_hw; const float* line2 = line1 + inw; float* out_ptr = output + c * out_hw; // h begin --------------------------------------- if (is_caffe == 0) *out_ptr = (line1[0] + line1[1] + line2[0] + line2[1]) * 
0.25f; else *out_ptr = (line1[0] + line1[1] + line2[0] + line2[1]) * 0.11111111f; out_ptr++; line1 += 1; line2 += 1; float32x4x2_t p10 = vld2q_f32(line1); float32x4x2_t p20 = vld2q_f32(line2); for (int j = 0; j < block_w; j++) { float32x4x2_t p10_new = vld2q_f32(line1 + 8); float32x4_t sum1 = vaddq_f32(p10.val[0], p10.val[1]); float32x4_t p11 = vextq_f32(p10.val[0], p10_new.val[0], 1); sum1 = vaddq_f32(sum1, p11); float32x4x2_t p20_new = vld2q_f32(line2 + 8); float32x4_t sum2 = vaddq_f32(p20.val[0], p20.val[1]); float32x4_t p21 = vextq_f32(p20.val[0], p20_new.val[0], 1); sum2 = vaddq_f32(sum2, p21); sum1 = vaddq_f32(sum1, sum2); if (is_caffe == 0) sum1 = vmulq_n_f32(sum1, 0.16666667f); else sum1 = vmulq_n_f32(sum1, 0.11111111f); vst1q_f32(out_ptr, sum1); p10 = p10_new; p20 = p20_new; line1 += 8; line2 += 8; out_ptr += 4; } for (int j = block_w * 4 + 1; j < outw; j++) { if (is_caffe == 0) *out_ptr = (line1[0] + line1[1] + line1[2] + line2[0] + line2[1] + line2[2]) * 0.16666667f; else *out_ptr = (line1[0] + line1[1] + line1[2] + line2[0] + line2[1] + line2[2]) * 0.11111111f; out_ptr++; line1 += 2; line2 += 2; } if (inw % 2 == 1) { if (is_caffe == 0) *out_ptr = (line1[0] + line1[1] + line2[0] + line2[1]) * 0.25f; else *out_ptr = (line1[0] + line1[1] + line2[0] + line2[1]) * 0.11111111f; out_ptr++; } else if (inw % 2 == 0 && is_caffe == 1) { *out_ptr = (line1[0] + line2[0]) * 0.16666667f; out_ptr++; } line1 += remain_w; line2 += remain_w; // h center --------------------------------------- const float* line0 = line1; line1 = line2; line2 = line1 + inw; for (int i = 1; i < outh; i++) { // left if (is_caffe == 0) *out_ptr = (line0[0] + line0[1] + line1[0] + line1[1] + line2[0] + line2[1]) * 0.16666667f; else *out_ptr = (line0[0] + line0[1] + line1[0] + line1[1] + line2[0] + line2[1]) * 0.11111111f; out_ptr++; line0 += 1; line1 += 1; line2 += 1; // mid float32x4x2_t p00 = vld2q_f32(line0); float32x4x2_t p10 = vld2q_f32(line1); float32x4x2_t p20 = vld2q_f32(line2); for 
(int j = 0; j < block_w; j++)
{
    /* NOTE(review): continuation of avg_3x3s2_p1's interior rows; the
     * vld2q/vextq deinterleave trick gathers the three taps of each
     * stride-2 window. Tokens unchanged, only comments/whitespace added. */
    float32x4x2_t p00_new = vld2q_f32(line0 + 8);
    float32x4_t sum0 = vaddq_f32(p00.val[0], p00.val[1]);
    float32x4_t p01 = vextq_f32(p00.val[0], p00_new.val[0], 1);
    sum0 = vaddq_f32(sum0, p01);
    float32x4x2_t p10_new = vld2q_f32(line1 + 8);
    float32x4_t sum1 = vaddq_f32(p10.val[0], p10.val[1]);
    float32x4_t p11 = vextq_f32(p10.val[0], p10_new.val[0], 1);
    sum1 = vaddq_f32(sum1, p11);
    float32x4x2_t p20_new = vld2q_f32(line2 + 8);
    float32x4_t sum2 = vaddq_f32(p20.val[0], p20.val[1]);
    float32x4_t p21 = vextq_f32(p20.val[0], p20_new.val[0], 1);
    sum2 = vaddq_f32(sum2, p21);
    sum0 = vaddq_f32(vaddq_f32(sum0, sum1), sum2);
    sum0 = vmulq_n_f32(sum0, 0.11111111f); /* full 3x3 window: 1/9 */
    vst1q_f32(out_ptr, sum0);
    p00 = p00_new;
    p10 = p10_new;
    p20 = p20_new;
    line0 += 8;
    line1 += 8;
    line2 += 8;
    out_ptr += 4;
}
for (int j = block_w * 4 + 1; j < outw; j++)
{
    *out_ptr = (line0[0] + line0[1] + line0[2] + line1[0] + line1[1] + line1[2] + line2[0] + line2[1] + line2[2]) * 0.11111111f;
    out_ptr++;
    line0 += 2;
    line1 += 2;
    line2 += 2;
}
// end
if (inw % 2 == 1)
{
    if (is_caffe == 0)
        *out_ptr = (line0[0] + line0[1] + line1[0] + line1[1] + line2[0] + line2[1]) * 0.16666667f;
    else
        *out_ptr = (line0[0] + line0[1] + line1[0] + line1[1] + line2[0] + line2[1]) * 0.11111111f;
    out_ptr++;
}
else if (inw % 2 == 0 && is_caffe == 1)
{
    *out_ptr = (line0[0] + line1[0] + line2[0]) * 0.16666667f;
    out_ptr++;
}
line0 += remain_w + inw;
line1 += remain_w + inw;
line2 += remain_w + inw;
}
// h end-------------------------------
if (inh % 2 == 1)
{
    /* odd input height: bottom output row pools a 2-row window */
    if (is_caffe == 0)
        *out_ptr = (line0[0] + line0[1] + line1[0] + line1[1]) * 0.25f;
    else
        *out_ptr = (line0[0] + line0[1] + line1[0] + line1[1]) * 0.11111111f;
    out_ptr++;
    line0 += 1;
    line1 += 1;
    float32x4x2_t p00 = vld2q_f32(line0);
    float32x4x2_t p10 = vld2q_f32(line1);
    for (int j = 0; j < block_w; j++)
    {
        float32x4x2_t p00_new = vld2q_f32(line0 + 8);
        float32x4_t sum0 = vaddq_f32(p00.val[0], p00.val[1]);
        float32x4_t p01 = vextq_f32(p00.val[0], p00_new.val[0], 1);
        sum0 = vaddq_f32(sum0, p01);
        float32x4x2_t p10_new = vld2q_f32(line1 + 8);
        float32x4_t sum1 = vaddq_f32(p10.val[0], p10.val[1]);
        float32x4_t p11 = vextq_f32(p10.val[0], p10_new.val[0], 1);
        sum1 = vaddq_f32(sum1, p11);
        sum0 = vaddq_f32(sum0, sum1);
        if (is_caffe == 0)
            sum0 = vmulq_n_f32(sum0, 0.16666667f);
        else
            sum0 = vmulq_n_f32(sum0, 0.11111111f);
        vst1q_f32(out_ptr, sum0);
        p00 = p00_new;
        p10 = p10_new;
        line0 += 8;
        line1 += 8;
        out_ptr += 4;
    }
    for (int j = block_w * 4 + 1; j < outw; j++)
    {
        if (is_caffe == 0)
            *out_ptr = (line0[0] + line0[1] + line0[2] + line1[0] + line1[1] + line1[2]) * 0.16666667f;
        else
            *out_ptr = (line0[0] + line0[1] + line0[2] + line1[0] + line1[1] + line1[2]) * 0.11111111f;
        out_ptr++;
        line0 += 2;
        line1 += 2;
    }
    if (inw % 2 == 1)
    {
        if (is_caffe == 0)
            *out_ptr = (line0[0] + line0[1] + line1[0] + line1[1]) * 0.25f;
        else
            *out_ptr = (line0[0] + line0[1] + line1[0] + line1[1]) * 0.11111111f;
        out_ptr++;
    }
    else if (inw % 2 == 0 && is_caffe == 1)
    {
        *out_ptr = (line0[0] + line1[0]) * 0.16666667f;
        out_ptr++;
    }
}
else if (inw % 2 == 0 && is_caffe == 1)
{
    /* NOTE(review): this branch tests inw where the matching branch of the
     * max kernel above tests inh - looks like a copy/paste slip; verify
     * against the generic reference kernel before changing. */
    *out_ptr = (line0[0] + line0[1]) * 0.16666667f;
    out_ptr++;
    line0 += 1;
    float32x4x2_t p00 = vld2q_f32(line0);
    for (int j = 0; j < block_w; j++)
    {
        float32x4x2_t p00_new = vld2q_f32(line0 + 8);
        float32x4_t sum0 = vaddq_f32(p00.val[0], p00.val[1]);
        float32x4_t p01 = vextq_f32(p00.val[0], p00_new.val[0], 1);
        sum0 = vaddq_f32(sum0, p01);
        sum0 = vmulq_n_f32(sum0, 0.16666667f);
        vst1q_f32(out_ptr, sum0);
        p00 = p00_new;
        line0 += 8;
        out_ptr += 4;
    }
    for (int j = block_w * 4 + 1; j < outw; j++)
    {
        *out_ptr = (line0[0] + line0[1] + line0[2]) * 0.16666667f;
        out_ptr++;
        line0 += 2;
    }
    if (inw % 2 == 1)
    {
        *out_ptr = (line0[0] + line0[1]) * 0.16666667f;
        out_ptr++;
    }
    else if (inw % 2 == 0)
    {
        *out_ptr = line0[0] * 0.25f;
        out_ptr++;
    }
}
}
}

/* max_3x3s1_p1: 3x3 max pooling, stride 1, pad 1 (output plane same size as input). */
static void max_3x3s1_p1(const float* input, float* output, int inc, int inh, int inw, int outh, int outw,
                         int k_h, int k_w, int s_h, int s_w, int pad_h0, int pad_w0, int pad_h1, int pad_w1,
                         int
is_caffe)
{
    // fprintf(stderr, "max_3x3s1_p1\n");
    int in_hw = inw * inh;
    int mid_w = inw - 2; /* interior columns (both borders handled separately) */
    int mid_h = inh - 2; /* interior rows */
    for (int c = 0; c < inc; c++)
    {
        const float* line1 = input + c * in_hw;
        const float* line2 = line1 + inw;
        float* out_ptr = output + c * in_hw; /* s1/p1: out plane == in plane size */
        // h begin left----[line1+=0]-----------------------------------
        *out_ptr = arm64_max(arm64_max(line1[0], line1[1]), arm64_max(line2[0], line2[1]));
        out_ptr++;
        // h begin center----[line1+=1]----------------------------------
        for (int j = 0; j < mid_w; j++)
        {
            float max1 = arm64_max(arm64_max(line1[0], line1[1]), line1[2]);
            float max2 = arm64_max(arm64_max(line2[0], line2[1]), line2[2]);
            *out_ptr = arm64_max(max2, max1);
            out_ptr++;
            line1 += 1;
            line2 += 1;
        }
        // h begin right----[line1+=2]-----------------------------------
        *out_ptr = arm64_max(arm64_max(line1[0], line1[1]), arm64_max(line2[0], line2[1]));
        out_ptr++;
        line1 += 2;
        line2 += 2;
        // h center ---------------------------------------
        const float* line0 = input + c * in_hw;
        for (int i = 0; i < mid_h; i++)
        {
            // left
            float max0 = arm64_max(arm64_max(line1[0], line1[1]), arm64_max(line2[0], line2[1]));
            *out_ptr = arm64_max(arm64_max(line0[0], line0[1]), max0);
            out_ptr++;
            // mid
            for (int j = 0; j < mid_w; j++)
            {
                float max0 = arm64_max(arm64_max(line0[0], line0[1]), line0[2]);
                float max1 = arm64_max(arm64_max(line1[0], line1[1]), line1[2]);
                float max2 = arm64_max(arm64_max(line2[0], line2[1]), line2[2]);
                *out_ptr = arm64_max(arm64_max(max0, max1), max2);
                out_ptr++;
                line0 += 1;
                line1 += 1;
                line2 += 1;
            }
            // right
            max0 = arm64_max(arm64_max(line1[0], line1[1]), arm64_max(line2[0], line2[1]));
            *out_ptr = arm64_max(arm64_max(line0[0], line0[1]), max0);
            out_ptr++;
            line0 += 2;
            line1 += 2;
            line2 += 2;
        }
        // h end ------------------------------------------
        *out_ptr = arm64_max(arm64_max(line1[0], line1[1]), arm64_max(line0[0], line0[1]));
        out_ptr++;
        for (int j = 0; j < mid_w; j++)
        {
            float max0 = arm64_max(arm64_max(line0[0], line0[1]), line0[2]);
            float max1 =
                arm64_max(arm64_max(line1[0], line1[1]), line1[2]);
            *out_ptr = arm64_max(max0, max1);
            out_ptr++;
            line0 += 1;
            line1 += 1;
        }
        *out_ptr = arm64_max(arm64_max(line1[0], line1[1]), arm64_max(line0[0], line0[1]));
    }
}

/* avg_global: global average pooling - one output value per input channel. */
static void avg_global(const float* input, float* output, int inc, int inh, int inw, int outh, int outw,
                       int k_h, int k_w, int s_h, int s_w, int pad_h0, int pad_w0, int pad_h1, int pad_w1,
                       int is_caffe)
{
    int in_hw = inw * inh;
    int block = in_hw >> 3; /* 8-float vector iterations */
    int tail = in_hw & ~7;  /* first index left for the scalar loop */
    for (int c = 0; c < inc; c++)
    {
        const float* line0 = input + c * in_hw;
        float* out_ptr = output + c;
        float sum = 0.f;
        for (int j = 0; j < block; j++)
        {
            float32x4_t p00 = vld1q_f32(line0);
            float32x4_t p01 = vld1q_f32(line0 + 4);
            p00 = vaddq_f32(p00, p01);
            // p00=vpaddq_f32(p00,p00);
            // sum+=(p00[0]+p00[1]);
            sum += (p00[0] + p00[1] + p00[2] + p00[3]);
            line0 += 8;
        }
        for (int j = tail; j < in_hw; j++)
        {
            sum += line0[0];
            line0++;
        }
        *out_ptr = sum / in_hw;
    }
}

/* max_global: global max pooling - one output value per input channel.
 * NOTE(review): `res` is seeded from the first 4 floats of the plane before
 * the loop, so in_hw is assumed to be at least 4 - confirm callers. */
static void max_global(const float* input, float* output, int inc, int inh, int inw, int outh, int outw,
                       int k_h, int k_w, int s_h, int s_w, int pad_h0, int pad_w0, int pad_h1, int pad_w1,
                       int is_caffe)
{
    int in_hw = inw * inh;
    int block = in_hw >> 3;
    int tail = in_hw & ~7;
    for (int c = 0; c < inc; c++)
    {
        const float* line0 = input + c * in_hw;
        float* out_ptr = output + c;
        float32x4_t p00 = vld1q_f32(line0);
        float32x4_t res = p00;
        for (int j = 0; j < block; j++)
        {
            float32x4_t p00 = vld1q_f32(line0);
            float32x4_t p01 = vld1q_f32(line0 + 4);
            float32x4_t max0 = vmaxq_f32(p00, p01);
            res = vmaxq_f32(res, max0);
            line0 += 8;
        }
        float max_ = arm64_max(arm64_max(res[0], res[1]), arm64_max(res[2], res[3]));
        for (int j = tail; j < in_hw; j++)
        {
            max_ = arm64_max(max_, line0[0]);
            line0++;
        }
        *out_ptr = max_;
    }
}

/* pooling_kernel_perf_prerun: choose the specialised kernel that matches the
 * pooling parameters and store it in param->funct.
 * Returns 0 on success, -1 when no specialised kernel exists. */
int pooling_kernel_perf_prerun(struct ir_tensor* input, struct ir_tensor* out, struct pool_param* param)
{
    int pool_size = POOL_GENERIC;
    /* global pooling */
    if (param->global)
    {
        if (param->pool_method == POOL_AVG)
            param->funct = (
pooling_kernel_t )avg_global;
        else if (param->pool_method == POOL_MAX)
            param->funct = ( pooling_kernel_t )max_global;
        assert(param->funct != NULL);
        return 0;
    }
    /* general pooling: classify the (kernel, stride) combination first */
    if (param->stride_h == 2 && param->stride_w == 2)
    {
        if (param->kernel_h == 2 && param->kernel_w == 2)
            pool_size = POOL_K2S2;
        else if (param->kernel_h == 3 && param->kernel_w == 3)
            pool_size = POOL_K3S2;
    }
    else if (param->stride_h == 1 && param->stride_w == 1)
    {
        if (param->kernel_h == 3 && param->kernel_w == 3)
            pool_size = POOL_K3S1;
    }
    /* general max pooling, k2s2, k2k2p1, k3s1p1, k3s2, k3s2p1 */
    if (param->pool_method == POOL_MAX)
    {
        /* only symmetric padding has specialised kernels */
        if ((param->pad_h0 == param->pad_w0) && (param->pad_h1 == param->pad_w1))
        {
            if (param->pad_h0 == 0)
            {
                if (pool_size == POOL_K2S2)
                    param->funct = ( pooling_kernel_t )max_2x2s2;
                else if (pool_size == POOL_K3S2)
                    param->funct = ( pooling_kernel_t )max_3x3s2;
            }
            else if (param->pad_h0 == 1)
            {
                if (pool_size == POOL_K2S2)
                    param->funct = ( pooling_kernel_t )max_2x2s2_p1;
                else if (pool_size == POOL_K3S2)
                    param->funct = ( pooling_kernel_t )max_3x3s2_p1;
                else if (pool_size == POOL_K3S1)
                    param->funct = ( pooling_kernel_t )max_3x3s1_p1;
            }
        }
        if (param->funct != NULL)
            return 0;
        else
        {
            fprintf(stderr, "perf general max pooling func not be find\n");
            return -1;
        }
    }
    /* general avg pooling, k2s2, k2s2p1, k3s2, k3s2p1 */
    if (param->pool_method == POOL_AVG)
    {
        if ((param->pad_h0 == param->pad_w0) && (param->pad_h1 == param->pad_w1))
        {
            if (param->pad_h0 == 0 && param->pad_h1 == 0)
            {
                if (pool_size == POOL_K2S2)
                    param->funct = ( pooling_kernel_t )avg_2x2s2;
                else if (pool_size == POOL_K3S2)
                    param->funct = ( pooling_kernel_t )avg_3x3s2;
            }
            else if (param->pad_h0 == 1 && param->pad_h1 == 1)
            {
                if (pool_size == POOL_K2S2)
                    param->funct = ( pooling_kernel_t )avg_2x2s2_p1;
                else if (pool_size == POOL_K3S2)
                    param->funct = ( pooling_kernel_t )avg_3x3s2_p1;
            }
        }
        if (param->funct != NULL)
            return 0;
        else
        {
            fprintf(stderr, "perf general avg pooling func not be find\n");
            return -1;
        }
    }
    fprintf(stderr,
            "perf pooling func not be find\n");
    return -1;
}

/* pooling_kernel_perf_run: run the kernel selected by prerun over every batch,
 * parallelised across channels with OpenMP (each channel passed as inc==1). */
int pooling_kernel_perf_run(struct ir_tensor* input, struct ir_tensor* output, struct pool_param* param, int num_thread)
{
    // fprintf(stderr, "perf pooling_kernel_run\n");
    int is_caffe = param->caffe_flavor;
    pooling_kernel_t kernel = (pooling_kernel_t)(param->funct);
    int batch = input->dims[0];
    int c = input->dims[1];
    int in_h = input->dims[2];
    int in_w = input->dims[3];
    int out_h = output->dims[2];
    int out_w = output->dims[3];
    int img_size = c * in_h * in_w;
    int feature_size = c * out_h * out_w;
    for (int n = 0; n < batch; n++)
    {
        /* NOTE(review): arithmetic on void* is a GCC extension (byte-wise). */
        void* input_frame = input->data + n * img_size * input->elem_size;
        void* output_frame = output->data + n * feature_size * output->elem_size;
#pragma omp parallel for num_threads(num_thread)
        for (int ch = 0; ch < c; ch++)
        {
            void* cur_input = input_frame + ch * in_h * in_w * input->elem_size;
            void* cur_output = output_frame + ch * out_h * out_w * output->elem_size;
            kernel(cur_input, cur_output, 1, in_h, in_w, out_h, out_w, param->kernel_h, param->kernel_w,
                   param->stride_h, param->stride_w, param->pad_h0, param->pad_w0, param->pad_h1, param->pad_w1,
                   is_caffe);
        }
    }
    return 0;
}
MathTools.h
/** * * \copyright * Copyright (c) 2012-2017, OpenGeoSys Community (http://www.opengeosys.org) * Distributed under a Modified BSD License. * See accompanying file LICENSE.txt or * http://www.opengeosys.org/project/license * */ #pragma once #include <cstddef> #ifdef _OPENMP #include <omp.h> #endif #ifndef Q_MOC_RUN // to avoid Qt4 bug, https://bugreports.qt.io/browse/QTBUG-22829 #include <boost/math/constants/constants.hpp> #endif namespace MathLib { /** * standard inner product in R^N * \param v0 array of type T representing the vector * \param v1 array of type T representing the vector * */ template<typename T, int N> inline T scalarProduct(T const * const v0, T const * const v1) { T res (v0[0] * v1[0]); #ifdef _OPENMP OPENMP_LOOP_TYPE k; #pragma omp parallel for reduction (+:res) for (k = 1; k<N; k++) { res += v0[k] * v1[k]; } #else for (std::size_t k(1); k < N; k++) res += v0[k] * v1[k]; #endif return res; } template <> inline double scalarProduct<double,3>(double const * const v0, double const * const v1) { double res (v0[0] * v1[0]); for (std::size_t k(1); k < 3; k++) res += v0[k] * v1[k]; return res; } template<typename T> inline T scalarProduct(T const * const v0, T const * const v1, unsigned n) { T res (v0[0] * v1[0]); #ifdef _OPENMP OPENMP_LOOP_TYPE k; #pragma omp parallel for reduction (+:res) #ifdef WIN32 #pragma warning ( push ) #pragma warning ( disable: 4018 ) #endif for (k = 1; k<n; k++) { res += v0[k] * v1[k]; } #ifdef WIN32 #pragma warning ( pop ) #endif #else for (std::size_t k(1); k < n; k++) res += v0[k] * v1[k]; #endif return res; } /** * calcProjPntToLineAndDists computes the orthogonal projection * of a point p to the line described by the points a and b, * \f$g(\lambda) = a + \lambda (b - a)\f$, * the distance between p and the projected point * and the distances between the projected point and the end * points a, b of the line * \param p the (mesh) point * \param a first point of line * \param b second point of line * \param lambda the 
projected point described by the line equation above * \param d0 distance to the line point a * \returns the distance between p and the orthogonal projection of p */ double calcProjPntToLineAndDists(const double p[3], const double a[3], const double b[3], double &lambda, double &d0); /** squared dist between double arrays p0 and p1 (size of arrays is 3) */ inline double sqrDist(const double* p0, const double* p1) { const double v[3] = {p1[0] - p0[0], p1[1] - p0[1], p1[2] - p0[2]}; return scalarProduct<double,3>(v,v); } /** * Let \f$p_0, p_1, p_2 \in R^3\f$. The function getAngle * computes the angle between the edges \f$(p_0,p_1)\f$ and \f$(p_1,p_2)\f$ * @param p0 start point of edge 0 * @param p1 end point of edge 0 and start point of edge 1 * @param p2 end point of edge 1 * @return the angle between the edges */ double getAngle (const double p0[3], const double p1[3], const double p2[3]); /// converts the given degrees to radians inline double to_radians(double degrees) { return degrees*boost::math::constants::pi<double>()/180.; } template<typename Type> Type limitValueInInterval(const Type variable, const Type lower_bound, const Type upper_bound) { if (variable < lower_bound) return lower_bound; if (variable > upper_bound) return upper_bound; return variable; } } // namespace
TGV_core.c
/*
 * This work is part of the Core Imaging Library developed by
 * Visual Analytics and Imaging System Group of the Science Technology
 * Facilities Council, STFC
 *
 * Copyright 2019 Daniil Kazantsev
 * Copyright 2019 Srikanth Nagella, Edoardo Pasca
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 * http://www.apache.org/licenses/LICENSE-2.0
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "TGV_core.h"

/* C-OMP implementation of the Primal-Dual denoising method for the
 * Total Generalized Variation (TGV)-L2 model [1] (2D/3D case)
 *
 * Input Parameters:
 * 1. Noisy image/volume (2D/3D)
 * 2. lambda - regularisation parameter
 * 3. parameter to control the first-order term (alpha1)
 * 4. parameter to control the second-order term (alpha0)
 * 5. Number of Chambolle-Pock (Primal-Dual) iterations
 * 6. Lipschitz constant (default is 12)
 * 7. epsilon: tolerance constant
 *
 * Output:
 * [1] Filtered/regularized image/volume
 * [2] Information vector which contains [iteration no., reached tolerance]
 *
 * References:
 * [1] K. Bredies "Total Generalized Variation"
 */
float TGV_main(float *U0, float *U, float *infovector, float lambda, float alpha1, float alpha0, int iter, float L2, float epsil, int dimX, int dimY, int dimZ)
{
    long DimTotal;
    int ll, j;
    float re, re1;
    re = 0.0f;
    re1 = 0.0f;
    int count = 0;
    float *U_old, *P1, *P2, *Q1, *Q2, *Q3, *V1, *V1_old, *V2, *V2_old, tau, sigma;
    DimTotal = (long)(dimX*dimY*dimZ);
    copyIm(U0, U, (long)(dimX), (long)(dimY), (long)(dimZ));
    /* initialize primal/dual step sizes from the Lipschitz constant */
    tau = pow(L2,-0.5);
    sigma = pow(L2,-0.5);
    /* dual variables (NOTE(review): calloc results are not NULL-checked) */
    P1 = calloc(DimTotal, sizeof(float));
    P2 = calloc(DimTotal, sizeof(float));
    Q1 = calloc(DimTotal, sizeof(float));
    Q2 = calloc(DimTotal, sizeof(float));
    Q3 = calloc(DimTotal, sizeof(float));
    U_old = calloc(DimTotal, sizeof(float));
    V1 = calloc(DimTotal, sizeof(float));
    V1_old = calloc(DimTotal, sizeof(float));
    V2 = calloc(DimTotal, sizeof(float));
    V2_old = calloc(DimTotal, sizeof(float));
    if (dimZ == 1) {
        /*2D case*/
        /* Primal-dual iterations begin here */
        for(ll = 0; ll < iter; ll++) {
            /* Calculate Dual Variable P */
            DualP_2D(U, V1, V2, P1, P2, (long)(dimX), (long)(dimY), sigma);
            /*Projection onto convex set for P*/
            ProjP_2D(P1, P2, (long)(dimX), (long)(dimY), alpha1);
            /* Calculate Dual Variable Q */
            DualQ_2D(V1, V2, Q1, Q2, Q3, (long)(dimX), (long)(dimY), sigma);
            /*Projection onto convex set for Q*/
            ProjQ_2D(Q1, Q2, Q3, (long)(dimX), (long)(dimY), alpha0);
            /*saving U into U_old*/
            copyIm(U, U_old, (long)(dimX), (long)(dimY), 1l);
            /*adjoint operation -> divergence and projection of P*/
            DivProjP_2D(U, U0, P1, P2, (long)(dimX), (long)(dimY), lambda, tau);
            /*get updated solution U (over-relaxation step)*/
            newU(U, U_old, (long)(dimX), (long)(dimY));
            /*saving V into V_old*/
            copyIm(V1, V1_old, (long)(dimX), (long)(dimY), 1l);
            copyIm(V2, V2_old, (long)(dimX), (long)(dimY), 1l);
            /* upd V*/
            UpdV_2D(V1, V2, P1, P2, Q1, Q2, Q3, (long)(dimX), (long)(dimY), tau);
            /*get new V*/
            newU(V1, V1_old, (long)(dimX), (long)(dimY));
            newU(V2, V2_old, (long)(dimX), (long)(dimY));
            /* check early stopping criteria every 5th iteration */
            if ((epsil != 0.0f) && (ll % 5 == 0)) {
                re = 0.0f;
                re1 = 0.0f;
                for(j=0; j<DimTotal; j++) {
                    re += powf(U[j] - U_old[j],2);
                    re1 += powf(U[j],2);
                }
                re = sqrtf(re)/sqrtf(re1); /* relative change of U */
                if (re < epsil) count++;
                if (count > 3) break;
            }
        } /*end of iterations*/
    }
    else {
        /*3D case*/
        float *P3, *Q4, *Q5, *Q6, *V3, *V3_old;
        P3 = calloc(DimTotal, sizeof(float));
        Q4 = calloc(DimTotal, sizeof(float));
        Q5 = calloc(DimTotal, sizeof(float));
        Q6 = calloc(DimTotal, sizeof(float));
        V3 = calloc(DimTotal, sizeof(float));
        V3_old = calloc(DimTotal, sizeof(float));
        /* Primal-dual iterations begin here */
        for(ll = 0; ll < iter; ll++) {
            /* Calculate Dual Variable P */
            DualP_3D(U, V1, V2, V3, P1, P2, P3, (long)(dimX), (long)(dimY), (long)(dimZ), sigma);
            /*Projection onto convex set for P*/
            ProjP_3D(P1, P2, P3, (long)(dimX), (long)(dimY), (long)(dimZ), alpha1);
            /* Calculate Dual Variable Q */
            DualQ_3D(V1, V2, V3, Q1, Q2, Q3, Q4, Q5, Q6, (long)(dimX), (long)(dimY), (long)(dimZ), sigma);
            /*Projection onto convex set for Q*/
            ProjQ_3D(Q1, Q2, Q3, Q4, Q5, Q6, (long)(dimX), (long)(dimY), (long)(dimZ), alpha0);
            /*saving U into U_old*/
            copyIm(U, U_old, (long)(dimX), (long)(dimY), (long)(dimZ));
            /*adjoint operation -> divergence and projection of P*/
            DivProjP_3D(U, U0, P1, P2, P3, (long)(dimX), (long)(dimY), (long)(dimZ), lambda, tau);
            /*get updated solution U*/
            newU3D(U, U_old, (long)(dimX), (long)(dimY), (long)(dimZ));
            /*saving V into V_old*/
            copyIm_3Ar(V1, V2, V3, V1_old, V2_old, V3_old, (long)(dimX), (long)(dimY), (long)(dimZ));
            /* upd V*/
            UpdV_3D(V1, V2, V3, P1, P2, P3, Q1, Q2, Q3, Q4, Q5, Q6, (long)(dimX), (long)(dimY), (long)(dimZ), tau);
            /*get new V*/
            newU3D_3Ar(V1, V2, V3, V1_old, V2_old, V3_old, (long)(dimX), (long)(dimY), (long)(dimZ));
            /* check early stopping criteria */
            if ((epsil != 0.0f) && (ll % 5 == 0)) {
                re = 0.0f;
                re1 = 0.0f;
                for(j=0; j<DimTotal; j++) {
                    re += powf(U[j] - U_old[j],2);
                    re1 += powf(U[j],2);
                }
                re = sqrtf(re)/sqrtf(re1);
                if (re < epsil) count++;
                if
(count > 3) break;
            }
        } /*end of iterations*/
        free(P3);free(Q4);free(Q5);free(Q6);free(V3);free(V3_old);
    }

    /*freeing*/
    free(P1);free(P2);free(Q1);free(Q2);free(Q3);free(U_old);
    free(V1);free(V2);free(V1_old);free(V2_old);

    /*adding info into info_vector */
    infovector[0] = (float)(ll); /*iterations number (if stopped earlier based on tolerance)*/
    infovector[1] = re; /* reached tolerance */
    return 0;
}

/********************************************************************/
/***************************2D Functions*****************************/
/********************************************************************/

/*Calculating dual variable P (using forward differences)*/
float DualP_2D(float *U, float *V1, float *V2, float *P1, float *P2, long dimX, long dimY, float sigma)
{
    long i,j, index;
#pragma omp parallel for shared(U,V1,V2,P1,P2) private(i,j,index)
    for(i=0; i<dimX; i++) {
        for(j=0; j<dimY; j++) {
            index = j*dimX+i;
            /* symmetric boundary conditions (Neuman) */
            if (i == dimX-1) P1[index] += sigma*(-V1[index]);
            else P1[index] += sigma*((U[j*dimX+(i+1)] - U[index]) - V1[index]);
            if (j == dimY-1) P2[index] += sigma*(-V2[index]);
            else P2[index] += sigma*((U[(j+1)*dimX+i] - U[index]) - V2[index]);
        }}
    return 1;
}

/*Projection onto convex set for P*/
float ProjP_2D(float *P1, float *P2, long dimX, long dimY, float alpha1)
{
    float grad_magn;
    long i,j,index;
#pragma omp parallel for shared(P1,P2) private(i,j,index,grad_magn)
    for(i=0; i<dimX; i++) {
        for(j=0; j<dimY; j++) {
            index = j*dimX+i;
            grad_magn = (sqrtf(pow(P1[index],2) + pow(P2[index],2)))/alpha1;
            if (grad_magn > 1.0f) {
                P1[index] /= grad_magn;
                P2[index] /= grad_magn;
            }
        }}
    return 1;
}

/*Calculating dual variable Q (using forward differences)*/
float DualQ_2D(float *V1, float *V2, float *Q1, float *Q2, float *Q3, long dimX, long dimY, float sigma)
{
    long i,j,index;
    float q1, q2, q11, q22;
#pragma omp parallel for shared(Q1,Q2,Q3,V1,V2) private(i,j,index,q1,q2,q11,q22)
    for(i=0; i<dimX; i++) {
        for(j=0; j<dimY; j++) {
            index = j*dimX+i;
            q1 = 0.0f; q11 = 0.0f; q2 = 0.0f; q22 = 0.0f;
            /* boundary conditions (Neuman) */
            if (i != dimX-1){
                q1 = V1[j*dimX+(i+1)] - V1[index];
                q11 = V2[j*dimX+(i+1)] - V2[index];
            }
            if (j != dimY-1) {
                q2 = V2[(j+1)*dimX+i] - V2[index];
                q22 = V1[(j+1)*dimX+i] - V1[index];
            }
            /* Q3 stores the symmetrised off-diagonal entry of the tensor */
            Q1[index] += sigma*(q1);
            Q2[index] += sigma*(q2);
            Q3[index] += sigma*(0.5f*(q11 + q22));
        }}
    return 1;
}

/*Projection onto convex set for Q*/
float ProjQ_2D(float *Q1, float *Q2, float *Q3, long dimX, long dimY, float alpha0)
{
    float grad_magn;
    long i,j,index;
#pragma omp parallel for shared(Q1,Q2,Q3) private(i,j,index,grad_magn)
    for(i=0; i<dimX; i++) {
        for(j=0; j<dimY; j++) {
            index = j*dimX+i;
            grad_magn = sqrtf(pow(Q1[index],2) + pow(Q2[index],2) + 2*pow(Q3[index],2));
            grad_magn = grad_magn/alpha0;
            if (grad_magn > 1.0f) {
                Q1[index] /= grad_magn;
                Q2[index] /= grad_magn;
                Q3[index] /= grad_magn;
            }
        }}
    return 1;
}

/* Divergence and projection for P (backward differences)*/
float DivProjP_2D(float *U, float *U0, float *P1, float *P2, long dimX, long dimY, float lambda, float tau)
{
    long i,j,index;
    float P_v1, P_v2, div;
#pragma omp parallel for shared(U,U0,P1,P2) private(i,j,index,P_v1,P_v2,div)
    for(i=0; i<dimX; i++) {
        for(j=0; j<dimY; j++) {
            index = j*dimX+i;
            if (i == 0) P_v1 = P1[index];
            else if (i == dimX-1) P_v1 = -P1[j*dimX+(i-1)];
            else P_v1 = P1[index] - P1[j*dimX+(i-1)];
            if (j == 0) P_v2 = P2[index];
            else if (j == dimY-1) P_v2 = -P2[(j-1)*dimX+i];
            else P_v2 = P2[index] - P2[(j-1)*dimX+i];
            div = P_v1 + P_v2;
            /* proximal step of the data-fidelity term */
            U[index] = (lambda*(U[index] + tau*div) + tau*U0[index])/(lambda + tau);
        }}
    return *U;
}

/*get updated solution U (over-relaxation: U = 2*U - U_old)*/
float newU(float *U, float *U_old, long dimX, long dimY)
{
    long i;
#pragma omp parallel for shared(U,U_old) private(i)
    for(i=0; i<dimX*dimY; i++) U[i] = 2*U[i] - U_old[i];
    return *U;
}

/*get update for V (backward differences)*/
float UpdV_2D(float *V1, float *V2, float *P1, float *P2, float *Q1, float *Q2, float *Q3, long dimX, long dimY, float tau)
{
    long i, j, index;
    float q1, q3_x, q3_y, q2, div1, div2;
#pragma omp parallel for shared(V1,V2,P1,P2,Q1,Q2,Q3) private(i, j, index, q1, q3_x, q3_y, q2, div1, div2)
    for(i=0; i<dimX; i++) {
        for(j=0; j<dimY; j++) {
            index = j*dimX+i;
            /* boundary conditions (Neuman) */
            if (i == 0) { q1 = Q1[index]; q3_x = Q3[index]; }
            else if (i == dimX-1) { q1 = -Q1[j*dimX+(i-1)]; q3_x = -Q3[j*dimX+(i-1)]; }
            else { q1 = Q1[index] - Q1[j*dimX+(i-1)]; q3_x = Q3[index] - Q3[j*dimX+(i-1)]; }
            if (j == 0) { q2 = Q2[index]; q3_y = Q3[index]; }
            else if (j == dimY-1) { q2 = -Q2[(j-1)*dimX+i]; q3_y = -Q3[(j-1)*dimX+i]; }
            else { q2 = Q2[index] - Q2[(j-1)*dimX+i]; q3_y = Q3[index] - Q3[(j-1)*dimX+i]; }
            /* divergence of the tensor field Q, componentwise */
            div1 = q1 + q3_y;
            div2 = q3_x + q2;
            V1[index] += tau*(P1[index] + div1);
            V2[index] += tau*(P2[index] + div2);
        }}
    return 1;
}

/********************************************************************/
/***************************3D Functions*****************************/
/********************************************************************/

/*Calculating dual variable P (using forward differences)*/
float DualP_3D(float *U, float *V1, float *V2, float *V3, float *P1, float *P2, float *P3, long dimX, long dimY, long dimZ, float sigma)
{
    long i,j,k, index;
#pragma omp parallel for shared(U,V1,V2,V3,P1,P2,P3) private(i,j,k,index)
    for(i=0; i<dimX; i++) {
        for(j=0; j<dimY; j++) {
            for(k=0; k<dimZ; k++) {
                index = (dimX*dimY)*k + j*dimX+i;
                /* symmetric boundary conditions (Neuman) */
                if (i == dimX-1) P1[index] += sigma*(-V1[index]);
                else P1[index] += sigma*((U[(dimX*dimY)*k + j*dimX+(i+1)] - U[index]) - V1[index]);
                if (j == dimY-1) P2[index] += sigma*(-V2[index]);
                else P2[index] += sigma*((U[(dimX*dimY)*k + (j+1)*dimX+i] - U[index]) - V2[index]);
                if (k == dimZ-1) P3[index] += sigma*(-V3[index]);
                else P3[index] += sigma*((U[(dimX*dimY)*(k+1) + j*dimX+i] - U[index]) - V3[index]);
            }}}
    return 1;
}

/*Projection onto convex set for P*/
float ProjP_3D(float *P1, float *P2, float *P3, long dimX, long dimY, long dimZ, float alpha1)
{
    float grad_magn;
    long i,j,k,index;
#pragma omp parallel for shared(P1,P2,P3) private(i,j,k,index,grad_magn)
    for(i=0; i<dimX; i++) {
        for(j=0; j<dimY; j++) {
            for(k=0; k<dimZ; k++) {
                index = (dimX*dimY)*k + j*dimX+i;
                grad_magn = (sqrtf(pow(P1[index],2) + pow(P2[index],2) + pow(P3[index],2)))/alpha1;
                if (grad_magn > 1.0f) {
                    P1[index] /= grad_magn;
                    P2[index] /= grad_magn;
                    P3[index] /= grad_magn;
                }
            }}}
    return 1;
}

/*Calculating dual variable Q (using forward differences)*/
float DualQ_3D(float *V1, float *V2, float *V3, float *Q1, float *Q2, float *Q3, float *Q4, float *Q5, float *Q6, long dimX, long dimY, long dimZ, float sigma)
{
    long i,j,k,index;
    float q1, q2, q3, q11, q22, q33, q44, q55, q66;
#pragma omp parallel for shared(Q1,Q2,Q3,Q4,Q5,Q6,V1,V2,V3) private(i,j,k,index,q1,q2,q3,q11,q22,q33,q44,q55,q66)
    for(i=0; i<dimX; i++) {
        for(j=0; j<dimY; j++) {
            for(k=0; k<dimZ; k++) {
                index = (dimX*dimY)*k + j*dimX+i;
                q1 = 0.0f; q11 = 0.0f; q33 = 0.0f;
                q2 = 0.0f; q22 = 0.0f; q55 = 0.0f;
                q3 = 0.0f; q44 = 0.0f; q66 = 0.0f;
                /* symmetric boundary conditions (Neuman) */
                if (i != dimX-1){
                    q1 = V1[(dimX*dimY)*k + j*dimX+(i+1)] - V1[index];
                    q11 = V2[(dimX*dimY)*k + j*dimX+(i+1)] - V2[index];
                    q33 = V3[(dimX*dimY)*k + j*dimX+(i+1)] - V3[index];
                }
                if (j != dimY-1) {
                    q2 = V2[(dimX*dimY)*k + (j+1)*dimX+i] - V2[index];
                    q22 = V1[(dimX*dimY)*k + (j+1)*dimX+i] - V1[index];
                    q55 = V3[(dimX*dimY)*k + (j+1)*dimX+i] - V3[index];
                }
                if (k != dimZ-1) {
                    q3 = V3[(dimX*dimY)*(k+1) + j*dimX+i] - V3[index];
                    q44 = V1[(dimX*dimY)*(k+1) + j*dimX+i] - V1[index];
                    q66 = V2[(dimX*dimY)*(k+1) + j*dimX+i] - V2[index];
                }
                Q1[index] += sigma*(q1); /*Q11*/
                Q2[index] += sigma*(q2); /*Q22*/
                Q3[index] += sigma*(q3); /*Q33*/
                Q4[index] += sigma*(0.5f*(q11 + q22)); /* Q21 / Q12 */
                Q5[index] += sigma*(0.5f*(q33 + q44)); /* Q31 / Q13 */
                Q6[index] += sigma*(0.5f*(q55 + q66)); /* Q32 / Q23 */
            }}}
    return 1;
}

/*Projection onto convex set for Q*/
float ProjQ_3D(float *Q1, float *Q2, float *Q3, float *Q4, float *Q5, float *Q6, long dimX, long dimY, long dimZ, float alpha0)
{
    float grad_magn;
    long i,j,k,index;
#pragma omp parallel for shared(Q1,Q2,Q3,Q4,Q5,Q6) private(i,j,k,index,grad_magn) for(i=0; i<dimX; i++) { for(j=0; j<dimY; j++) { for(k=0; k<dimZ; k++) { index = (dimX*dimY)*k + j*dimX+i; grad_magn = sqrtf(pow(Q1[index],2) + pow(Q2[index],2) + pow(Q3[index],2) + 2.0f*pow(Q4[index],2) + 2.0f*pow(Q5[index],2) + 2.0f*pow(Q6[index],2)); grad_magn = grad_magn/alpha0; if (grad_magn > 1.0f) { Q1[index] /= grad_magn; Q2[index] /= grad_magn; Q3[index] /= grad_magn; Q4[index] /= grad_magn; Q5[index] /= grad_magn; Q6[index] /= grad_magn; } }}} return 1; } /* Divergence and projection for P*/ float DivProjP_3D(float *U, float *U0, float *P1, float *P2, float *P3, long dimX, long dimY, long dimZ, float lambda, float tau) { long i,j,k,index; float P_v1, P_v2, P_v3, div; #pragma omp parallel for shared(U,U0,P1,P2,P3) private(i,j,k,index,P_v1,P_v2,P_v3,div) for(i=0; i<dimX; i++) { for(j=0; j<dimY; j++) { for(k=0; k<dimZ; k++) { index = (dimX*dimY)*k + j*dimX+i; if (i == 0) P_v1 = P1[index]; else if (i == dimX-1) P_v1 = -P1[(dimX*dimY)*k + j*dimX+(i-1)]; else P_v1 = P1[index] - P1[(dimX*dimY)*k + j*dimX+(i-1)]; if (j == 0) P_v2 = P2[index]; else if (j == dimY-1) P_v2 = -P2[(dimX*dimY)*k + (j-1)*dimX+i]; else P_v2 = P2[index] - P2[(dimX*dimY)*k + (j-1)*dimX+i]; if (k == 0) P_v3 = P3[index]; else if (k == dimZ-1) P_v3 = -P3[(dimX*dimY)*(k-1) + (j)*dimX+i]; else P_v3 = P3[index] - P3[(dimX*dimY)*(k-1) + (j)*dimX+i]; div = P_v1 + P_v2 + P_v3; U[index] = (lambda*(U[index] + tau*div) + tau*U0[index])/(lambda + tau); }}} return *U; } /*get update for V*/ float UpdV_3D(float *V1, float *V2, float *V3, float *P1, float *P2, float *P3, float *Q1, float *Q2, float *Q3, float *Q4, float *Q5, float *Q6, long dimX, long dimY, long dimZ, float tau) { long i,j,k,index; float q1, q4x, q5x, q2, q4y, q6y, q6z, q5z, q3, div1, div2, div3; #pragma omp parallel for shared(V1,V2,V3,P1,P2,P3,Q1,Q2,Q3,Q4,Q5,Q6) private(i,j,k,index,q1,q4x,q5x,q2,q4y,q6y,q6z,q5z,q3,div1,div2,div3) for(i=0; i<dimX; i++) { 
        for(j=0; j<dimY; j++) {
            for(k=0; k<dimZ; k++) {
                index = (dimX*dimY)*k + j*dimX+i;
                q1 = 0.0f; q4x= 0.0f; q5x= 0.0f; q2= 0.0f; q4y= 0.0f; q6y= 0.0f; q6z= 0.0f; q5z= 0.0f; q3= 0.0f;
                /* Q1 - Q11, Q2 - Q22, Q3 - Q33, Q4 - Q21/Q12, Q5 - Q31/Q13, Q6 - Q32/Q23*/
                /* symmetric boundary conditions (Neumann) */
                /* backward differences along x of Q11, Q21, Q31 (adjoint BCs) */
                if (i == 0) {
                    q1 = Q1[index];
                    q4x = Q4[index];
                    q5x = Q5[index]; }
                else if (i == dimX-1) {
                    q1 = -Q1[(dimX*dimY)*k + j*dimX+(i-1)];
                    q4x = -Q4[(dimX*dimY)*k + j*dimX+(i-1)];
                    q5x = -Q5[(dimX*dimY)*k + j*dimX+(i-1)]; }
                else {
                    q1 = Q1[index] - Q1[(dimX*dimY)*k + j*dimX+(i-1)];
                    q4x = Q4[index] - Q4[(dimX*dimY)*k + j*dimX+(i-1)];
                    q5x = Q5[index] - Q5[(dimX*dimY)*k + j*dimX+(i-1)]; }
                /* backward differences along y of Q22, Q12, Q32 */
                if (j == 0) {
                    q2 = Q2[index];
                    q4y = Q4[index];
                    q6y = Q6[index]; }
                else if (j == dimY-1) {
                    q2 = -Q2[(dimX*dimY)*k + (j-1)*dimX+i];
                    q4y = -Q4[(dimX*dimY)*k + (j-1)*dimX+i];
                    q6y = -Q6[(dimX*dimY)*k + (j-1)*dimX+i]; }
                else {
                    q2 = Q2[index] - Q2[(dimX*dimY)*k + (j-1)*dimX+i];
                    q4y = Q4[index] - Q4[(dimX*dimY)*k + (j-1)*dimX+i];
                    q6y = Q6[index] - Q6[(dimX*dimY)*k + (j-1)*dimX+i]; }
                /* backward differences along z of Q23, Q13, Q33 */
                if (k == 0) {
                    q6z = Q6[index];
                    q5z = Q5[index];
                    q3 = Q3[index]; }
                else if (k == dimZ-1) {
                    q6z = -Q6[(dimX*dimY)*(k-1) + (j)*dimX+i];
                    q5z = -Q5[(dimX*dimY)*(k-1) + (j)*dimX+i];
                    q3 = -Q3[(dimX*dimY)*(k-1) + (j)*dimX+i]; }
                else {
                    q6z = Q6[index] - Q6[(dimX*dimY)*(k-1) + (j)*dimX+i];
                    q5z = Q5[index] - Q5[(dimX*dimY)*(k-1) + (j)*dimX+i];
                    q3 = Q3[index] - Q3[(dimX*dimY)*(k-1) + (j)*dimX+i]; }
                /* row-wise divergence of the symmetric tensor Q */
                div1 = q1 + q4y + q5z;
                div2 = q4x + q2 + q6z;
                div3 = q5x + q6y + q3;
                V1[index] += tau*(P1[index] + div1);
                V2[index] += tau*(P2[index] + div2);
                V3[index] += tau*(P3[index] + div3);
            }}}
    return 1;
}
/* Snapshot copy of the three vector-field components into the *_old buffers
 * (used to form the over-relaxation step).  Always returns 1. */
float copyIm_3Ar(float *V1, float *V2, float *V3, float *V1_old, float *V2_old, float *V3_old, long dimX, long dimY, long dimZ)
{
    long j;
#pragma omp parallel for shared(V1, V2, V3, V1_old, V2_old, V3_old) private(j)
    for (j = 0; j<dimX*dimY*dimZ; j++) {
        V1_old[j] = V1[j];
        V2_old[j] = V2[j];
        V3_old[j] = V3[j];
    }
    return 1;
}
/*get updated solution U*/
float
newU3D(float *U, float *U_old, long dimX, long dimY, long dimZ)
{
    long i;
    /* Over-relaxation (extrapolation) step of the primal-dual scheme:
     * U <- 2*U - U_old, elementwise over the whole volume.
     * NOTE(review): returns *U (first voxel of the updated volume) — looks
     * like an arbitrary status value; confirm callers ignore it. */
#pragma omp parallel for shared(U, U_old) private(i)
    for(i=0; i<dimX*dimY*dimZ; i++) U[i] = 2.0f*U[i] - U_old[i];
    return *U;
}
/*get updated solution U*/
/* Same over-relaxation step applied to all three components of the auxiliary
 * vector field: V <- 2*V - V_old, elementwise.  Always returns 1. */
float newU3D_3Ar(float *V1, float *V2, float *V3, float *V1_old, float *V2_old, float *V3_old, long dimX, long dimY, long dimZ)
{
    long i;
#pragma omp parallel for shared(V1, V2, V3, V1_old, V2_old, V3_old) private(i)
    for(i=0; i<dimX*dimY*dimZ; i++) {
        V1[i] = 2.0f*V1[i] - V1_old[i];
        V2[i] = 2.0f*V2[i] - V2_old[i];
        V3[i] = 2.0f*V3[i] - V3_old[i];
    }
    return 1;
}
ft_ao.c
/* Copyright 2014-2018 The PySCF Developers. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. * * Fourier transformed AO pair * \int e^{-i Gv \cdot r} i(r) * j(r) dr^3 * * eval_gz, b, gxyz, gs: * - when eval_gz is GTO_Gv_uniform_orth * > b (reciprocal vectors) is diagonal 3x3 matrix * > Gv k-space grids = dot(b.T,gxyz) * > gxyz[3,nGv] = (kx[:nGv], ky[:nGv], kz[:nGv]) * > gs[3]: The number of G-vectors along each direction (nGv=gs[0]*gs[1]*gs[2]). * - when eval_gz is GTO_Gv_uniform_nonorth * > b is 3x3 matrix = 2\pi * scipy.linalg.inv(cell.lattice_vectors).T * > Gv k-space grids = dot(b.T,gxyz) * > gxyz[3,nGv] = (kx[:nGv], ky[:nGv], kz[:nGv]) * > gs[3]: The number of *positive* G-vectors along each direction. * - when eval_gz is GTO_Gv_general * only Gv is needed * - when eval_gz is GTO_Gv_nonuniform_orth * > b is the basic G value for each cartesian component * Gx = b[:gs[0]] * Gy = b[gs[0]:gs[0]+gs[1]] * Gz = b[gs[0]+gs[1]:] * > gs[3]: Number of basic G values along each direction. 
* > gxyz[3,nGv] are used to index the basic G value * > Gv is not used */ #include <stdlib.h> #include <math.h> #include <assert.h> #include <complex.h> #include "config.h" #include "cint.h" #include "gto/ft_ao.h" #include "np_helper/np_helper.h" #define SQRTPI 1.7724538509055160272981674833411451 #define EXP_CUTOFF 100 #define NCTRMAX 72 double CINTsquare_dist(const double *r1, const double *r2); double CINTcommon_fac_sp(int l); /* * Pyscf-1.5 (and older) use libcint function CINTinit_int1e_EnvVars and * CINTg1e_index_xyz. It's unsafe since the CINTEnvVars type was redefined * in ft_ao.h. Copy the contents of CINTinit_int1e_EnvVars and * CINTg1e_index_xyz here. */ #define IINC 0 #define JINC 1 #define GSHIFT 4 #define POS_E1 5 #define RYS_ROOTS 6 #define TENSOR 7 void GTO_ft_init1e_envs(CINTEnvVars *envs, int *ng, int *shls, int *atm, int natm, int *bas, int nbas, double *env) { envs->natm = natm; envs->nbas = nbas; envs->atm = atm; envs->bas = bas; envs->env = env; envs->shls = shls; const int i_sh = shls[0]; const int j_sh = shls[1]; envs->i_l = bas(ANG_OF, i_sh); envs->j_l = bas(ANG_OF, j_sh); envs->x_ctr[0] = bas(NCTR_OF, i_sh); envs->x_ctr[1] = bas(NCTR_OF, j_sh); envs->nfi = (envs->i_l+1)*(envs->i_l+2)/2; envs->nfj = (envs->j_l+1)*(envs->j_l+2)/2; envs->nf = envs->nfi * envs->nfj; envs->common_factor = 1; envs->gbits = ng[GSHIFT]; envs->ncomp_e1 = ng[POS_E1]; envs->ncomp_tensor = ng[TENSOR]; envs->li_ceil = envs->i_l + ng[IINC]; envs->lj_ceil = envs->j_l + ng[JINC]; if (ng[RYS_ROOTS] > 0) { envs->nrys_roots = ng[RYS_ROOTS]; } else { envs->nrys_roots = (envs->li_ceil + envs->lj_ceil)/2 + 1; } envs->ri = env + atm(PTR_COORD, bas(ATOM_OF, i_sh)); envs->rj = env + atm(PTR_COORD, bas(ATOM_OF, j_sh)); int dli, dlj; if (envs->li_ceil < envs->lj_ceil) { dli = envs->li_ceil + 1; dlj = envs->li_ceil + envs->lj_ceil + 1; } else { dli = envs->li_ceil + envs->lj_ceil + 1; dlj = envs->lj_ceil + 1; } envs->g_stride_i = 1; envs->g_stride_j = dli; envs->g_size = dli * dlj; 
envs->lk_ceil = 1; envs->ll_ceil = 1; envs->g_stride_k = 0; envs->g_stride_l = 0; } void CINTcart_comp(int *nx, int *ny, int *nz, const int lmax); static void _g2c_index_xyz(int *idx, const CINTEnvVars *envs) { int i_l = envs->i_l; int j_l = envs->j_l; int nfi = envs->nfi; int nfj = envs->nfj; int di = envs->g_stride_i; int dj = envs->g_stride_j; int i, j, n; int ofx, ofjx; int ofy, ofjy; int ofz, ofjz; int i_nx[CART_MAX], i_ny[CART_MAX], i_nz[CART_MAX]; int j_nx[CART_MAX], j_ny[CART_MAX], j_nz[CART_MAX]; CINTcart_comp(i_nx, i_ny, i_nz, i_l); CINTcart_comp(j_nx, j_ny, j_nz, j_l); ofx = 0; ofy = envs->g_size; ofz = envs->g_size * 2; n = 0; for (j = 0; j < nfj; j++) { ofjx = ofx + dj * j_nx[j]; ofjy = ofy + dj * j_ny[j]; ofjz = ofz + dj * j_nz[j]; for (i = 0; i < nfi; i++) { idx[n+0] = ofjx + di * i_nx[i]; idx[n+1] = ofjy + di * i_ny[i]; idx[n+2] = ofjz + di * i_nz[i]; n += 3; } } } static const int _LEN_CART[] = { 1, 3, 6, 10, 15, 21, 28, 36, 45, 55, 66, 78, 91, 105, 120, 136 }; static const int _CUM_LEN_CART[] = { 1, 4, 10, 20, 35, 56, 84, 120, 165, 220, 286, 364, 455, 560, 680, 816, }; /* * WHEREX_IF_L_INC1 = [xyz2addr(x,y,z) for x,y,z in loopcart(L_MAX) if x > 0] * WHEREY_IF_L_INC1 = [xyz2addr(x,y,z) for x,y,z in loopcart(L_MAX) if y > 0] * WHEREZ_IF_L_INC1 = [xyz2addr(x,y,z) for x,y,z in loopcart(L_MAX) if z > 0] */ static const int _UPIDY[] = { 1, 3, 4, 6, 7, 8, 10, 11, 12, 13, 15, 16, 17, 18, 19, 21, 22, 23, 24, 25, 26, 28, 29, 30, 31, 32, 33, 34, 36, 37, 38, 39, 40, 41, 42, 43, 45, 46, 47, 48, 49, 50, 51, 52, 53, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 91, 92, 93, 94, 95, 96, 97, 98, 99,100,101,102,103, 105,106,107,108,109,110,111,112,113,114,115,116,117,118, 120,121,122,123,124,125,126,127,128,129,130,131,132,133,134, }; static const int _UPIDZ[] = { 2, 4, 5, 7, 8, 9, 11, 12, 13, 14, 16, 17, 18, 19, 20, 22, 23, 24, 25, 26, 27, 29, 30, 31, 32, 33, 34, 35, 37, 38, 39, 
40, 41, 42, 43, 44, 46, 47, 48, 49, 50, 51, 52, 53, 54, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 92, 93, 94, 95, 96, 97, 98, 99,100,101,102,103,104, 106,107,108,109,110,111,112,113,114,115,116,117,118,119, 121,122,123,124,125,126,127,128,129,130,131,132,133,134,135, }; /* * _DOWN_XYZ, _DOWN_XYZ_ORDER, _DOWN1, _DOWN2 labels the index in the 1D * recursive relation f_{i+1} = i/2a * f_{i-1} + X * f_{i} * _DOWN_XYZ_ORDER i in i/2a * _DOWN2 index of f_{i-1} * _DOWN_XYZ index of X * _DOWN1 index of f_{i} */ static const int _DOWN1[] = { -1, 0, 0, 0, 0, 1, 2, 1, 2, 2, 0, 0, 0, 3, 4, 5, 3, 3, 5, 5, 0, 0, 0, 3, 2, 5, 6, 7, 8, 9, 6, 6, 8, 9, 9, 0, 0, 0, 3, 2, 5, 6, 3, 5, 9, 10, 11, 12, 13, 14, 10, 10, 12, 13, 14, 14, 0, 0, 0, 3, 2, 5, 6, 3, 5, 9, 10, 6, 12, 9, 14, 15, 16, 17, 18, 19, 20, 15, 15, 17, 18, 19, 20, 20, 0, 0, 0, 3, 2, 5, 6, 3, 5, 9, 10, 6, 12, 9, 14, 15, 10, 17, 18, 14, 20, 21, 22, 23, 24, 25, 26, 27, 21, 21, 23, 24, 25, 26, 27, 27, 0, 0, 0, 3, 2, 5, 6, 3, 5, 9, 10, 6, 12, 9, 14, 15, 10, 17, 18, 14, 20, 21, 15, 23, 24, 25, 20, 27, 28, 29, 30, 31, 32, 33, 34, 35, 28, 28, 30, 31, 32, 33, 34, 35, 35, 0, 0, 0, 3, 2, 5, 6, 3, 5, 9, 10, 6, 12, 9, 14, 15, 10, 17, 18, 14, 20, 21, 15, 23, 24, 25, 20, 27, 28, 21, 30, 31, 32, 33, 27, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 36, 36, 38, 39, 40, 41, 42, 43, 44, 44, 0, 0, 0, 3, 2, 5, 6, 3, 5, 9, 10, 6, 12, 9, 14, 15, 10, 17, 18, 14, 20, 21, 15, 23, 24, 25, 20, 27, 28, 21, 30, 31, 32, 33, 27, 35, 36, 28, 38, 39, 40, 41, 42, 35, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 45, 45, 47, 48, 49, 50, 51, 52, 53, 54, 54, 0, 0, 0, 3, 2, 5, 6, 3, 5, 9, 10, 6, 12, 9, 14, 15, 10, 17, 18, 14, 20, 21, 15, 23, 24, 25, 20, 27, 28, 21, 30, 31, 32, 33, 27, 35, 36, 28, 38, 39, 40, 41, 42, 35, 44, 45, 36, 47, 48, 49, 50, 51, 52, 44, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 55, 55, 57, 58, 59, 60, 61, 62, 63, 64, 65, 65, 0, 0, 0, 3, 2, 5, 6, 3, 5, 9, 
10, 6, 12, 9, 14, 15, 10, 17, 18, 14, 20, 21, 15, 23, 24, 25, 20, 27, 28, 21, 30, 31, 32, 33, 27, 35, 36, 28, 38, 39, 40, 41, 42, 35, 44, 45, 36, 47, 48, 49, 50, 51, 52, 44, 54, 55, 45, 57, 58, 59, 60, 61, 62, 63, 54, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 66, 66, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 77, 0, 0, 0, 3, 2, 5, 6, 3, 5, 9, 10, 6, 12, 9, 14, 15, 10, 17, 18, 14, 20, 21, 15, 23, 24, 25, 20, 27, 28, 21, 30, 31, 32, 33, 27, 35, 36, 28, 38, 39, 40, 41, 42, 35, 44, 45, 36, 47, 48, 49, 50, 51, 52, 44, 54, 55, 45, 57, 58, 59, 60, 61, 62, 63, 54, 65, 66, 55, 68, 69, 70, 71, 72, 73, 74, 75, 65, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 78, 78, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 90, 0, 0, 0, 3, 2, 5, 6, 3, 5, 9, 10, 6, 12, 9, 14, 15, 10, 17, 18, 14, 20, 21, 15, 23, 24, 25, 20, 27, 28, 21, 30, 31, 32, 33, 27, 35, 36, 28, 38, 39, 40, 41, 42, 35, 44, 45, 36, 47, 48, 49, 50, 51, 52, 44, 54, 55, 45, 57, 58, 59, 60, 61, 62, 63, 54, 65, 66, 55, 68, 69, 70, 71, 72, 73, 74, 75, 65, 77, 78, 66, 80, 81, 82, 83, 84, 85, 86, 87, 88, 77, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 91, 91, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 104, 0, 0, 0, 3, 2, 5, 6, 3, 5, 9, 10, 6, 12, 9, 14, 15, 10, 17, 18, 14, 20, 21, 15, 23, 24, 25, 20, 27, 28, 21, 30, 31, 32, 33, 27, 35, 36, 28, 38, 39, 40, 41, 42, 35, 44, 45, 36, 47, 48, 49, 50, 51, 52, 44, 54, 55, 45, 57, 58, 59, 60, 61, 62, 63, 54, 65, 66, 55, 68, 69, 70, 71, 72, 73, 74, 75, 65, 77, 78, 66, 80, 81, 82, 83, 84, 85, 86, 87, 88, 77, 90, 91, 78, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 90, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 105, 105, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 119, }; static const int _DOWN2[] = { -1, -1, -1, -1, 0, -1, -1, 0, -1, 0, 0, -1, -1, -1, -1, -1, 1, -1, -1, 2, 0, -1, -1, 3, -1, 5, -1, -1, -1, -1, 3, -1, 5, -1, 5, 0, -1, -1, 3, -1, 5, 6, -1, -1, 9, -1, -1, -1, -1, -1, 6, -1, 
8, 9, -1, 9, 0, -1, -1, 3, -1, 5, 6, -1, -1, 9, 10, -1, 12, -1, 14, -1, -1, -1, -1, -1, -1, 10, -1, 12, 13, 14, -1, 14, 0, -1, -1, 3, -1, 5, 6, -1, -1, 9, 10, -1, 12, -1, 14, 15, -1, 17, 18, -1, 20, -1, -1, -1, -1, -1, -1, -1, 15, -1, 17, 18, 19, 20, -1, 20, 0, -1, -1, 3, -1, 5, 6, -1, -1, 9, 10, -1, 12, -1, 14, 15, -1, 17, 18, -1, 20, 21, -1, 23, 24, 25, -1, 27, -1, -1, -1, -1, -1, -1, -1, -1, 21, -1, 23, 24, 25, 26, 27, -1, 27, 0, -1, -1, 3, -1, 5, 6, -1, -1, 9, 10, -1, 12, -1, 14, 15, -1, 17, 18, -1, 20, 21, -1, 23, 24, 25, -1, 27, 28, -1, 30, 31, 32, 33, -1, 35, -1, -1, -1, -1, -1, -1, -1, -1, -1, 28, -1, 30, 31, 32, 33, 34, 35, -1, 35, 0, -1, -1, 3, -1, 5, 6, -1, -1, 9, 10, -1, 12, -1, 14, 15, -1, 17, 18, -1, 20, 21, -1, 23, 24, 25, -1, 27, 28, -1, 30, 31, 32, 33, -1, 35, 36, -1, 38, 39, 40, 41, 42, -1, 44, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 36, -1, 38, 39, 40, 41, 42, 43, 44, -1, 44, 0, -1, -1, 3, -1, 5, 6, -1, -1, 9, 10, -1, 12, -1, 14, 15, -1, 17, 18, -1, 20, 21, -1, 23, 24, 25, -1, 27, 28, -1, 30, 31, 32, 33, -1, 35, 36, -1, 38, 39, 40, 41, 42, -1, 44, 45, -1, 47, 48, 49, 50, 51, 52, -1, 54, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 45, -1, 47, 48, 49, 50, 51, 52, 53, 54, -1, 54, 0, -1, -1, 3, -1, 5, 6, -1, -1, 9, 10, -1, 12, -1, 14, 15, -1, 17, 18, -1, 20, 21, -1, 23, 24, 25, -1, 27, 28, -1, 30, 31, 32, 33, -1, 35, 36, -1, 38, 39, 40, 41, 42, -1, 44, 45, -1, 47, 48, 49, 50, 51, 52, -1, 54, 55, -1, 57, 58, 59, 60, 61, 62, 63, -1, 65, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 55, -1, 57, 58, 59, 60, 61, 62, 63, 64, 65, -1, 65, 0, -1, -1, 3, -1, 5, 6, -1, -1, 9, 10, -1, 12, -1, 14, 15, -1, 17, 18, -1, 20, 21, -1, 23, 24, 25, -1, 27, 28, -1, 30, 31, 32, 33, -1, 35, 36, -1, 38, 39, 40, 41, 42, -1, 44, 45, -1, 47, 48, 49, 50, 51, 52, -1, 54, 55, -1, 57, 58, 59, 60, 61, 62, 63, -1, 65, 66, -1, 68, 69, 70, 71, 72, 73, 74, 75, -1, 77, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 66, -1, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, -1, 77, 0, -1, -1, 
3, -1, 5, 6, -1, -1, 9, 10, -1, 12, -1, 14, 15, -1, 17, 18, -1, 20, 21, -1, 23, 24, 25, -1, 27, 28, -1, 30, 31, 32, 33, -1, 35, 36, -1, 38, 39, 40, 41, 42, -1, 44, 45, -1, 47, 48, 49, 50, 51, 52, -1, 54, 55, -1, 57, 58, 59, 60, 61, 62, 63, -1, 65, 66, -1, 68, 69, 70, 71, 72, 73, 74, 75, -1, 77, 78, -1, 80, 81, 82, 83, 84, 85, 86, 87, 88, -1, 90, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 78, -1, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, -1, 90, 0, -1, -1, 3, -1, 5, 6, -1, -1, 9, 10, -1, 12, -1, 14, 15, -1, 17, 18, -1, 20, 21, -1, 23, 24, 25, -1, 27, 28, -1, 30, 31, 32, 33, -1, 35, 36, -1, 38, 39, 40, 41, 42, -1, 44, 45, -1, 47, 48, 49, 50, 51, 52, -1, 54, 55, -1, 57, 58, 59, 60, 61, 62, 63, -1, 65, 66, -1, 68, 69, 70, 71, 72, 73, 74, 75, -1, 77, 78, -1, 80, 81, 82, 83, 84, 85, 86, 87, 88, -1, 90, 91, -1, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, -1, 104, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 91, -1, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, -1, 104, }; static const int _DOWN_XYZ[] = { 2, 0, 1, 2, 0, 0, 0, 1, 1, 2, 0, 1, 2, 0, 0, 0, 1, 2, 1, 2, 0, 1, 2, 0, 1, 0, 0, 0, 0, 0, 1, 2, 1, 1, 2, 0, 1, 2, 0, 1, 0, 0, 2, 1, 0, 0, 0, 0, 0, 0, 1, 2, 1, 1, 1, 2, 0, 1, 2, 0, 1, 0, 0, 2, 1, 0, 0, 2, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 2, 1, 1, 1, 1, 2, 0, 1, 2, 0, 1, 0, 0, 2, 1, 0, 0, 2, 0, 1, 0, 0, 2, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 1, 1, 1, 1, 1, 2, 0, 1, 2, 0, 1, 0, 0, 2, 1, 0, 0, 2, 0, 1, 0, 0, 2, 0, 0, 1, 0, 0, 2, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 1, 1, 1, 1, 1, 1, 2, 0, 1, 2, 0, 1, 0, 0, 2, 1, 0, 0, 2, 0, 1, 0, 0, 2, 0, 0, 1, 0, 0, 2, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 1, 1, 1, 1, 1, 1, 1, 2, 0, 1, 2, 0, 1, 0, 0, 2, 1, 0, 0, 2, 0, 1, 0, 0, 2, 0, 0, 1, 0, 0, 2, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 2, 0, 1, 2, 0, 1, 0, 0, 2, 1, 0, 0, 2, 0, 1, 0, 0, 2, 0, 0, 1, 0, 0, 2, 0, 0, 0, 1, 0, 0, 2, 0, 
0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 0, 1, 2, 0, 1, 0, 0, 2, 1, 0, 0, 2, 0, 1, 0, 0, 2, 0, 0, 1, 0, 0, 2, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 0, 1, 2, 0, 1, 0, 0, 2, 1, 0, 0, 2, 0, 1, 0, 0, 2, 0, 0, 1, 0, 0, 2, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 0, 1, 2, 0, 1, 0, 0, 2, 1, 0, 0, 2, 0, 1, 0, 0, 2, 0, 0, 1, 0, 0, 2, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 0, 1, 2, 0, 1, 0, 0, 2, 1, 0, 0, 2, 0, 1, 0, 0, 2, 0, 0, 1, 0, 0, 2, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, }; static const int _DOWN_XYZ_ORDER[] = { 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 2, 0, 0, 0, 0, 0, 2, 0, 0, 2, 3, 0, 0, 1, 0, 1, 0, 0, 0, 0, 3, 0, 1, 0, 3, 4, 0, 0, 2, 0, 2, 1, 0, 0, 1, 0, 0, 0, 0, 0, 4, 0, 2, 1, 0, 4, 5, 0, 0, 3, 0, 3, 2, 0, 0, 2, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 5, 0, 3, 2, 1, 0, 5, 6, 0, 0, 4, 0, 4, 3, 0, 0, 3, 2, 0, 2, 0, 2, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 6, 0, 4, 3, 2, 1, 0, 6, 7, 0, 0, 5, 0, 5, 4, 0, 0, 4, 3, 0, 3, 0, 3, 2, 0, 2, 2, 0, 2, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 
0, 0, 0, 7, 0, 5, 4, 3, 2, 1, 0, 7, 8, 0, 0, 6, 0, 6, 5, 0, 0, 5, 4, 0, 4, 0, 4, 3, 0, 3, 3, 0, 3, 2, 0, 2, 2, 2, 0, 2, 1, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 8, 0, 6, 5, 4, 3, 2, 1, 0, 8, 9, 0, 0, 7, 0, 7, 6, 0, 0, 6, 5, 0, 5, 0, 5, 4, 0, 4, 4, 0, 4, 3, 0, 3, 3, 3, 0, 3, 2, 0, 2, 2, 2, 2, 0, 2, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 9, 0, 7, 6, 5, 4, 3, 2, 1, 0, 9, 10, 0, 0, 8, 0, 8, 7, 0, 0, 7, 6, 0, 6, 0, 6, 5, 0, 5, 5, 0, 5, 4, 0, 4, 4, 4, 0, 4, 3, 0, 3, 3, 3, 3, 0, 3, 2, 0, 2, 2, 2, 2, 2, 0, 2, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 10, 0, 8, 7, 6, 5, 4, 3, 2, 1, 0, 10, 11, 0, 0, 9, 0, 9, 8, 0, 0, 8, 7, 0, 7, 0, 7, 6, 0, 6, 6, 0, 6, 5, 0, 5, 5, 5, 0, 5, 4, 0, 4, 4, 4, 4, 0, 4, 3, 0, 3, 3, 3, 3, 3, 0, 3, 2, 0, 2, 2, 2, 2, 2, 2, 0, 2, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 11, 0, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, 11, 12, 0, 0, 10, 0, 10, 9, 0, 0, 9, 8, 0, 8, 0, 8, 7, 0, 7, 7, 0, 7, 6, 0, 6, 6, 6, 0, 6, 5, 0, 5, 5, 5, 5, 0, 5, 4, 0, 4, 4, 4, 4, 4, 0, 4, 3, 0, 3, 3, 3, 3, 3, 3, 0, 3, 2, 0, 2, 2, 2, 2, 2, 2, 2, 0, 2, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 12, 0, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, 12, 13, 0, 0, 11, 0, 11, 10, 0, 0, 10, 9, 0, 9, 0, 9, 8, 0, 8, 8, 0, 8, 7, 0, 7, 7, 7, 0, 7, 6, 0, 6, 6, 6, 6, 0, 6, 5, 0, 5, 5, 5, 5, 5, 0, 5, 4, 0, 4, 4, 4, 4, 4, 4, 0, 4, 3, 0, 3, 3, 3, 3, 3, 3, 3, 0, 3, 2, 0, 2, 2, 2, 2, 2, 2, 2, 2, 0, 2, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 13, 0, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, 13, 14, 0, 0, 12, 0, 12, 11, 0, 0, 11, 10, 0, 10, 0, 10, 9, 0, 9, 9, 0, 9, 8, 0, 8, 8, 8, 0, 8, 7, 0, 7, 7, 7, 7, 0, 7, 6, 0, 6, 6, 6, 6, 6, 0, 6, 5, 0, 5, 5, 5, 5, 5, 5, 0, 5, 4, 0, 4, 4, 4, 4, 4, 4, 4, 0, 4, 3, 0, 3, 3, 3, 3, 3, 3, 3, 3, 0, 3, 2, 0, 2, 2, 2, 2, 2, 2, 2, 2, 2, 0, 2, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 14, 0, 12, 11, 10, 9, 8, 7, 
6, 5, 4, 3, 2, 1, 0, 14, }; #define WHEREX_IF_L_INC1(i) i #define WHEREY_IF_L_INC1(i) _UPIDY[i] #define WHEREZ_IF_L_INC1(i) _UPIDZ[i] #define STARTX_IF_L_DEC1(i) 0 #define STARTY_IF_L_DEC1(i) ((i<2)?0:_LEN_CART[i-2]) #define STARTZ_IF_L_DEC1(i) (_LEN_CART[i-1]-1) #define ADDR_IF_L_DEC1(l,m) _DOWN1[_CUM_LEN_CART[l-1]+m] #define ADDR_IF_L_DEC2(l,m) _DOWN2[_CUM_LEN_CART[l-1]+m] #define DEC1_XYZ(l,m) _DOWN_XYZ[_CUM_LEN_CART[l-1]+m] #define DEC1_XYZ_ORDER(l,m) _DOWN_XYZ_ORDER[_CUM_LEN_CART[l-1]+m] static int vrr1d_withGv(double complex *g, double *rijri, double aij, double *Gv, int topl, size_t NGv) { int cumxyz = 1; if (topl == 0) { return cumxyz; } double *kx = Gv; double *ky = kx + NGv; double *kz = ky + NGv; int i, n, m, l; double a2; double complex *p0, *p1, *p2, *dec1, *dec2; double *ka2 = malloc(sizeof(double) * NGv*3); double *kxa2 = ka2; double *kya2 = kxa2 + NGv; double *kza2 = kya2 + NGv; a2 = .5 / aij; for (n = 0; n < NGv; n++) { kxa2[n] = kx[n] * a2; kya2[n] = ky[n] * a2; kza2[n] = kz[n] * a2; } p0 = g + NGv; for (n = 0; n < NGv; n++) { p0[ n] = (rijri[0] - kxa2[n]*_Complex_I) * g[n]; p0[NGv +n] = (rijri[1] - kya2[n]*_Complex_I) * g[n]; p0[NGv*2+n] = (rijri[2] - kza2[n]*_Complex_I) * g[n]; } cumxyz += 3; for (l = 1; l < topl; l++) { p0 = g + cumxyz * NGv; dec1 = p0 - _LEN_CART[l ] * NGv; dec2 = dec1 - _LEN_CART[l-1] * NGv; for (i = 0; i < _LEN_CART[l+1]; i++) { m = DEC1_XYZ(l+1,i); kxa2 = ka2 + m * NGv; p1 = dec1 + ADDR_IF_L_DEC1(l+1,i) * NGv; p2 = dec2 + ADDR_IF_L_DEC2(l+1,i) * NGv; if (ADDR_IF_L_DEC2(l+1,i) < 0) { for (n = 0; n < NGv; n++) { p0[n] = (rijri[m]-kxa2[n]*_Complex_I)*p1[n]; } } else { a2 = .5/aij * DEC1_XYZ_ORDER(l+1,i); for (n = 0; n < NGv; n++) { p0[n] = a2*p2[n] + (rijri[m]-kxa2[n]*_Complex_I)*p1[n]; } } p0 += NGv; } cumxyz += _LEN_CART[l+1]; } free(ka2); return cumxyz; } /* * if li = 3, lj = 1 * (10 + X*00 -> 01): * gs + X*fs -> fp */ static void vrr2d_ket_inc1_withGv(double complex *out, const double complex *g, double *rirj, int li, int 
lj, size_t NGv) { if (lj == 0) { NPzcopy(out, g, _LEN_CART[li]*NGv); return; } const int row_10 = _LEN_CART[li+1]; const int row_00 = _LEN_CART[li ]; const int col_00 = _LEN_CART[lj-1]; const double complex *g00 = g; const double complex *g10 = g + row_00*col_00*NGv; int i, j, n; const double complex *p00, *p10; double complex *p01 = out; for (j = STARTX_IF_L_DEC1(lj); j < _LEN_CART[lj-1]; j++) { for (i = 0; i < row_00; i++) { p00 = g00 + (j*row_00+i) * NGv; p10 = g10 + (j*row_10+WHEREX_IF_L_INC1(i)) * NGv; for (n = 0; n < NGv; n++) { p01[n] = p10[n] + rirj[0] * p00[n]; } p01 += NGv; } } for (j = STARTY_IF_L_DEC1(lj); j < _LEN_CART[lj-1]; j++) { for (i = 0; i < row_00; i++) { p00 = g00 + (j*row_00+i) * NGv; p10 = g10 + (j*row_10+WHEREY_IF_L_INC1(i)) * NGv; for (n = 0; n < NGv; n++) { p01[n] = p10[n] + rirj[1] * p00[n]; } p01 += NGv; } } j = STARTZ_IF_L_DEC1(lj); if (j < _LEN_CART[lj-1]) { for (i = 0; i < row_00; i++) { p00 = g00 + (j*row_00+i) * NGv; p10 = g10 + (j*row_10+WHEREZ_IF_L_INC1(i)) * NGv; for (n = 0; n < NGv; n++) { p01[n] = p10[n] + rirj[2] * p00[n]; } p01 += NGv; } } } /* * transpose i, j when storing into out */ static void vrr2d_inc1_swapij(double complex *out, const double complex *g, double *rirj, int li, int lj, size_t NGv) { if (lj == 0) { NPzcopy(out, g, _LEN_CART[li]*NGv); return; } const int row_01 = _LEN_CART[lj]; const int row_10 = _LEN_CART[li+1]; const int row_00 = _LEN_CART[li ]; const int col_00 = _LEN_CART[lj-1]; const double complex *g00 = g; const double complex *g10 = g + row_00*col_00*NGv; int i, j, n; const double complex *p00, *p10; double complex *p01 = out; for (j = STARTX_IF_L_DEC1(lj); j < _LEN_CART[lj-1]; j++) { for (i = 0; i < row_00; i++) { p00 = g00 + (j*row_00+i) * NGv; p10 = g10 + (j*row_10+WHEREX_IF_L_INC1(i)) * NGv; p01 = out + i*row_01 * NGv; for (n = 0; n < NGv; n++) { p01[n] = p10[n] + rirj[0] * p00[n]; } } out += NGv; } for (j = STARTY_IF_L_DEC1(lj); j < _LEN_CART[lj-1]; j++) { for (i = 0; i < row_00; i++) { p00 = 
g00 + (j*row_00+i) * NGv; p10 = g10 + (j*row_10+WHEREY_IF_L_INC1(i)) * NGv; p01 = out + i*row_01 * NGv; for (n = 0; n < NGv; n++) { p01[n] = p10[n] + rirj[1] * p00[n]; } } out += NGv; } j = STARTZ_IF_L_DEC1(lj); if (j < _LEN_CART[lj-1]) { for (i = 0; i < row_00; i++) { p00 = g00 + (j*row_00+i) * NGv; p10 = g10 + (j*row_10+WHEREZ_IF_L_INC1(i)) * NGv; p01 = out + i*row_01 * NGv; for (n = 0; n < NGv; n++) { p01[n] = p10[n] + rirj[2] * p00[n]; } } } } static void vrr2d_withGv(double complex *out, double complex *g, double complex *gbuf2, const int li, const int lj, const double *ri, const double *rj, size_t NGv) { const int nmax = li + lj; double complex *g00, *g01, *gswap, *pg00, *pg01; int row_01, col_01, row_00, col_00; int i, j; double rirj[3]; rirj[0] = ri[0] - rj[0]; rirj[1] = ri[1] - rj[1]; rirj[2] = ri[2] - rj[2]; g00 = gbuf2; g01 = g; for (j = 1; j < lj; j++) { gswap = g00; g00 = g01; g01 = gswap; pg00 = g00; pg01 = g01; for (i = li; i <= nmax-j; i++) { vrr2d_ket_inc1_withGv(pg01, pg00, rirj, i, j, NGv); row_01 = _LEN_CART[i]; col_01 = _LEN_CART[j]; row_00 = _LEN_CART[i ]; col_00 = _LEN_CART[j-1]; pg00 += row_00*col_00 * NGv; pg01 += row_01*col_01 * NGv; } } vrr2d_ket_inc1_withGv(out, g01, rirj, li, lj, NGv); } /* (0,li+lj) => (li,lj) */ static void hrr2d_withGv(double complex *out, double complex *g, double complex *gbuf2, const int li, const int lj, const double *ri, const double *rj, size_t NGv) { const int nmax = li + lj; double complex *g00, *g01, *gswap, *pg00, *pg01; int row_01, col_01, row_00, col_00; int i, j; double rjri[3]; rjri[0] = rj[0] - ri[0]; rjri[1] = rj[1] - ri[1]; rjri[2] = rj[2] - ri[2]; g00 = gbuf2; g01 = g; for (i = 1; i < li; i++) { gswap = g00; g00 = g01; g01 = gswap; pg00 = g00; pg01 = g01; for (j = lj; j <= nmax-i; j++) { vrr2d_ket_inc1_withGv(pg01, pg00, rjri, j, i, NGv); row_01 = _LEN_CART[j]; col_01 = _LEN_CART[i]; row_00 = _LEN_CART[j ]; col_00 = _LEN_CART[i-1]; pg00 += row_00*col_00 * NGv; pg01 += row_01*col_01 * NGv; } } 
vrr2d_inc1_swapij(out, g01, rjri, lj, li, NGv); } /* * Recursive relation */ static void aopair_rr_igtj_early(double complex *g, double ai, double aj, CINTEnvVars *envs, FPtr_eval_gz eval_gz, double complex fac, double *Gv, double *b, int *gxyz, int *gs, size_t NGv, double *cache) { const int topl = envs->li_ceil + envs->lj_ceil; const double aij = ai + aj; const double *ri = envs->ri; const double *rj = envs->rj; double rij[3], rijri[3]; rij[0] = (ai * ri[0] + aj * rj[0]) / aij; rij[1] = (ai * ri[1] + aj * rj[1]) / aij; rij[2] = (ai * ri[2] + aj * rj[2]) / aij; rijri[0] = rij[0] - ri[0]; rijri[1] = rij[1] - ri[1]; rijri[2] = rij[2] - ri[2]; (*eval_gz)(g, aij, rij, fac, Gv, b, gxyz, gs, NGv, cache); vrr1d_withGv(g, rijri, aij, Gv, topl, NGv); } static void aopair_rr_iltj_early(double complex *g, double ai, double aj, CINTEnvVars *envs, FPtr_eval_gz eval_gz, double complex fac, double *Gv, double *b, int *gxyz, int *gs, size_t NGv, double *cache) { const int topl = envs->li_ceil + envs->lj_ceil; const double aij = ai + aj; const double *ri = envs->ri; const double *rj = envs->rj; double rij[3], rijrj[3]; rij[0] = (ai * ri[0] + aj * rj[0]) / aij; rij[1] = (ai * ri[1] + aj * rj[1]) / aij; rij[2] = (ai * ri[2] + aj * rj[2]) / aij; rijrj[0] = rij[0] - rj[0]; rijrj[1] = rij[1] - rj[1]; rijrj[2] = rij[2] - rj[2]; (*eval_gz)(g, aij, rij, fac, Gv, b, gxyz, gs, NGv, cache); vrr1d_withGv(g, rijrj, aij, Gv, topl, NGv); } static void aopair_rr_igtj_lazy(double complex *g, double ai, double aj, CINTEnvVars *envs, FPtr_eval_gz eval_gz, double complex fac, double *Gv, double *b, int *gxyz, int *gs, size_t NGv, double *cache) { const int nmax = envs->li_ceil + envs->lj_ceil; const int lj = envs->lj_ceil; const int dj = envs->g_stride_j; const double aij = ai + aj; const double a2 = .5 / aij; const double *ri = envs->ri; const double *rj = envs->rj; double rij[3], rirj[3], rijri[3]; double complex *gx = g; double complex *gy = gx + envs->g_size * NGv; double complex *gz = gy + 
envs->g_size * NGv; double *kx = Gv; double *ky = kx + NGv; double *kz = ky + NGv; size_t off0, off1, off2; int i, j, n, ptr; double ia2; rirj[0] = ri[0] - rj[0]; rirj[1] = ri[1] - rj[1]; rirj[2] = ri[2] - rj[2]; rij[0] = (ai * ri[0] + aj * rj[0]) / aij; rij[1] = (ai * ri[1] + aj * rj[1]) / aij; rij[2] = (ai * ri[2] + aj * rj[2]) / aij; rijri[0] = rij[0] - ri[0]; rijri[1] = rij[1] - ri[1]; rijri[2] = rij[2] - ri[2]; for (n = 0; n < NGv; n++) { gx[n] = 1; gy[n] = 1; } (*eval_gz)(gz, aij, rij, fac, Gv, b, gxyz, gs, NGv, cache); if (nmax > 0) { for (n = 0; n < NGv; n++) { if (gz[n] != 0) { gx[NGv+n] = (rijri[0] - kx[n]*a2*_Complex_I) * gx[n]; gy[NGv+n] = (rijri[1] - ky[n]*a2*_Complex_I) * gy[n]; gz[NGv+n] = (rijri[2] - kz[n]*a2*_Complex_I) * gz[n]; } } } for (i = 1; i < nmax; i++) { off0 = (i-1) * NGv; off1 = i * NGv; off2 = (i+1) * NGv; ia2 = i * a2; for (n = 0; n < NGv; n++) { if (gz[n] != 0) { gx[off2+n] = ia2 * gx[off0+n] + (rijri[0] - kx[n]*a2*_Complex_I) * gx[off1+n]; gy[off2+n] = ia2 * gy[off0+n] + (rijri[1] - ky[n]*a2*_Complex_I) * gy[off1+n]; gz[off2+n] = ia2 * gz[off0+n] + (rijri[2] - kz[n]*a2*_Complex_I) * gz[off1+n]; } } } for (j = 1; j <= lj; j++) { ptr = dj * j; for (i = ptr; i <= ptr + nmax - j; i++) { off0 = i * NGv - dj * NGv; // [i, j-1] off1 = (i+1) * NGv - dj * NGv; // [i+1,j-1] off2 = i * NGv; // [i, j ] for (n = 0; n < NGv; n++) { if (gz[n] != 0) { gx[off2+n] = gx[off1+n] + rirj[0] * gx[off0+n]; gy[off2+n] = gy[off1+n] + rirj[1] * gy[off0+n]; gz[off2+n] = gz[off1+n] + rirj[2] * gz[off0+n]; } } } } } static void aopair_rr_iltj_lazy(double complex *g, double ai, double aj, CINTEnvVars *envs, FPtr_eval_gz eval_gz, double complex fac, double *Gv, double *b, int *gxyz, int *gs, size_t NGv, double *cache) { const int nmax = envs->li_ceil + envs->lj_ceil; const int li = envs->li_ceil; const int dj = envs->g_stride_j; const double aij = ai + aj; const double a2 = .5 / aij; const double *ri = envs->ri; const double *rj = envs->rj; double rij[3], rirj[3], 
rijrj[3]; double complex *gx = g; double complex *gy = gx + envs->g_size * NGv; double complex *gz = gy + envs->g_size * NGv; double *kx = Gv; double *ky = kx + NGv; double *kz = ky + NGv; size_t off0, off1, off2; int i, j, n; double ia2; rirj[0] = rj[0] - ri[0]; rirj[1] = rj[1] - ri[1]; rirj[2] = rj[2] - ri[2]; rij[0] = (ai * ri[0] + aj * rj[0]) / aij; rij[1] = (ai * ri[1] + aj * rj[1]) / aij; rij[2] = (ai * ri[2] + aj * rj[2]) / aij; rijrj[0] = rij[0] - rj[0]; rijrj[1] = rij[1] - rj[1]; rijrj[2] = rij[2] - rj[2]; for (n = 0; n < NGv; n++) { gx[n] = 1; gy[n] = 1; } (*eval_gz)(gz, aij, rij, fac, Gv, b, gxyz, gs, NGv, cache); if (nmax > 0) { off0 = dj * NGv; for (n = 0; n < NGv; n++) { if (gz[n] != 0) { gx[off0+n] = (rijrj[0] - kx[n]*a2*_Complex_I) * gx[n]; gy[off0+n] = (rijrj[1] - ky[n]*a2*_Complex_I) * gy[n]; gz[off0+n] = (rijrj[2] - kz[n]*a2*_Complex_I) * gz[n]; } } } for (i = 1; i < nmax; i++) { off0 = (i-1) * dj * NGv; off1 = i * dj * NGv; off2 = (i+1) * dj * NGv; ia2 = i * a2; for (n = 0; n < NGv; n++) { if (gz[n] != 0) { gx[off2+n] = ia2 * gx[off0+n] + (rijrj[0] - kx[n]*a2*_Complex_I) * gx[off1+n]; gy[off2+n] = ia2 * gy[off0+n] + (rijrj[1] - ky[n]*a2*_Complex_I) * gy[off1+n]; gz[off2+n] = ia2 * gz[off0+n] + (rijrj[2] - kz[n]*a2*_Complex_I) * gz[off1+n]; } } } for (i = 1; i <= li; i++) { for (j = 0; j <= nmax - i; j++) { off0 = (i-1) * NGv + j * dj * NGv; // [i-1,j ] off1 = (i-1) * NGv + (j+1) * dj * NGv; // [i-1,j+1] off2 = i * NGv + j * dj * NGv; // [i ,j ] for (n = 0; n < NGv; n++) { if (gz[n] != 0) { gx[off2+n] = gx[off1+n] + rirj[0] * gx[off0+n]; gy[off2+n] = gy[off1+n] + rirj[1] * gy[off0+n]; gz[off2+n] = gz[off1+n] + rirj[2] * gz[off0+n]; } } } } } static void inner_prod(double complex *g, double complex *gout, int *idx, const CINTEnvVars *envs, double *Gv, size_t NGv, int empty) { int ix, iy, iz, n, k; double complex *gz = g + envs->g_size * NGv * 2; if (empty) { for (n = 0; n < envs->nf; n++) { ix = idx[n*3+0]; iy = idx[n*3+1]; iz = idx[n*3+2]; for (k 
= 0; k < NGv; k++) { if (gz[k] != 0) { gout[n*NGv+k] = g[ix*NGv+k] * g[iy*NGv+k] * g[iz*NGv+k]; } else { gout[n*NGv+k] = 0; } } } } else { for (n = 0; n < envs->nf; n++) { ix = idx[n*3+0]; iy = idx[n*3+1]; iz = idx[n*3+2]; for (k = 0; k < NGv; k++) { if (gz[k] != 0) { gout[n*NGv+k] += g[ix*NGv+k] * g[iy*NGv+k] * g[iz*NGv+k]; } } } } } static void prim_to_ctr(double complex *gc, const size_t nf, double complex *gp, const int nprim, const int nctr, const double *coeff, int empty) { size_t n, i; double c; if (empty) { for (n = 0; n < nctr; n++) { c = coeff[nprim*n]; for (i = 0; i < nf; i++) { gc[i] = gp[i] * c; } gc += nf; } } else { for (n = 0; n < nctr; n++) { c = coeff[nprim*n]; if (c != 0) { for (i = 0; i < nf; i++) { gc[i] += gp[i] * c; } } gc += nf; } } } static void transpose(double complex *out, double complex *in, int nf, int comp, size_t NGv) { size_t n, k, ic; double complex *pin; for (ic = 0; ic < comp; ic++) { for (n = 0; n < nf; n++) { pin = in + (n*comp+ic) * NGv; for (k = 0; k < NGv; k++) { out[n*NGv+k] = pin[k]; } } out += nf * NGv; } } static const int _GBUFSIZE[] = { 1, 4, 10, 10, 20, 48, 20, 35, 75, 150, 35, 56, 108, 216, 384, 56, 84, 147, 294, 510, 850, 84, 120, 192, 384, 654, 1090, 1640, 120, 165, 243, 486, 816, 1360, 2040, 3030 }; #define bufsize(i,j) _GBUFSIZE[((i>=j) ? 
(i*(i+1)/2+j) : (j*(j+1)/2+i))] int GTO_aopair_early_contract(double complex *out, CINTEnvVars *envs, FPtr_eval_gz eval_gz, double complex fac, double *Gv, double *b, int *gxyz, int *gs, size_t NGv, double *cache) { const int *shls = envs->shls; const int *bas = envs->bas; const double *env = envs->env; const int i_sh = shls[0]; const int j_sh = shls[1]; const int i_l = envs->i_l; const int j_l = envs->j_l; const int i_ctr = envs->x_ctr[0]; const int j_ctr = envs->x_ctr[1]; const int i_prim = bas(NPRIM_OF, i_sh); const int j_prim = bas(NPRIM_OF, j_sh); const int nf = envs->nf; const double *ri = envs->ri; const double *rj = envs->rj; const double *ai = env + bas(PTR_EXP, i_sh); const double *aj = env + bas(PTR_EXP, j_sh); const double *ci = env + bas(PTR_COEFF, i_sh); const double *cj = env + bas(PTR_COEFF, j_sh); double fac1i, fac1j; double aij, dij, eij; int ip, jp, n; int empty[2] = {1, 1}; int *jempty = empty + 0; int *iempty = empty + 1; const size_t len1 = bufsize(i_l,j_l) * NGv; const size_t leni = len1 * i_ctr; const size_t lenj = len1 * i_ctr * j_ctr; double complex *gctrj = malloc(sizeof(double complex)*(lenj+leni+len1)); double complex *g = gctrj + lenj; double complex *gctri, *g1d; if (j_ctr == 1) { gctri = gctrj; iempty = jempty; } else { gctri = g; g += leni; } g1d = g; void (*aopair_rr)(); int offset_g1d; if (i_l >= j_l) { aopair_rr = aopair_rr_igtj_early; offset_g1d = _CUM_LEN_CART[i_l] - _LEN_CART[i_l]; } else { aopair_rr = aopair_rr_iltj_early; offset_g1d = _CUM_LEN_CART[j_l] - _LEN_CART[j_l]; } int len_g1d = _CUM_LEN_CART[i_l+j_l] - offset_g1d; double rrij = CINTsquare_dist(ri, rj); double fac1 = SQRTPI * M_PI * CINTcommon_fac_sp(i_l) * CINTcommon_fac_sp(j_l); *jempty = 1; for (jp = 0; jp < j_prim; jp++) { if (j_ctr == 1) { fac1j = fac1 * cj[jp]; } else { fac1j = fac1; *iempty = 1; } for (ip = 0; ip < i_prim; ip++) { aij = ai[ip] + aj[jp]; eij = (ai[ip] * aj[jp] / aij) * rrij; if (eij > EXP_CUTOFF) { continue; } dij = exp(-eij) / (aij * 
sqrt(aij)); fac1i = fac1j * dij; (*aopair_rr)(g, ai[ip], aj[jp], envs, eval_gz, fac*fac1i, Gv, b, gxyz, gs, NGv, cache); prim_to_ctr(gctri, len_g1d*NGv, g1d+offset_g1d*NGv, i_prim, i_ctr, ci+ip, *iempty); *iempty = 0; } if (!*iempty) { if (j_ctr > 1) { prim_to_ctr(gctrj, i_ctr*len_g1d*NGv, gctri, j_prim,j_ctr, cj+jp, *jempty); } *jempty = 0; } } if (!*jempty) { g1d = gctrj; for (n = 0; n < i_ctr*j_ctr; n++) { if (i_l >= j_l) { vrr2d_withGv(out+n*nf*NGv, g1d, gctrj+lenj, envs->li_ceil, envs->lj_ceil, ri, rj, NGv); } else { hrr2d_withGv(out+n*nf*NGv, g1d, gctrj+lenj, envs->li_ceil, envs->lj_ceil, ri, rj, NGv); } g1d += len_g1d * NGv; } } free(gctrj); return !*jempty; } int GTO_aopair_lazy_contract(double complex *gctr, CINTEnvVars *envs, FPtr_eval_gz eval_gz, double complex fac, double *Gv, double *b, int *gxyz, int *gs, size_t NGv, double *cache) { const int *shls = envs->shls; const int *bas = envs->bas; const double *env = envs->env; const int i_sh = shls[0]; const int j_sh = shls[1]; const int i_l = envs->i_l; const int j_l = envs->j_l; const int i_ctr = envs->x_ctr[0]; const int j_ctr = envs->x_ctr[1]; const int i_prim = bas(NPRIM_OF, i_sh); const int j_prim = bas(NPRIM_OF, j_sh); const int n_comp = envs->ncomp_e1 * envs->ncomp_tensor; const int nf = envs->nf; const double *ri = envs->ri; const double *rj = envs->rj; const double *ai = env + bas(PTR_EXP, i_sh); const double *aj = env + bas(PTR_EXP, j_sh); const double *ci = env + bas(PTR_COEFF, i_sh); const double *cj = env + bas(PTR_COEFF, j_sh); double fac1i, fac1j; double aij, dij, eij; int ip, jp; int empty[3] = {1, 1, 1}; int *jempty = empty + 0; int *iempty = empty + 1; int *gempty = empty + 2; const size_t len1 = envs->g_size * 3 * (1<<envs->gbits) * NGv; const size_t leng = nf * n_comp * NGv; const size_t leni = nf * i_ctr * n_comp * NGv; size_t lenj = 0; if (n_comp > 1) { lenj = nf * i_ctr * j_ctr * n_comp * NGv; } double complex *g = malloc(sizeof(double complex) * (len1+leng+leni+lenj)); double 
complex *g1 = g + len1; double complex *gout, *gctri, *gctrj; if (n_comp == 1) { gctrj = gctr; } else { gctrj = g1; g1 += lenj; } if (j_ctr == 1) { gctri = gctrj; iempty = jempty; } else { gctri = g1; g1 += leni; } if (i_ctr == 1) { gout = gctri; gempty = iempty; } else { gout = g1; } void (*aopair_rr)(); if (i_l >= j_l) { aopair_rr = aopair_rr_igtj_lazy; } else { aopair_rr = aopair_rr_iltj_lazy; } int *idx = malloc(sizeof(int) * nf * 3); _g2c_index_xyz(idx, envs); double rrij = CINTsquare_dist(ri, rj); double fac1 = SQRTPI * M_PI * CINTcommon_fac_sp(i_l) * CINTcommon_fac_sp(j_l); *jempty = 1; for (jp = 0; jp < j_prim; jp++) { envs->aj = aj[jp]; if (j_ctr == 1) { fac1j = fac1 * cj[jp]; } else { fac1j = fac1; *iempty = 1; } for (ip = 0; ip < i_prim; ip++) { envs->ai = ai[ip]; aij = ai[ip] + aj[jp]; eij = (ai[ip] * aj[jp] / aij) * rrij; if (eij > EXP_CUTOFF) { continue; } dij = exp(-eij) / (aij * sqrt(aij)); if (i_ctr == 1) { fac1i = fac1j * dij * ci[ip]; } else { fac1i = fac1j * dij; } (*aopair_rr)(g, ai[ip], aj[jp], envs, eval_gz, fac*fac1i, Gv, b, gxyz, gs, NGv, cache); (*envs->f_gout)(g, gout, idx, envs, Gv, NGv, *gempty); if (i_ctr > 1) { prim_to_ctr(gctri, nf*n_comp*NGv, gout, i_prim, i_ctr, ci+ip, *iempty); } *iempty = 0; } if (!*iempty) { if (j_ctr > 1) { prim_to_ctr(gctrj, i_ctr*nf*n_comp*NGv, gctri, j_prim, j_ctr, cj+jp, *jempty); } *jempty = 0; } } if (n_comp > 1 && !*jempty) { transpose(gctr, gctrj, nf*i_ctr*j_ctr, n_comp, NGv); } free(g); free(idx); return !*jempty; } void GTO_Gv_general(double complex *out, double aij, double *rij, double complex fac, double *Gv, double *b, int *gxyz, int *gs, size_t NGv, double *cache) { double *kx = Gv; double *ky = kx + NGv; double *kz = ky + NGv; const double cutoff = EXP_CUTOFF * aij * 4; int n; double kR, kk; for (n = 0; n < NGv; n++) { kk = kx[n] * kx[n] + ky[n] * ky[n] + kz[n] * kz[n]; if (kk < cutoff) { kR = kx[n] * rij[0] + ky[n] * rij[1] + kz[n] * rij[2]; out[n] = exp(-.25*kk/aij) * fac * (cos(kR) - 
sin(kR)*_Complex_I); } else { out[n] = 0; } } }
/*
 * Specialization of the FT phase factor for an orthogonal lattice: the
 * plane-wave factor separates into x/y/z components which are computed
 * once per unique grid index and cached (kkpool/csx/csy/csz in `cache`).
 *
 * Gv = dot(b.T,gxyz) + kpt
 * kk = dot(Gv, Gv)
 * kr = dot(rij, Gv) = dot(rij,b.T, gxyz) + dot(rij,kpt) = dot(br, gxyz) + dot(rij,kpt)
 * out = fac * exp(-.25 * kk / aij) * (cos(kr) - sin(kr) * _Complex_I);
 *
 * b: the first 9 elements are 2\pi*inv(a^T), then 3 elements for k_{ij},
 * followed by 3*NGv floats for Gbase
 */
void GTO_Gv_orth(double complex *out, double aij, double *rij,
                 double complex fac, double *Gv, double *b, int *gxyz,
                 int *gs, size_t NGv, double *cache)
{
        const int nx = gs[0];
        const int ny = gs[1];
        const int nz = gs[2];
        double br[3];  // dot(rij, b): diagonal lattice => one product per axis
        br[0] = rij[0] * b[0];
        br[1] = rij[1] * b[4];
        br[2] = rij[2] * b[8];
        double *kpt = b + 9;
        double kr[3];  // per-axis k-point phases; their sum is dot(rij, kpt)
        kr[0] = rij[0] * kpt[0];
        kr[1] = rij[1] * kpt[1];
        kr[2] = rij[2] * kpt[2];
        double *Gxbase = b + 12;
        double *Gybase = Gxbase + nx;
        double *Gzbase = Gybase + ny;
        double *kx = Gv;
        double *ky = kx + NGv;
        double *kz = ky + NGv;
        // Scratch layout in `cache`: nx+ny+nz doubles of Gaussian exponents
        // followed by nx+ny+nz complex cos/sin factors.
        double *kkpool = cache;
        double *kkx = kkpool;
        double *kky = kkx + nx;
        double *kkz = kky + ny;
        double complex *zbuf = (double complex *)(kkz + nz);
        double complex *csx = zbuf;
        double complex *csy = csx + nx;
        double complex *csz = csy + ny;
        int *gx = gxyz;
        int *gy = gx + NGv;
        int *gz = gy + NGv;
        const double cutoff = EXP_CUTOFF * aij * 4;
        int n, ix, iy, iz;
        double Gr;
        // -1 marks "not computed yet"; filled entries are always >= 0.
        for (n = 0; n < nx+ny+nz; n++) {
                kkpool[n] = -1;
        }
        for (n = 0; n < NGv; n++) {
                ix = gx[n];
                iy = gy[n];
                iz = gz[n];
                if (kkx[ix] < 0) {
                        Gr = Gxbase[ix] * br[0] + kr[0];
                        kkx[ix] = .25 * kx[n]*kx[n] / aij;
                        csx[ix] = exp(-kkx[ix]) * (cos(Gr)-sin(Gr)*_Complex_I);
                }
                if (kky[iy] < 0) {
                        Gr = Gybase[iy] * br[1] + kr[1];
                        kky[iy] = .25 * ky[n]*ky[n] / aij;
                        csy[iy] = exp(-kky[iy]) * (cos(Gr)-sin(Gr)*_Complex_I);
                }
                if (kkz[iz] < 0) {
                        Gr = Gzbase[iz] * br[2] + kr[2];
                        kkz[iz] = .25 * kz[n]*kz[n] / aij;
                        // fac is folded into the z factor only, so the product
                        // below carries it exactly once.
                        csz[iz] = fac * exp(-kkz[iz]) * (cos(Gr)-sin(Gr)*_Complex_I);
                }
                // NOTE(review): kkx/kky/kkz already include the .25/aij factor,
                // yet their sum is compared against cutoff = EXP_CUTOFF*aij*4,
                // the raw-|G|^2 threshold used in GTO_Gv_general. The screening
                // here is therefore much looser than the general path --
                // harmless (only computes additional tiny exponentials) but
                // asymmetric; confirm against upstream.
                if (kkx[ix] + kky[iy] + kkz[iz] < cutoff) {
                        out[n] = csx[ix] * csy[iy] * csz[iz];
                } else {
                        out[n] = 0;
                }
        }
}
void
/* Non-orthogonal-lattice variant of the FT phase factor: the Gaussian
 * damping exp(-kk/4aij) no longer separates by axis, but the cos/sin
 * phase still does, so only the phases are cached per grid index. */
GTO_Gv_nonorth(double complex *out, double aij, double *rij,
               double complex fac, double *Gv, double *b, int *gxyz,
               int *gs, size_t NGv, double *cache)
{
        const int nx = gs[0];
        const int ny = gs[1];
        const int nz = gs[2];
        double br[3]; // dot(rij, b) -- full 3x3 product for a non-orthogonal cell
        br[0]  = rij[0] * b[0];
        br[0] += rij[1] * b[1];
        br[0] += rij[2] * b[2];
        br[1]  = rij[0] * b[3];
        br[1] += rij[1] * b[4];
        br[1] += rij[2] * b[5];
        br[2]  = rij[0] * b[6];
        br[2] += rij[1] * b[7];
        br[2] += rij[2] * b[8];
        double *kpt = b + 9;
        double kr[3]; // per-axis k-point phases; their sum is dot(rij, kpt)
        kr[0] = rij[0] * kpt[0];
        kr[1] = rij[1] * kpt[1];
        kr[2] = rij[2] * kpt[2];
        double *Gxbase = b + 12;
        double *Gybase = Gxbase + nx;
        double *Gzbase = Gybase + ny;
        double *kx = Gv;
        double *ky = kx + NGv;
        double *kz = ky + NGv;
        // Scratch layout in `cache`: nx+ny+nz complex phase factors followed
        // by nx+ny+nz "not yet computed" flags.
        double complex *zbuf = (double complex *)cache;
        double complex *csx = zbuf;
        double complex *csy = csx + nx;
        double complex *csz = csy + ny;
        int n;
        char *empty = (char *)(csz + nz);
        char *xempty = empty;
        char *yempty = xempty + nx;
        char *zempty = yempty + ny;
        for (n = 0; n < nx+ny+nz; n++) {
                empty[n] = 1;
        }
        int *gx = gxyz;
        int *gy = gx + NGv;
        int *gz = gy + NGv;
        const double cutoff = EXP_CUTOFF * aij * 4;
        int ix, iy, iz;
        double Gr, kk;
        for (n = 0; n < NGv; n++) {
                ix = gx[n];
                iy = gy[n];
                iz = gz[n];
                kk = kx[n] * kx[n] + ky[n] * ky[n] + kz[n] * kz[n];
                if (kk < cutoff) {
                        // NOTE(review): ix/iy/iz are reloaded here although
                        // they were just read above -- redundant but harmless.
                        ix = gx[n];
                        iy = gy[n];
                        iz = gz[n];
                        if (xempty[ix]) {
                                Gr = Gxbase[ix] * br[0] + kr[0];
                                csx[ix] = cos(Gr)-sin(Gr)*_Complex_I;
                                xempty[ix] = 0;
                        }
                        if (yempty[iy]) {
                                Gr = Gybase[iy] * br[1] + kr[1];
                                csy[iy] = cos(Gr)-sin(Gr)*_Complex_I;
                                yempty[iy] = 0;
                        }
                        if (zempty[iz]) {
                                Gr = Gzbase[iz] * br[2] + kr[2];
                                // fac is folded into the z factor only.
                                csz[iz] = fac * (cos(Gr)-sin(Gr)*_Complex_I);
                                zempty[iz] = 0;
                        }
                        out[n] = exp(-.25*kk/aij) * csx[ix]*csy[iy]*csz[iz];
                } else {
                        out[n] = 0;
                }
        }
}

/* Copy an (mi x mj) tile of AO-pair values (each entry a length-NGv
 * complex vector) into a larger matrix whose row stride is ni. */
static void zcopy_ij(double complex *out, const double complex *gctr,
                     const int mi, const int mj, const int ni, const size_t NGv)
{
        int i, j, k;
        for (j = 0; j < mj; j++) {
                for (i = 0; i < mi; i++) {
                        for (k = 0; k < NGv; k++) {
                                out[i*NGv+k] = gctr[i*NGv+k];
                        }
                }
                out += ni * NGv;
                gctr += mi * NGv;
        }
}

/* Cartesian "c2s": no spherical transformation needed; just scatter the
 * contracted (ic, jc) blocks into the output matrix at their offsets. */
void GTO_ft_c2s_cart(double complex *out, double complex *gctr,
                     int *dims, CINTEnvVars *envs, size_t NGv)
{
        const int i_ctr = envs->x_ctr[0];
        const int j_ctr = envs->x_ctr[1];
        const int nfi = envs->nfi;
        const int nfj = envs->nfj;
        const int ni = nfi*i_ctr;
        const int nj = nfj*j_ctr;
        const int nf = envs->nf;
        int ic, jc;
        double complex *pout;
        for (jc = 0; jc < nj; jc += nfj) {
        for (ic = 0; ic < ni; ic += nfi) {
                pout = out + (dims[0] * jc + ic) * NGv;
                zcopy_ij(pout, gctr, nfi, nfj, dims[0], NGv);
                gctr += nf * NGv;
        } }
}

// CINTc2s_ket_sph transforms real data; complex data are handled by
// doubling the ket count (OF_CMPLX) and casting.
#define C2S(sph, nket, cart, l) \
        (double complex *)CINTc2s_ket_sph((double *)(sph), nket, (double *)(cart), l)
#define OF_CMPLX 2
/* Spherical c2s: transform each contracted (ic, jc) block from Cartesian
 * to real-spherical harmonics on both indices, then scatter into `out`. */
void GTO_ft_c2s_sph(double complex *out, double complex *gctr,
                    int *dims, CINTEnvVars *envs, size_t NGv)
{
        const int i_l = envs->i_l;
        const int j_l = envs->j_l;
        const int i_ctr = envs->x_ctr[0];
        const int j_ctr = envs->x_ctr[1];
        const int di = i_l * 2 + 1;
        const int dj = j_l * 2 + 1;
        const int ni = di*i_ctr;
        const int nj = dj*j_ctr;
        const int nfi = envs->nfi;
        const int nf = envs->nf;
        int ic, jc, k;
        const int buflen = nfi*dj;
        double complex *buf1 = malloc(sizeof(double complex) * buflen*2 * NGv);
        double complex *buf2 = buf1 + buflen * NGv;
        double complex *pout, *pij, *buf;
        for (jc = 0; jc < nj; jc += dj) {
        for (ic = 0; ic < ni; ic += di) {
                // Transform the j (ket) index first, then the i index,
                // one j-component column at a time.
                buf = C2S(buf1, nfi*NGv*OF_CMPLX, gctr, j_l);
                pij = C2S(buf2, NGv*OF_CMPLX, buf, i_l);
                for (k = NGv; k < dj*NGv; k+=NGv) {
                        pout = C2S(buf2+k*di, NGv*OF_CMPLX, buf+k*nfi, i_l);
                }
                pout = out + (dims[0] * jc + ic) * NGv;
                zcopy_ij(pout, pij, di, dj, dims[0], NGv);
                gctr += nf * NGv;
        } }
        free(buf1);
}

/* Zero the (counts[0] x counts[1]) output region for all components --
 * used when the shell pair has been screened out entirely. */
static void _ft_zset0(double complex *out, int *dims, int *counts,
                      int comp, size_t NGv)
{
        double complex *pout;
        int i, j, k, ic;
        for (ic = 0; ic < comp; ic++) {
                for (j = 0; j < counts[1]; j++) {
                        pout = out + j * dims[0] * NGv;
                        for (i = 0; i < counts[0]; i++) {
                        for (k = 0; k < NGv; k++) {
                                pout[i*NGv+k] = 0;
                        } }
                }
                out += dims[0] * dims[1] * NGv;
        }
}
/************************************************* * * eval_aopair is one of GTO_aopair_early_contract, * GTO_aopair_lazy_contract * * eval_gz is one of GTO_Gv_general, GTO_Gv_uniform_orth, * GTO_Gv_uniform_nonorth, GTO_Gv_nonuniform_orth * *************************************************/ int GTO_ft_aopair_drv(double complex *out, int *dims, int (*eval_aopair)(), FPtr_eval_gz eval_gz, void (*f_c2s)(), double complex fac, double *Gv, double *b, int *gxyz, int *gs, size_t NGv, CINTEnvVars *envs) { const int i_ctr = envs->x_ctr[0]; const int j_ctr = envs->x_ctr[1]; const int n_comp = envs->ncomp_e1 * envs->ncomp_tensor; const size_t nc = envs->nf * i_ctr * j_ctr * NGv; double complex *gctr = malloc(sizeof(double complex) * nc * n_comp); double *cache = malloc(sizeof(double) * (gs[0] + gs[1] + gs[2]) * 3); if (eval_gz == NULL) { eval_gz = GTO_Gv_general; } if (eval_gz != GTO_Gv_general) { assert(gxyz != NULL); } if (eval_aopair == NULL) { const int *shls = envs->shls; const int *bas = envs->bas; const int i_sh = shls[0]; const int j_sh = shls[1]; const int i_prim = bas(NPRIM_OF, i_sh); const int j_prim = bas(NPRIM_OF, j_sh); if (i_prim*j_prim < i_ctr*j_ctr*3) { eval_aopair = GTO_aopair_lazy_contract; } else { eval_aopair = GTO_aopair_early_contract; } } int has_value = (*eval_aopair)(gctr, envs, eval_gz, fac, Gv, b, gxyz, gs, NGv, cache); int counts[4]; if (f_c2s == &GTO_ft_c2s_sph) { counts[0] = (envs->i_l*2+1) * i_ctr; counts[1] = (envs->j_l*2+1) * j_ctr; } else { // f_c2s == &GTO_ft_c2s_cart counts[0] = envs->nfi * i_ctr; counts[1] = envs->nfj * j_ctr; } if (dims == NULL) { dims = counts; } size_t nout = dims[0] * dims[1] * NGv; int n; if (has_value) { for (n = 0; n < n_comp; n++) { (*f_c2s)(out+nout*n, gctr+nc*n, dims, envs, NGv); } } else { _ft_zset0(out, dims, counts, n_comp, NGv); } free(gctr); free(cache); return has_value; } int GTO_ft_ovlp_cart(double complex *out, int *shls, int *dims, int (*eval_aopair)(), FPtr_eval_gz eval_gz, double complex fac, double 
*Gv, double *b, int *gxyz, int *gs, int nGv, int *atm, int natm, int *bas, int nbas, double *env) { CINTEnvVars envs; int ng[] = {0, 0, 0, 0, 0, 1, 0, 1}; GTO_ft_init1e_envs(&envs, ng, shls, atm, natm, bas, nbas, env); envs.f_gout = &inner_prod; return GTO_ft_aopair_drv(out, dims, eval_aopair, eval_gz, &GTO_ft_c2s_cart, fac, Gv, b, gxyz, gs, nGv, &envs); } int GTO_ft_ovlp_sph(double complex *out, int *shls, int *dims, int (*eval_aopair)(), FPtr_eval_gz eval_gz, double complex fac, double *Gv, double *b, int *gxyz, int *gs, int nGv, int *atm, int natm, int *bas, int nbas, double *env) { CINTEnvVars envs; int ng[] = {0, 0, 0, 0, 0, 1, 0, 1}; GTO_ft_init1e_envs(&envs, ng, shls, atm, natm, bas, nbas, env); envs.f_gout = &inner_prod; return GTO_ft_aopair_drv(out, dims, eval_aopair, eval_gz, &GTO_ft_c2s_sph, fac, Gv, b, gxyz, gs, nGv, &envs); } /************************************************* * *************************************************/ static void zcopy_s2_igtj(double complex *out, double complex *in, size_t NGv, int comp, int nij, int ip, int di, int dj) { const size_t ip1 = ip + 1; int i, j, n, ic; double complex *pin, *pout; for (ic = 0; ic < comp; ic++) { pout = out + ic * nij * NGv; for (i = 0; i < di; i++) { for (j = 0; j < dj; j++) { pin = in + NGv * (j*di+i); for (n = 0; n < NGv; n++) { pout[j*NGv+n] = pin[n]; } } pout += (ip1 + i) * NGv; } } } static void zcopy_s2_ieqj(double complex *out, double complex *in, size_t NGv, int comp, int nij, int ip, int di, int dj) { const size_t ip1 = ip + 1; int i, j, n, ic; double complex *pin, *pout; for (ic = 0; ic < comp; ic++) { pout = out + ic * nij * NGv; for (i = 0; i < di; i++) { for (j = 0; j <= i; j++) { pin = in + NGv * (j*di+i); for (n = 0; n < NGv; n++) { pout[j*NGv+n] = pin[n]; } } pout += (ip1 + i) * NGv; } } } void GTO_ft_fill_s1(int (*intor)(), int (*eval_aopair)(), FPtr_eval_gz eval_gz, double complex *mat, int comp, int ish, int jsh, double complex *buf, int *shls_slice, int *ao_loc, double complex 
fac, double *Gv, double *b, int *gxyz, int *gs, int nGv, int *atm, int natm, int *bas, int nbas, double *env) { const int ish0 = shls_slice[0]; const int ish1 = shls_slice[1]; const int jsh0 = shls_slice[2]; const int jsh1 = shls_slice[3]; ish += ish0; jsh += jsh0; const int nrow = ao_loc[ish1] - ao_loc[ish0]; const int ncol = ao_loc[jsh1] - ao_loc[jsh0]; const size_t off = ao_loc[ish] - ao_loc[ish0] + (ao_loc[jsh] - ao_loc[jsh0]) * nrow; int shls[2] = {ish, jsh}; int dims[2] = {nrow, ncol}; (*intor)(mat+off*nGv, shls, dims, eval_aopair, eval_gz, fac, Gv, b, gxyz, gs, nGv, atm, natm, bas, nbas, env); } void GTO_ft_fill_s1hermi(int (*intor)(), int (*eval_aopair)(), FPtr_eval_gz eval_gz, double complex *mat, int comp, int ish, int jsh, double complex *buf, int *shls_slice, int *ao_loc, double complex fac, double *Gv, double *b, int *gxyz, int *gs, int nGv, int *atm, int natm, int *bas, int nbas, double *env) { const int ish0 = shls_slice[0]; const int ish1 = shls_slice[1]; const int jsh0 = shls_slice[2]; const int jsh1 = shls_slice[3]; ish += ish0; jsh += jsh0; const int ip = ao_loc[ish] - ao_loc[ish0]; const int jp = ao_loc[jsh] - ao_loc[jsh0]; if (ip < jp) { return; } const int nrow = ao_loc[ish1] - ao_loc[ish0]; const int ncol = ao_loc[jsh1] - ao_loc[jsh0]; const size_t off = ao_loc[ish] - ao_loc[ish0] + (ao_loc[jsh] - ao_loc[jsh0]) * nrow; const size_t NGv = nGv; int shls[2] = {ish, jsh}; int dims[2] = {nrow, ncol}; (*intor)(mat+off*NGv, shls, dims, eval_aopair, eval_gz, fac, Gv, b, gxyz, gs, nGv, atm, natm, bas, nbas, env); if (ip != jp && ish0 == jsh0 && ish1 == jsh1) { const int di = ao_loc[ish+1] - ao_loc[ish]; const int dj = ao_loc[jsh+1] - ao_loc[jsh]; double complex *in = mat + off * NGv; double complex *out = mat + (ao_loc[jsh] - ao_loc[jsh0] + (ao_loc[ish] - ao_loc[ish0]) * nrow) * NGv; int i, j, n, ic; double complex *pout, *pin; for (ic = 0; ic < comp; ic++) { for (i = 0; i < di; i++) { for (j = 0; j < dj; j++) { pin = in + NGv * (j*nrow+i); pout = out 
+ NGv * (i*nrow+j); for (n = 0; n < nGv; n++) { pout[n] = pin[n]; } } } out += nrow * ncol * NGv; } } } void GTO_ft_fill_s2(int (*intor)(), int (*eval_aopair)(), FPtr_eval_gz eval_gz, double complex *mat, int comp, int ish, int jsh, double complex *buf, int *shls_slice, int *ao_loc, double complex fac, double *Gv, double *b, int *gxyz, int *gs, int nGv, int *atm, int natm, int *bas, int nbas, double *env) { const int ish0 = shls_slice[0]; const int ish1 = shls_slice[1]; const int jsh0 = shls_slice[2]; ish += ish0; jsh += jsh0; const int ip = ao_loc[ish]; const int jp = ao_loc[jsh] - ao_loc[jsh0]; if (ip < jp) { return; } const int di = ao_loc[ish+1] - ao_loc[ish]; const int dj = ao_loc[jsh+1] - ao_loc[jsh]; const int i0 = ao_loc[ish0]; const size_t off0 = i0 * (i0 + 1) / 2; const size_t off = ip * (ip + 1) / 2 - off0 + jp; const size_t nij = ao_loc[ish1] * (ao_loc[ish1] + 1) / 2 - off0; const size_t NGv = nGv; int shls[2] = {ish, jsh}; int dims[2] = {di, dj}; (*intor)(buf, shls, dims, eval_aopair, eval_gz, fac, Gv, b, gxyz, gs, nGv, atm, natm, bas, nbas, env); if (ip != jp) { zcopy_s2_igtj(mat+off*NGv, buf, NGv, comp, nij, ip, di, dj); } else { zcopy_s2_ieqj(mat+off*NGv, buf, NGv, comp, nij, ip, di, dj); } } /* * Fourier transform AO pairs and add to mat (inplace) */ void GTO_ft_fill_drv(int (*intor)(), FPtr_eval_gz eval_gz, void (*fill)(), double complex *mat, int comp, int *shls_slice, int *ao_loc, double phase, double *Gv, double *b, int *gxyz, int *gs, int nGv, int *atm, int natm, int *bas, int nbas, double *env) { const int ish0 = shls_slice[0]; const int ish1 = shls_slice[1]; const int jsh0 = shls_slice[2]; const int jsh1 = shls_slice[3]; const int nish = ish1 - ish0; const int njsh = jsh1 - jsh0; const double complex fac = cos(phase) + sin(phase)*_Complex_I; int (*eval_aopair)() = NULL; if (intor != &GTO_ft_ovlp_cart && intor != &GTO_ft_ovlp_sph) { eval_aopair = &GTO_aopair_lazy_contract; } #pragma omp parallel { int i, j, ij; double complex *buf = 
malloc(sizeof(double complex) * NCTRMAX*NCTRMAX*comp*(size_t)nGv); #pragma omp for schedule(dynamic) for (ij = 0; ij < nish*njsh; ij++) { i = ij / njsh; j = ij % njsh; (*fill)(intor, eval_aopair, eval_gz, mat, comp, i, j, buf, shls_slice, ao_loc, fac, Gv, b, gxyz, gs, nGv, atm, natm, bas, nbas, env); } free(buf); } } /* * Given npair of shls in shls_lst, FT their AO pair value and add to * out (inplace) */ void GTO_ft_fill_shls_drv(int (*intor)(), FPtr_eval_gz eval_gz, double complex *out, int comp, int npair, int *shls_lst, int *ao_loc, double phase, double *Gv, double *b, int *gxyz, int *gs, int nGv, int *atm, int natm, int *bas, int nbas, double *env) { int n, di, dj, ish, jsh; int *ijloc = malloc(sizeof(int) * npair); ijloc[0] = 0; for (n = 1; n < npair; n++) { ish = shls_lst[n*2-2]; jsh = shls_lst[n*2-1]; di = ao_loc[ish+1] - ao_loc[ish]; dj = ao_loc[jsh+1] - ao_loc[jsh]; ijloc[n] = ijloc[n-1] + di*dj; } const double complex fac = cos(phase) + sin(phase)*_Complex_I; const size_t NGv = nGv; int (*eval_aopair)() = NULL; if (intor != &GTO_ft_ovlp_cart && intor != &GTO_ft_ovlp_sph) { eval_aopair = &GTO_aopair_lazy_contract; } #pragma omp parallel private(n) { int ish, jsh; int dims[2]; #pragma omp for schedule(dynamic) for (n = 0; n < npair; n++) { ish = shls_lst[n*2 ]; jsh = shls_lst[n*2+1]; dims[0] = ao_loc[ish+1] - ao_loc[ish]; dims[1] = ao_loc[jsh+1] - ao_loc[jsh]; (*intor)(out+ijloc[n]*comp*NGv, shls_lst+n*2, dims, eval_aopair, eval_gz, fac, Gv, b, gxyz, gs, nGv, atm, natm, bas, nbas, env); } } free(ijloc); } /* * Reversed vrr2d. 
They are used by numint_uniform_grid.c */

/* Horizontal recurrence: raise the ket angular momentum by one using
 * (i, j) = (i+1, j-1) + rirj * (i, j-1), reading the [li, lj-1] block
 * (g00) and the [li+1, lj-1] block (g10) and writing the [li, lj]
 * block to `out`. The START*/WHERE* macros map Cartesian component
 * indices for the x-, y- and z-raised target functions in turn. */
void GTOplain_vrr2d_ket_inc1(double *out, const double *g,
                             double *rirj, int li, int lj)
{
        if (lj == 0) {
                // Nothing to transfer: the block is a straight copy.
                NPdcopy(out, g, _LEN_CART[li]);
                return;
        }
        const int row_10 = _LEN_CART[li+1];
        const int row_00 = _LEN_CART[li ];
        const int col_00 = _LEN_CART[lj-1];
        const double *g00 = g;
        const double *g10 = g + row_00*col_00;
        int i, j;
        const double *p00, *p10;
        double *p01 = out;
        // x-raised target components
        for (j = STARTX_IF_L_DEC1(lj); j < _LEN_CART[lj-1]; j++) {
                for (i = 0; i < row_00; i++) {
                        p00 = g00 + (j*row_00+i);
                        p10 = g10 + (j*row_10+WHEREX_IF_L_INC1(i));
                        p01[i] = p10[0] + rirj[0] * p00[0];
                }
                p01 += row_00;
        }
        // y-raised target components
        for (j = STARTY_IF_L_DEC1(lj); j < _LEN_CART[lj-1]; j++) {
                for (i = 0; i < row_00; i++) {
                        p00 = g00 + (j*row_00+i);
                        p10 = g10 + (j*row_10+WHEREY_IF_L_INC1(i));
                        p01[i] = p10[0] + rirj[1] * p00[0];
                }
                p01 += row_00;
        }
        // z-raised target component (at most one source column)
        j = STARTZ_IF_L_DEC1(lj);
        if (j < _LEN_CART[lj-1]) {
                for (i = 0; i < row_00; i++) {
                        p00 = g00 + (j*row_00+i);
                        p10 = g10 + (j*row_10+WHEREZ_IF_L_INC1(i));
                        p01[i] = p10[0] + rirj[2] * p00[0];
                }
        }
}

/* Adjoint of the recurrence above: scatter-add the [li, lj] block g01
 * back into the [li, lj-1] (g00, scaled by rirj) and [li+1, lj-1]
 * (g10) source blocks -- used to reverse vrr2d. */
void GTOreverse_vrr2d_ket_inc1(double *g01, double *g00,
                               double *rirj, int li, int lj)
{
        const int row_10 = _LEN_CART[li+1];
        const int row_00 = _LEN_CART[li ];
        const int col_00 = _LEN_CART[lj-1];
        double *g10 = g00 + row_00*col_00;
        double *p00, *p10;
        int i, j;
        for (j = STARTX_IF_L_DEC1(lj); j < _LEN_CART[lj-1]; j++) {
                for (i = 0; i < row_00; i++) {
                        p00 = g00 + (j*row_00+i);
                        p10 = g10 + (j*row_10+WHEREX_IF_L_INC1(i));
                        p10[0] += g01[i];
                        p00[0] += g01[i] * rirj[0];
                }
                g01 += row_00;
        }
        for (j = STARTY_IF_L_DEC1(lj); j < _LEN_CART[lj-1]; j++) {
                for (i = 0; i < row_00; i++) {
                        p00 = g00 + (j*row_00+i);
                        p10 = g10 + (j*row_10+WHEREY_IF_L_INC1(i));
                        p10[0] += g01[i];
                        p00[0] += g01[i] * rirj[1];
                }
                g01 += row_00;
        }
        j = STARTZ_IF_L_DEC1(lj);
        if (j < _LEN_CART[lj-1]) {
                for (i = 0; i < row_00; i++) {
                        p00 = g00 + (j*row_00+i);
                        p10 = g10 + (j*row_10+WHEREZ_IF_L_INC1(i));
                        p10[0] += g01[i];
                        p00[0] += g01[i] * rirj[2];
                }
        }
}
openmp_ctx.h
#ifndef MOBULA_INC_CONTEXT_OPENMP_CTX_H_ #define MOBULA_INC_CONTEXT_OPENMP_CTX_H_ #include <omp.h> namespace mobula { #define KERNEL_RUN(a, n) (a) template <typename Func> MOBULA_DEVICE void parfor(const int n, Func F) { #pragma omp parallel for for (int i = 0; i < n; ++i) { F(i); } } } // namespace mobula #endif // MOBULA_INC_CONTEXT_OPENMP_CTX_H_
agmgMatrices.c
/* The MIT License (MIT) Copyright (c) 2017 Tim Warburton, Noel Chalmers, Jesse Chan, Ali Karakus, Rajesh Gandham Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
*/ #include "agmg.h" csr * newCSRfromCOO(dlong N, hlong* globalRowStarts, dlong nnz, hlong *Ai, hlong *Aj, dfloat *Avals){ int size, rank; rank = agmg::rank; size = agmg::size; csr *A = (csr *) calloc(1,sizeof(csr)); A->Nrows = N; A->Ncols = N; A->NlocalCols = N; hlong globalOffset = globalRowStarts[rank]; //first, count number of local, and non-local non-zeros dlong diagNNZ=0; dlong offdNNZ=0; for (dlong n=0;n<nnz;n++) { if ((Aj[n] < globalOffset) || (Aj[n]>globalOffset+N-1)) offdNNZ++; else diagNNZ++; } dlong *diagAi, *diagAj; dlong *offdAi; hlong *offdAj; dfloat *diagAvals, *offdAvals; if (diagNNZ) { diagAi = (dlong *) calloc(diagNNZ, sizeof(dlong)); diagAj = (dlong *) calloc(diagNNZ, sizeof(dlong)); diagAvals = (dfloat *) calloc(diagNNZ, sizeof(dfloat)); } if (offdNNZ) { offdAi = (dlong *) calloc(offdNNZ, sizeof(dlong)); offdAj = (hlong *) calloc(offdNNZ, sizeof(hlong)); offdAvals = (dfloat *) calloc(offdNNZ, sizeof(dfloat)); } //split into local and non-local COO matrices diagNNZ =0; offdNNZ =0; for (dlong n=0;n<nnz;n++) { if ((Aj[n] < globalOffset) || (Aj[n]>globalOffset+N-1)) { offdAi[offdNNZ] = (dlong) Ai[n] - globalOffset; //local index offdAj[offdNNZ] = Aj[n]; //global index offdAvals[offdNNZ] = Avals[n]; offdNNZ++; } else { diagAi[diagNNZ] = (dlong) Ai[n] - globalOffset; //local index diagAj[diagNNZ] = (dlong) Aj[n] - globalOffset; //local index diagAvals[diagNNZ] = Avals[n]; diagNNZ++; } } A->diagNNZ = diagNNZ; A->offdNNZ = offdNNZ; if (N) { A->diagRowStarts = (dlong *) calloc(N+1,sizeof(dlong)); A->offdRowStarts = (dlong *) calloc(N+1,sizeof(dlong)); } if (diagNNZ) { A->diagCols = (dlong *) calloc(diagNNZ, sizeof(dlong)); A->diagCoefs = (dfloat *) calloc(diagNNZ, sizeof(dfloat)); } hlong* offdCols; if (offdNNZ) { offdCols = (hlong *) calloc(offdNNZ,sizeof(hlong)); A->offdCols = (dlong *) calloc(offdNNZ,sizeof(dlong)); A->offdCoefs = (dfloat *) calloc(offdNNZ, sizeof(dfloat)); } // Convert to csr storage, assumes orginal matrix was presorted by rows 
// Count nonzeros per row (diag and offd separately); rowStarts[row+1]
  // temporarily holds the per-row count until the scan below.
  for(dlong n=0;n<diagNNZ;++n) {
    dlong row = diagAi[n];
    A->diagRowStarts[row+1]++;
  }
  for(dlong n=0;n<offdNNZ;++n) {
    dlong row = offdAi[n];
    A->offdRowStarts[row+1]++;
  }

  //cumulative sum (per-row counts -> CSR row offsets)
  for (dlong i=0;i<A->Nrows;i++) {
    A->diagRowStarts[i+1] += A->diagRowStarts[i];
    A->offdRowStarts[i+1] += A->offdRowStarts[i];
  }

  //copy input data into struct
  if (diagNNZ) {
    for (dlong i=0; i<N; i++) {
      dlong start = A->diagRowStarts[i];
      // cnt starts at 1: slot 0 of each row is reserved for the diagonal.
      // NOTE(review): this assumes every row carries an explicit diagonal
      // entry; a row without one would leave slot `start` at its calloc'd
      // zero and write one entry past the row -- confirm the input contract.
      int cnt = 1;
      for (dlong j=A->diagRowStarts[i]; j<A->diagRowStarts[i+1]; j++) {
        if (diagAj[j] == i) { //move diagonal to first entry
          A->diagCols[start] = diagAj[j];
          A->diagCoefs[start] = diagAvals[j];
        } else {
          A->diagCols[start+cnt] = diagAj[j];
          A->diagCoefs[start+cnt] = diagAvals[j];
          cnt++;
        }
      }
    }
  }

  //record global indexing of columns
  A->colMap = (hlong *) calloc(A->Ncols, sizeof(hlong));
  for (dlong i=0;i<A->Ncols;i++)
    A->colMap[i] = i + globalOffset;

  if (offdNNZ) {
    for (dlong i=0; i<N; i++) {
      dlong start = A->offdRowStarts[i];
      int cnt = 0;
      for (dlong j=A->offdRowStarts[i]; j<A->offdRowStarts[i+1]; j++) {
        offdCols[start+cnt] = offdAj[j];
        A->offdCoefs[start+cnt] = offdAvals[j];
        cnt++;
      }
    }

    //we now need to reorder the x vector for the halo, and shift the column indices
    hlong *col = (hlong *) calloc(A->offdNNZ,sizeof(hlong));
    for (dlong n=0;n<offdNNZ;n++)
      col[n] = offdCols[n]; //copy non-local column global ids

    //sort by global index
    std::sort(col,col+offdNNZ);

    //count unique non-local column ids (compacting them in place)
    A->NHalo = 0;
    for (dlong n=1;n<offdNNZ;n++)
      if (col[n]!=col[n-1]) col[++A->NHalo] = col[n];
    A->NHalo++; //number of unique columns

    A->Ncols += A->NHalo;

    //save global column ids in colMap
    A->colMap = (hlong *) realloc(A->colMap, A->Ncols*sizeof(hlong));
    for (dlong n=0; n<A->NHalo; n++)
      A->colMap[n+A->NlocalCols] = col[n];
    free(col);

    //shift the column indices to local indexing
    // NOTE(review): linear scan per nonzero makes this O(offdNNZ * NHalo);
    // the halo range of colMap is sorted, so a binary search would reduce
    // it if setup time ever shows up in profiles.
    for (dlong n=0;n<offdNNZ;n++) {
      hlong gcol = offdCols[n];
      for (dlong m=A->NlocalCols;m<A->Ncols;m++) {
        if (gcol == A->colMap[m]) A->offdCols[n] = m;
      }
    }
  }

  if (diagNNZ) {
    free(diagAi);
free(diagAj); free(diagAvals); } if (offdNNZ) { free(offdAi); free(offdAj); free(offdAvals); free(offdCols); } csrHaloSetup(A,globalRowStarts); return A; } void freeCSR(csr *A) { if (A->diagNNZ) { free(A->diagRowStarts); free(A->diagCols); free(A->diagCoefs); } if (A->offdNNZ) { free(A->offdRowStarts); free(A->offdCols); free(A->offdCoefs); } if (A->Ncols) { free(A->colMap); } free(A->haloSendRequests); free(A->haloRecvRequests); free(A->NsendPairs); free(A->NrecvPairs); if (A->NsendTotal) { free(A->sendBuffer); free(A->haloElementList); } free(A); } //create a device version of a coo matrix dcoo *newDCOO(parAlmond_t *parAlmond, csr *B){ dcoo *A = (dcoo *) calloc(1,sizeof(dcoo)); A->Nrows = B->Nrows; A->Ncols = B->Ncols; A->NHalo = B->NHalo; A->NlocalCols = B->NlocalCols; A->diagNNZ = B->diagNNZ; A->offdNNZ = B->offdNNZ; dlong *diagRows; dlong *offdRows; if (B->diagNNZ) diagRows = (dlong *) calloc(B->diagNNZ,sizeof(dlong)); if (B->offdNNZ) offdRows = (dlong *) calloc(B->offdNNZ,sizeof(dlong)); dlong diagCnt =0; dlong offdCnt =0; for (dlong i=0;i<B->Nrows;i++) { for (dlong j=B->diagRowStarts[i];j<B->diagRowStarts[i+1];j++) diagRows[diagCnt++] = i; for (dlong j=B->offdRowStarts[i];j<B->offdRowStarts[i+1];j++) offdRows[offdCnt++] = i; } //copy to device if(B->diagNNZ){ A->o_diagRows = parAlmond->device.malloc(A->diagNNZ*sizeof(dlong), diagRows); A->o_diagCols = parAlmond->device.malloc(A->diagNNZ*sizeof(dlong), B->diagCols); A->o_diagCoefs = parAlmond->device.malloc(A->diagNNZ*sizeof(dfloat), B->diagCoefs); } if(B->offdNNZ){ A->o_offdRows = parAlmond->device.malloc(A->offdNNZ*sizeof(dlong), offdRows); A->o_offdCols = parAlmond->device.malloc(A->offdNNZ*sizeof(dlong), B->offdCols); A->o_offdCoefs = parAlmond->device.malloc(A->offdNNZ*sizeof(dfloat), B->offdCoefs); } A->NrecvTotal = B->NrecvTotal; A->NsendTotal = B->NsendTotal; A->haloElementList = B->haloElementList; if (A->NsendTotal) A->o_haloElementList = 
parAlmond->device.malloc(A->NsendTotal*sizeof(dlong),A->haloElementList); A->NsendPairs = B->NsendPairs; A->NrecvPairs = B->NrecvPairs; A->NsendMessages = B->NsendMessages; A->NrecvMessages = B->NrecvMessages; if (A->NrecvTotal) A->recvBuffer = (dfloat *) malloc(A->NrecvTotal*sizeof(dfloat)); if (A->NsendTotal) { #if 0 occa::memory o_haloBuffer = parAlmond->device.mappedAlloc(A->NsendTotal*sizeof(dfloat), NULL); A->sendBuffer = (dfloat*) o_haloBuffer.getMappedPointer(); #endif A->sendBuffer = (dfloat*) occaHostMallocPinned(parAlmond->device, A->NsendTotal*sizeof(dfloat), NULL, A->o_haloBuffer); } A->haloSendRequests = B->haloSendRequests; A->haloRecvRequests = B->haloRecvRequests; return A; } hyb * newHYB(parAlmond_t *parAlmond, csr *csrA) { hyb *A = (hyb *) calloc(1,sizeof(hyb)); A->Nrows = csrA->Nrows; A->Ncols = csrA->Ncols; A->NlocalCols = csrA->NlocalCols; A->NHalo = csrA->NHalo; int *rowCounters; if (csrA->Nrows) rowCounters = (int*) calloc(csrA->Nrows, sizeof(int)); int maxNnzPerRow = 0; int minNnzPerRow = 0; if (csrA->Nrows) minNnzPerRow = (int) csrA->diagRowStarts[1] - csrA->diagRowStarts[0]; for(dlong i=0; i<csrA->Nrows; i++) { int rowNnz = (int) csrA->diagRowStarts[i+1] - csrA->diagRowStarts[i]; rowCounters[i] = rowNnz; maxNnzPerRow = (rowNnz > maxNnzPerRow) ? rowNnz : maxNnzPerRow; minNnzPerRow = (rowNnz < minNnzPerRow) ? 
rowNnz : minNnzPerRow; } // create bins int numBins = maxNnzPerRow - minNnzPerRow + 1; //zero row check if (numBins<0) numBins =0; int *bins; if (numBins) bins = (int *) calloc(numBins, sizeof(int)); for(dlong i=0; i<csrA->Nrows; i++){ bins[rowCounters[i]-minNnzPerRow]++; } dfloat threshold = 2.0/3.0; dlong totalNNZ = csrA->diagNNZ+csrA->offdNNZ; int nnzPerRow = 0; dlong nnz = 0; //increase the nnz per row in E until it holds threshold*totalnnz nonzeros for(int i=0; i<numBins; i++){ nnz += bins[i] * (i+minNnzPerRow); if((nnz > threshold*totalNNZ)||(i==numBins-1)){ nnzPerRow = i+minNnzPerRow; break; } } A->E = (ell *) calloc(1, sizeof(ell)); A->E->Nrows = csrA->Nrows; A->E->Ncols = csrA->Ncols; A->E->nnzPerRow = nnzPerRow; A->E->strideLength = csrA->Nrows; dlong *Ecols; dfloat *Ecoefs; if(nnzPerRow&&csrA->Nrows){ Ecols = (dlong *) calloc(csrA->Nrows*nnzPerRow, sizeof(dlong)); Ecoefs = (dfloat *) calloc(csrA->Nrows*nnzPerRow, sizeof(dfloat)); } dlong nnzC = 0; // count the number of nonzeros to be stored in coo format for(dlong i=0; i<csrA->Nrows; i++) { //excess from row in diag if(rowCounters[i] > nnzPerRow) nnzC += (rowCounters[i] - nnzPerRow); //all of offd int offdRowNnz = (int) csrA->offdRowStarts[i+1]-csrA->offdRowStarts[i]; nnzC += offdRowNnz; } A->E->actualNNZ = totalNNZ - nnzC; A->C = (coo *) calloc(1, sizeof(coo)); A->C->Nrows = csrA->Nrows; A->C->Ncols = csrA->Ncols; A->C->nnz = nnzC; dlong *Coffsets; dlong *Ccols; dfloat *Ccoefs; Coffsets = (dlong *) calloc(csrA->Nrows+1, sizeof(dlong)); if (nnzC) { Ccols = (dlong *) calloc(nnzC, sizeof(dlong)); Ccoefs = (dfloat *) calloc(nnzC, sizeof(dfloat)); } nnzC = 0; for(dlong i=0; i<csrA->Nrows; i++){ dlong Jstart = csrA->diagRowStarts[i]; dlong Jend = csrA->diagRowStarts[i+1]; int rowNnz = (int) Jend - Jstart; // store only min of nnzPerRow and rowNnz int maxNnz = (nnzPerRow >= rowNnz) ? 
rowNnz : nnzPerRow; for(int c=0; c<maxNnz; c++){ Ecols [i+c*A->E->strideLength] = csrA->diagCols[Jstart+c]; Ecoefs[i+c*A->E->strideLength] = csrA->diagCoefs[Jstart+c]; } // store the remaining in coo format if(rowNnz > nnzPerRow){ for(int c=nnzPerRow; c<rowNnz; c++){ Coffsets[i+1]++; Ccols[nnzC] = csrA->diagCols[Jstart+c]; Ccoefs[nnzC] = csrA->diagCoefs[Jstart+c]; nnzC++; } } //add the offd non-zeros for (dlong j=csrA->offdRowStarts[i];j<csrA->offdRowStarts[i+1];j++) { Coffsets[i+1]++; Ccols[nnzC] = csrA->offdCols[j]; Ccoefs[nnzC] = csrA->offdCoefs[j]; nnzC++; } } //use counts to create offsets for (dlong i=0;i<csrA->Nrows;i++) Coffsets[i+1] += Coffsets[i]; // copy the data to device memory if(csrA->Nrows) { free(rowCounters); free(bins); } //copy null vector if present if(csrA->null&&csrA->Nrows) A->o_null = parAlmond->device.malloc(csrA->Nrows*sizeof(dfloat), csrA->null); if (csrA->diagInv&&csrA->Nrows) A->o_diagInv = parAlmond->device.malloc(csrA->Nrows*sizeof(dfloat), csrA->diagInv); if(A->E->nnzPerRow&&csrA->Nrows){ A->E->o_cols = parAlmond->device.malloc(csrA->Nrows*nnzPerRow*sizeof(dlong), Ecols); A->E->o_coefs = parAlmond->device.malloc(csrA->Nrows*nnzPerRow*sizeof(dfloat), Ecoefs); free(Ecols); free(Ecoefs); } if(A->C->nnz){ A->C->o_offsets = parAlmond->device.malloc((csrA->Nrows+1)*sizeof(dlong), Coffsets); A->C->o_cols = parAlmond->device.malloc(A->C->nnz*sizeof(dlong), Ccols); A->C->o_coefs = parAlmond->device.malloc(A->C->nnz*sizeof(dfloat), Ccoefs); free(Ccols); free(Ccoefs); } free(Coffsets); A->NrecvTotal = csrA->NrecvTotal; A->NsendTotal = csrA->NsendTotal; A->haloElementList = csrA->haloElementList; if (A->NsendTotal) A->o_haloElementList = parAlmond->device.malloc(A->NsendTotal*sizeof(dlong),A->haloElementList); A->NsendPairs = csrA->NsendPairs; A->NrecvPairs = csrA->NrecvPairs; A->NsendMessages = csrA->NsendMessages; A->NrecvMessages = csrA->NrecvMessages; A->haloSendRequests = csrA->haloSendRequests; A->haloRecvRequests = 
csrA->haloRecvRequests; if (A->NrecvTotal) A->recvBuffer = (dfloat *) malloc(A->NrecvTotal*sizeof(dfloat)); if (A->NsendTotal) { #if 0 occa::memory o_haloBuffer = parAlmond->device.mappedAlloc(A->NsendTotal*sizeof(dfloat), NULL); A->sendBuffer = (dfloat*) o_haloBuffer.getMappedPointer(); #endif A->sendBuffer = (dfloat*) occaHostMallocPinned(parAlmond->device, A->NsendTotal*sizeof(dfloat), NULL, A->o_haloBuffer); } return A; } void axpy(csr *A, dfloat alpha, dfloat *x, dfloat beta, dfloat *y, bool nullSpace, dfloat nullSpacePenalty) { dfloat alphaG = 0.; if (A->NsendTotal + A->NrecvTotal) csrHaloExchangeStart(A, sizeof(dfloat), x, A->sendBuffer, x+A->NlocalCols); // y[i] = beta*y[i] + alpha* (sum_{ij} Aij*x[j]) #pragma omp parallel for for(dlong i=0; i<A->Nrows; i++){ //local dfloat result = 0.0; for(dlong jj=A->diagRowStarts[i]; jj<A->diagRowStarts[i+1]; jj++) result += (A->diagCoefs[jj]*x[A->diagCols[jj]]); y[i] = alpha*result + beta*y[i]; } //rank 1 correction if there is a nullspace if (nullSpace) { dfloat alphaL = innerProd(A->Nrows, A->null, x); MPI_Allreduce(&alphaL, &alphaG, 1, MPI_DFLOAT, MPI_SUM, agmg::comm); alphaG *= nullSpacePenalty; } if (A->NsendTotal + A->NrecvTotal) csrHaloExchangeFinish(A); #pragma omp parallel for for(dlong i=0; i<A->Nrows; i++){ //nonlocal dfloat result = 0.0; for(dlong jj=A->offdRowStarts[i]; jj<A->offdRowStarts[i+1]; jj++) result += (A->offdCoefs[jj]*x[A->offdCols[jj]]); y[i] += alpha*result; } //add the correction if (nullSpace) vectorAdd(A->Nrows, alpha*alphaG, A->null, 1., y); } void axpy(parAlmond_t *parAlmond, dcoo *A, dfloat alpha, occa::memory o_x, dfloat beta, occa::memory o_y) { occaTimerTic(parAlmond->device,"dcoo axpy"); if (A->NsendTotal) { parAlmond->device.finish(); parAlmond->device.setStream(parAlmond->dataStream); parAlmond->haloExtract(A->NsendTotal, 1, A->o_haloElementList, o_x, A->o_haloBuffer); //copy from device A->o_haloBuffer.copyTo(A->sendBuffer,"async: true"); 
parAlmond->device.setStream(parAlmond->defaultStream); } if (A->NsendTotal + A->NrecvTotal){ parAlmond->device.setStream(parAlmond->dataStream); parAlmond->device.finish(); dcooHaloExchangeStart(A, sizeof(dfloat), A->sendBuffer, A->recvBuffer); parAlmond->device.setStream(parAlmond->defaultStream); } if (A->diagNNZ) parAlmond->agg_interpolateKernel(A->diagNNZ, A->o_diagRows, A->o_diagCols, A->o_diagCoefs, o_x, o_y); if (A->NsendTotal + A->NrecvTotal) dcooHaloExchangeFinish(A); //copy back to device if(A->NrecvTotal){ parAlmond->device.setStream(parAlmond->dataStream); o_x.copyFrom(A->recvBuffer,A->NrecvTotal*sizeof(dfloat),A->NlocalCols*sizeof(dfloat),"async: true"); parAlmond->device.finish(); parAlmond->device.setStream(parAlmond->defaultStream); parAlmond->device.finish(); } if (A->offdNNZ) parAlmond->agg_interpolateKernel(A->offdNNZ, A->o_offdRows, A->o_offdCols, A->o_offdCoefs, o_x, o_y); occaTimerToc(parAlmond->device,"dcoo axpy"); } void axpy(parAlmond_t *parAlmond, hyb *A, dfloat alpha, occa::memory o_x, dfloat beta, occa::memory o_y, bool nullSpace, dfloat nullSpacePenalty) { dfloat alphaG = 0.; occaTimerTic(parAlmond->device,"hyb axpy"); if (A->NsendTotal) { parAlmond->device.finish(); parAlmond->device.setStream(parAlmond->dataStream); parAlmond->haloExtract(A->NsendTotal, 1, A->o_haloElementList, o_x, A->o_haloBuffer); //copy from device A->o_haloBuffer.copyTo(A->sendBuffer,"async: true"); parAlmond->device.setStream(parAlmond->defaultStream); } // y <-- alpha*E*x+beta*y axpy(parAlmond, A->E, alpha, o_x, beta, o_y); if (A->NsendTotal+A->NrecvTotal){ parAlmond->device.setStream(parAlmond->dataStream); parAlmond->device.finish(); hybHaloExchangeStart(A, sizeof(dfloat),A->sendBuffer, A->recvBuffer); parAlmond->device.setStream(parAlmond->defaultStream); } //rank 1 correction if there is a nullspace if (nullSpace) { dfloat alphaL = innerProd(parAlmond, A->Nrows, A->o_null, o_x); MPI_Allreduce(&alphaL, &alphaG, 1, MPI_DFLOAT, MPI_SUM, agmg::comm); alphaG *= 
nullSpacePenalty; } if (A->NsendTotal+A->NrecvTotal) hybHaloExchangeFinish(A); //copy back to device if (A->NrecvTotal){ parAlmond->device.setStream(parAlmond->dataStream); o_x.copyFrom(A->recvBuffer,A->NrecvTotal*sizeof(dfloat),A->NlocalCols*sizeof(dfloat),"async: true"); parAlmond->device.finish(); parAlmond->device.setStream(parAlmond->defaultStream); parAlmond->device.finish(); } // y <-- alpha*C*x + y if (A->C->nnz) ax(parAlmond, A->C, alpha, o_x, o_y); //add the correction if (nullSpace) vectorAdd(parAlmond, A->Nrows, alpha*alphaG, A->o_null, 1., o_y); occaTimerToc(parAlmond->device,"hyb axpy"); } void axpy(parAlmond_t *parAlmond, ell *A, dfloat alpha, occa::memory o_x, dfloat beta, occa::memory o_y) { if(A->actualNNZ){ occaTimerTic(parAlmond->device,"ell axpy"); parAlmond->ellAXPYKernel(A->Nrows, A->nnzPerRow, A->strideLength, alpha, beta, A->o_cols, A->o_coefs, o_x, o_y); occaTimerToc(parAlmond->device,"ell axpy"); } } void ax(parAlmond_t *parAlmond, coo *C, dfloat alpha, occa::memory o_x, occa::memory o_y) { // do block-wise product if(C->nnz){ occaTimerTic(parAlmond->device,"coo ax"); parAlmond->cooAXKernel(C->Nrows, alpha, C->o_offsets, C->o_cols, C->o_coefs,o_x, o_y); occaTimerToc(parAlmond->device,"coo ax"); } } void smoothJacobi(parAlmond_t *parAlmond, agmgLevel *level, csr *A, dfloat *r, dfloat *x, bool x_is_zero) { // x = x + inv(D)*(b-A*x) if(x_is_zero){ #pragma omp parallel for for(dlong i=0; i<A->Nrows; i++){ x[i] = A->diagInv[i]*r[i]; } return; } dfloat *res = level->smootherResidual; #pragma omp parallel for for(dlong i=0; i<A->Nrows; i++){ res[i] = r[i]; } axpy(A, -1.0, x, 1.0, res,parAlmond->nullSpace,parAlmond->nullSpacePenalty); // update x #pragma omp parallel for for (dlong i=0;i<A->Nrows;i++) x[i] = x[i] + A->diagInv[i]*res[i]; } void smoothDampedJacobi(parAlmond_t *parAlmond, agmgLevel *level, csr *A, dfloat *r, dfloat *x, bool x_is_zero) { // dfloat alphaG = 0.; dfloat alpha = level->smoother_params[0]; if(x_is_zero){ #pragma omp 
parallel for for(dlong i=0; i<A->Nrows; i++){ x[i] = alpha*A->diagInv[i]*r[i]; } return; } dfloat *res = level->smootherResidual; #pragma omp parallel for for(dlong i=0; i<A->Nrows; i++){ res[i] = r[i]; } axpy(A, -1.0, x, 1.0, res,parAlmond->nullSpace,parAlmond->nullSpacePenalty); // copy the buffer vector to x #pragma omp parallel for for (dlong i=0;i<A->Nrows;i++) x[i] = x[i] + alpha*A->diagInv[i]*res[i]; } void smoothChebyshev(parAlmond_t *parAlmond, agmgLevel *level, csr *A, dfloat *r, dfloat *x, bool x_is_zero) { dfloat lambdaN = level->smoother_params[0]; dfloat lambda1 = level->smoother_params[1]; dfloat theta = 0.5*(lambdaN+lambda1); dfloat delta = 0.5*(lambdaN-lambda1); dfloat invTheta = 1.0/theta; dfloat sigma = theta/delta; dfloat rho_n = 1./sigma; dfloat rho_np1; dfloat *res = level->smootherResidual; dfloat *Ad = level->smootherResidual2; dfloat *d = level->smootherUpdate; // dfloat alphaG = 0.; if(x_is_zero){ //skip the Ax if x is zero #pragma omp parallel for for(dlong i=0; i<A->Nrows; i++){ res[i] = A->diagInv[i]*r[i]; x[i] = 0.; d[i] = invTheta*res[i]; } } else { level->Ax(level->AxArgs,x,res); #pragma omp parallel for for(dlong i=0; i<A->Nrows; i++){ res[i] = A->diagInv[i]*(r[i]-res[i]); d[i] = invTheta*res[i]; } } for (int k=0;k<level->ChebyshevIterations;k++) { //x_k+1 = x_k + d_k vectorAdd(A->Nrows, 1.0, d, 1.0, x); //r_k+1 = r_k - D^{-1}Ad_k level->Ax(level->AxArgs,d,Ad); #pragma omp parallel for for(dlong i=0; i<A->Nrows; i++) { res[i] = res[i] - A->diagInv[i]*Ad[i]; } rho_np1 = 1.0/(2.*sigma-rho_n); //d_k+1 = rho_k+1*rho_k*d_k + 2*rho_k+1*r_k+1/delta vectorAdd(A->Nrows, 2.0*rho_np1/delta, res, rho_np1*rho_n, d); rho_n = rho_np1; } //x_k+1 = x_k + d_k vectorAdd(A->Nrows, 1.0, d, 1.0, x); } void smoothJacobi(parAlmond_t *parAlmond, agmgLevel *level, hyb *A, occa::memory o_r, occa::memory o_x, bool x_is_zero) { // dfloat alphaG = 0.; occaTimerTic(parAlmond->device,"hyb smoothJacobi"); if(x_is_zero){ if (A->Nrows) dotStar(parAlmond, A->Nrows, 
1.0, A->o_diagInv, o_r, 0.0, o_x); occaTimerToc(parAlmond->device,"hyb smoothJacobi"); return; } occa::memory o_res = level->o_smootherResidual; o_res.copyFrom(o_r,A->Nrows*sizeof(dfloat)); axpy(parAlmond, A, -1.0, o_x, 1.0, o_res,parAlmond->nullSpace,parAlmond->nullSpacePenalty); // x = x + inv(D)*(r-A*x) dotStar(parAlmond, A->Nrows, 1.0, A->o_diagInv, o_res, 1.0, o_x); occaTimerToc(parAlmond->device,"hyb smoothJacobi"); } void smoothDampedJacobi(parAlmond_t *parAlmond, agmgLevel *level, hyb *A, occa::memory o_r, occa::memory o_x, bool x_is_zero){ // dfloat alphaG = 0.; dfloat alpha = level->smoother_params[0]; occaTimerTic(parAlmond->device,"hyb smoothDampedJacobi"); if(x_is_zero){ if (A->Nrows) dotStar(parAlmond, A->Nrows, alpha, A->o_diagInv, o_r, 0.0, o_x); occaTimerToc(parAlmond->device,"hyb smoothDampedJacobi"); return; } occa::memory o_res = level->o_smootherResidual; o_res.copyFrom(o_r,A->Nrows*sizeof(dfloat)); axpy(parAlmond, A, -1.0, o_x, 1.0, o_res,parAlmond->nullSpace,parAlmond->nullSpacePenalty); // x = x + alpha*inv(D)*(r-A*x) dotStar(parAlmond, A->Nrows, alpha, A->o_diagInv, o_res, 1.0, o_x); occaTimerToc(parAlmond->device,"hyb smoothDampedJacobi"); } void smoothChebyshev(parAlmond_t *parAlmond, agmgLevel *level, hyb *A, occa::memory o_r, occa::memory o_x, bool x_is_zero) { dfloat lambdaN = level->smoother_params[0]; dfloat lambda1 = level->smoother_params[1]; dfloat theta = 0.5*(lambdaN+lambda1); dfloat delta = 0.5*(lambdaN-lambda1); dfloat invTheta = 1.0/theta; dfloat sigma = theta/delta; dfloat rho_n = 1./sigma; dfloat rho_np1; occa::memory o_res = level->o_smootherResidual; occa::memory o_Ad = level->o_smootherResidual2; occa::memory o_d = level->o_smootherUpdate; // dfloat alphaG = 0.; occaTimerTic(parAlmond->device,"hyb smoothChebyshev"); if(x_is_zero){ //skip the Ax if x is zero //res = D^{-1}r dotStar(parAlmond, A->Nrows, 1.0, A->o_diagInv, o_r, 0.0, o_res); setVector(parAlmond, A->Nrows, o_x, 0.0); //d = invTheta*res vectorAdd(parAlmond, 
A->Nrows, invTheta, o_res, 0.0, o_d); } else { //res = D^{-1}(r-Ax) level->device_Ax(level->AxArgs,o_x,o_res); vectorAdd(parAlmond, A->Nrows, 1.0, o_r, -1.0, o_res); dotStar(parAlmond, A->Nrows, A->o_diagInv, o_res); //d = invTheta*res vectorAdd(parAlmond, A->Nrows, invTheta, o_res, 0.0, o_d); } for (int k=0;k<level->ChebyshevIterations;k++) { //x_k+1 = x_k + d_k vectorAdd(parAlmond, A->Nrows, 1.0, o_d, 1.0, o_x); //r_k+1 = r_k - D^{-1}Ad_k level->device_Ax(level->AxArgs,o_d,o_Ad); dotStar(parAlmond, A->Nrows, -1.0, A->o_diagInv, o_Ad, 1.0, o_res); rho_np1 = 1.0/(2.*sigma-rho_n); //d_k+1 = rho_k+1*rho_k*d_k + 2*rho_k+1*r_k+1/delta vectorAdd(parAlmond, A->Nrows, 2.0*rho_np1/delta, o_res, rho_np1*rho_n, o_d); rho_n = rho_np1; } //x_k+1 = x_k + d_k vectorAdd(parAlmond, A->Nrows, 1.0, o_d, 1.0, o_x); occaTimerToc(parAlmond->device,"hyb smoothChebyshev"); } // set up halo infomation for inter-processor MPI // exchange of trace nodes void csrHaloSetup(csr *A, hlong *globalColStarts){ // MPI info int rank, size; rank = agmg::rank; size = agmg::size; // non-blocking MPI isend/irecv requests (used in meshHaloExchange) A->haloSendRequests = calloc(size, sizeof(MPI_Request)); A->haloRecvRequests = calloc(size, sizeof(MPI_Request)); // count number of halo element nodes to swap A->NrecvTotal = 0; A->NsendPairs = (int*) calloc(size, sizeof(int)); A->NrecvPairs = (int*) calloc(size, sizeof(int)); for(dlong n=A->NlocalCols;n<A->Ncols;++n){ //for just the halo hlong id = A->colMap[n]; // global index for (int r=0;r<size;r++) { //find owner's rank if (globalColStarts[r]-1<id && id < globalColStarts[r+1]) { A->NrecvTotal++; A->NrecvPairs[r]++; } } } MPI_Alltoall(A->NrecvPairs, 1, MPI_INT, A->NsendPairs, 1, MPI_INT, agmg::comm); A->NsendTotal = 0; for (int r=0;r<size;r++) A->NsendTotal += A->NsendPairs[r]; hlong *ghaloElementList; if (A->NsendTotal) { ghaloElementList = (hlong *) calloc(A->NsendTotal,sizeof(hlong)); A->haloElementList = (dlong *) calloc(A->NsendTotal,sizeof(dlong)); 
} // count number of MPI messages in halo exchange A->NsendMessages = 0; A->NrecvMessages = 0; for(int r=0;r<size;++r) { if(A->NsendPairs[r]) A->NsendMessages++; if(A->NrecvPairs[r]) A->NrecvMessages++; } //exchange the needed ids int tag = 999; dlong recvOffset = A->NlocalCols; int sendOffset = 0; int sendMessage = 0, recvMessage = 0; for(int r=0;r<size;++r){ if(A->NsendPairs[r]) { MPI_Irecv(ghaloElementList+sendOffset, A->NsendPairs[r], MPI_HLONG, r, tag, agmg::comm, (MPI_Request*)A->haloSendRequests+sendMessage); sendOffset += A->NsendPairs[r]; ++sendMessage; } if(A->NrecvPairs[r]){ MPI_Isend(A->colMap+recvOffset, A->NrecvPairs[r], MPI_HLONG, r, tag, agmg::comm, (MPI_Request*)A->haloRecvRequests+recvMessage); recvOffset += A->NrecvPairs[r]; ++recvMessage; } } // Wait for all sent messages to have left and received messages to have arrived MPI_Status *sendStatus = (MPI_Status*) calloc(A->NsendMessages, sizeof(MPI_Status)); MPI_Status *recvStatus = (MPI_Status*) calloc(A->NrecvMessages, sizeof(MPI_Status)); MPI_Waitall(A->NrecvMessages, (MPI_Request*)A->haloRecvRequests, recvStatus); MPI_Waitall(A->NsendMessages, (MPI_Request*)A->haloSendRequests, sendStatus); free(recvStatus); free(sendStatus); //shift to local ids for (int n=0;n<A->NsendTotal;n++) A->haloElementList[n] = (dlong) ghaloElementList[n] - globalColStarts[rank]; if (A->NsendTotal) A->sendBuffer = (dfloat *) calloc(A->NsendTotal,sizeof(dfloat)); A->totalHaloPairs = A->NsendTotal+A->NrecvTotal; } void csrHaloExchange(csr *A, size_t Nbytes, // message size per element void *sourceBuffer, void *sendBuffer, // temporary buffer void *recvBuffer) { // MPI info int rank, size; rank = agmg::rank; size = agmg::size; int tag = 999; // copy data from outgoing elements into temporary send buffer for(int i=0;i<A->NsendTotal;++i){ // outgoing element dlong id = A->haloElementList[i]; memcpy(((char*)sendBuffer)+i*Nbytes, ((char*)sourceBuffer)+id*Nbytes, Nbytes); } // initiate immediate send and receives to each other 
process as needed int recvOffset = 0; int sendOffset = 0; int sendMessage = 0, recvMessage = 0; for(int r=0;r<size;++r){ if (A->NrecvTotal) { if(A->NrecvPairs[r]) { MPI_Irecv(((char*)recvBuffer)+recvOffset, A->NrecvPairs[r]*Nbytes, MPI_CHAR, r, tag, agmg::comm, (MPI_Request*)A->haloRecvRequests+recvMessage); recvOffset += A->NrecvPairs[r]*Nbytes; ++recvMessage; } } if (A->NsendTotal) { if(A->NsendPairs[r]){ MPI_Isend(((char*)sendBuffer)+sendOffset, A->NsendPairs[r]*Nbytes, MPI_CHAR, r, tag, agmg::comm, (MPI_Request*)A->haloSendRequests+sendMessage); sendOffset += A->NsendPairs[r]*Nbytes; ++sendMessage; } } } // Wait for all sent messages to have left and received messages to have arrived if (A->NsendTotal) { MPI_Status *sendStatus = (MPI_Status*) calloc(A->NsendMessages, sizeof(MPI_Status)); MPI_Waitall(A->NsendMessages, (MPI_Request*)A->haloSendRequests, sendStatus); free(sendStatus); } if (A->NrecvTotal) { MPI_Status *recvStatus = (MPI_Status*) calloc(A->NrecvMessages, sizeof(MPI_Status)); MPI_Waitall(A->NrecvMessages, (MPI_Request*)A->haloRecvRequests, recvStatus); free(recvStatus); } } void csrHaloExchangeStart(csr *A, size_t Nbytes, // message size per element void *sourceBuffer, void *sendBuffer, // temporary buffer void *recvBuffer) { // MPI info int rank, size; rank = agmg::rank; size = agmg::size; int tag = 999; // copy data from outgoing elements into temporary send buffer for(int i=0;i<A->NsendTotal;++i){ // outgoing element dlong id = A->haloElementList[i]; memcpy(((char*)sendBuffer)+i*Nbytes, ((char*)sourceBuffer)+id*Nbytes, Nbytes); } // initiate immediate send and receives to each other process as needed int recvOffset = 0; int sendOffset = 0; int sendMessage = 0, recvMessage = 0; for(int r=0;r<size;++r){ if (A->NrecvTotal) { if(A->NrecvPairs[r]) { MPI_Irecv(((char*)recvBuffer)+recvOffset, A->NrecvPairs[r]*Nbytes, MPI_CHAR, r, tag, agmg::comm, (MPI_Request*)A->haloRecvRequests+recvMessage); recvOffset += A->NrecvPairs[r]*Nbytes; ++recvMessage; } } if 
(A->NsendTotal) { if(A->NsendPairs[r]){ MPI_Isend(((char*)sendBuffer)+sendOffset, A->NsendPairs[r]*Nbytes, MPI_CHAR, r, tag, agmg::comm, (MPI_Request*)A->haloSendRequests+sendMessage); sendOffset += A->NsendPairs[r]*Nbytes; ++sendMessage; } } } } void csrHaloExchangeFinish(csr *A) { // Wait for all sent messages to have left and received messages to have arrived if (A->NsendTotal) { MPI_Status *sendStatus = (MPI_Status*) calloc(A->NsendMessages, sizeof(MPI_Status)); MPI_Waitall(A->NsendMessages, (MPI_Request*)A->haloSendRequests, sendStatus); free(sendStatus); } if (A->NrecvTotal) { MPI_Status *recvStatus = (MPI_Status*) calloc(A->NrecvMessages, sizeof(MPI_Status)); MPI_Waitall(A->NrecvMessages, (MPI_Request*)A->haloRecvRequests, recvStatus); free(recvStatus); } } void dcooHaloExchangeStart(dcoo *A, size_t Nbytes, void *sendBuffer, void *recvBuffer) { // MPI info int rank, size; rank = agmg::rank; size = agmg::size; // count outgoing and incoming meshes int tag = 999; // initiate immediate send and receives to each other process as needed int recvOffset = 0; int sendOffset = 0; int sendMessage = 0, recvMessage = 0; for(int r=0;r<size;++r){ if (A->NrecvTotal) { if(A->NrecvPairs[r]) { MPI_Irecv(((char*)A->recvBuffer)+recvOffset, A->NrecvPairs[r]*Nbytes, MPI_CHAR, r, tag, agmg::comm, (MPI_Request*)A->haloRecvRequests+recvMessage); recvOffset += A->NrecvPairs[r]*Nbytes; ++recvMessage; } } if (A->NsendTotal) { if(A->NsendPairs[r]){ MPI_Isend(((char*)A->sendBuffer)+sendOffset, A->NsendPairs[r]*Nbytes, MPI_CHAR, r, tag, agmg::comm, (MPI_Request*)A->haloSendRequests+sendMessage); sendOffset += A->NsendPairs[r]*Nbytes; ++sendMessage; } } } } void dcooHaloExchangeFinish(dcoo *A) { // Wait for all sent messages to have left and received messages to have arrived if (A->NsendTotal) { MPI_Status *sendStatus = (MPI_Status*) calloc(A->NsendMessages, sizeof(MPI_Status)); MPI_Waitall(A->NsendMessages, (MPI_Request*)A->haloSendRequests, sendStatus); free(sendStatus); } if 
(A->NrecvTotal) { MPI_Status *recvStatus = (MPI_Status*) calloc(A->NrecvMessages, sizeof(MPI_Status)); MPI_Waitall(A->NrecvMessages, (MPI_Request*)A->haloRecvRequests, recvStatus); free(recvStatus); } } void hybHaloExchangeStart(hyb *A, size_t Nbytes, void *sendBuffer, void *recvBuffer) { // MPI info int rank, size; rank = agmg::rank; size = agmg::size; // count outgoing and incoming meshes int tag = 999; // initiate immediate send and receives to each other process as needed int recvOffset = 0; int sendOffset = 0; int sendMessage = 0, recvMessage = 0; for(int r=0;r<size;++r){ if (A->NrecvTotal) { if(A->NrecvPairs[r]) { MPI_Irecv(((char*)recvBuffer)+recvOffset, A->NrecvPairs[r]*Nbytes, MPI_CHAR, r, tag, agmg::comm, (MPI_Request*)A->haloRecvRequests+recvMessage); recvOffset += A->NrecvPairs[r]*Nbytes; ++recvMessage; } } if (A->NsendTotal) { if(A->NsendPairs[r]){ MPI_Isend(((char*)sendBuffer)+sendOffset, A->NsendPairs[r]*Nbytes, MPI_CHAR, r, tag, agmg::comm, (MPI_Request*)A->haloSendRequests+sendMessage); sendOffset += A->NsendPairs[r]*Nbytes; ++sendMessage; } } } } void hybHaloExchangeFinish(hyb *A) { // Wait for all sent messages to have left and received messages to have arrived if (A->NsendTotal) { MPI_Status *sendStatus = (MPI_Status*) calloc(A->NsendMessages, sizeof(MPI_Status)); MPI_Waitall(A->NsendMessages, (MPI_Request*)A->haloSendRequests, sendStatus); free(sendStatus); } if (A->NrecvTotal) { MPI_Status *recvStatus = (MPI_Status*) calloc(A->NrecvMessages, sizeof(MPI_Status)); MPI_Waitall(A->NrecvMessages, (MPI_Request*)A->haloRecvRequests, recvStatus); free(recvStatus); } }
ast-dump-openmp-parallel.c
// RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -ast-dump %s | FileCheck --match-full-lines -implicit-check-not=openmp_structured_block %s void test(void) { #pragma omp parallel ; } // CHECK: TranslationUnitDecl {{.*}} <<invalid sloc>> <invalid sloc> // CHECK: `-FunctionDecl {{.*}} <{{.*}}ast-dump-openmp-parallel.c:3:1, line:6:1> line:3:6 test 'void (void)' // CHECK-NEXT: `-CompoundStmt {{.*}} <col:17, line:6:1> // CHECK-NEXT: `-OMPParallelDirective {{.*}} <line:4:1, col:21> // CHECK-NEXT: `-CapturedStmt {{.*}} <line:5:3> // CHECK-NEXT: `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: |-NullStmt {{.*}} <col:3> // CHECK-NEXT: |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: `-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (unnamed at {{.*}}ast-dump-openmp-parallel.c:4:1) *const restrict'
imd_main_risc_2d.c
/******************************************************************************
*
* IMD -- The ITAP Molecular Dynamics Program
*
* Copyright 1996-2011 Institute for Theoretical and Applied Physics,
* University of Stuttgart, D-70550 Stuttgart
*
******************************************************************************/

/******************************************************************************
*
* imd_main_risc_2d.c -- main loop, risc specific part, two dimensions
*
******************************************************************************/

/******************************************************************************
* $Revision$
* $Date$
******************************************************************************/

#include "imd.h"

/*****************************************************************************
*
*  calc_forces
*
*  Clears the global and per-atom force/energy accumulators, then computes
*  forces for all interacting cell pairs.  The "steps" argument is accepted
*  for interface uniformity with the other calc_forces variants and is not
*  used here.
*
*****************************************************************************/

void calc_forces(int steps)
{
  int n, k;

  /* clear global accumulation variables */
  tot_pot_energy = 0.0;
  virial         = 0.0;
  vir_xx         = 0.0;
  vir_yy         = 0.0;
  vir_xy         = 0.0;
  nfc++;

  /* clear per atom accumulation variables */
#ifdef _OPENMP
#pragma omp parallel for
#endif
  for (k=0; k<ncells; ++k) {
    int  i;
    cell *p;
    p = cell_array + k;
    for (i=0; i<p->n; ++i) {
      KRAFT(p,i,X) = 0.0;
      KRAFT(p,i,Y) = 0.0;
      POTENG(p,i)  = 0.0;
#ifdef NNBR
      NBANZ(p,i) = 0;
#endif
#if defined(STRESS_TENS)
      PRESSTENS(p,i,xx) = 0.0;
      PRESSTENS(p,i,yy) = 0.0;
      PRESSTENS(p,i,xy) = 0.0;
#endif
    }
  }

#ifdef RIGID
  /* clear total forces */
  if ( nsuperatoms>0 )
    for(k=0; k<nsuperatoms; k++) {
      superforce[k].x = 0.0;
      superforce[k].y = 0.0;
    }
#endif

  /* compute forces for all pairs of cells */
  for (n=0; n<nlists; ++n) {
#ifdef _OPENMP
    /* BUGFIX: vir_zz, vir_yz and vir_zx are also passed by address to
       do_forces below, so they must appear in the reduction clause as well;
       otherwise concurrent updates to them would be a data race. */
#pragma omp parallel for schedule(runtime) reduction(+:tot_pot_energy,virial,vir_xx,vir_yy,vir_zz,vir_yz,vir_zx,vir_xy)
#endif
    for (k=0; k<npairs[n]; ++k) {
      vektor pbc;
      pair   *P;
      P = pairs[n]+k;
      /* periodic-image shift for this cell pair */
      pbc.x = P->ipbc[0] * box_x.x + P->ipbc[1] * box_y.x;
      pbc.y = P->ipbc[0] * box_x.y + P->ipbc[1] * box_y.y;
      do_forces(cell_array + P->np, cell_array + P->nq, pbc,
                &tot_pot_energy, &virial, &vir_xx, &vir_yy, &vir_zz,
                &vir_yz, &vir_zx, &vir_xy);
    }
  }
}

/******************************************************************************
*
*  fix_cells
*
*  check if each atom is in the correct cell;
*  move atoms that have left their cell
*
******************************************************************************/

void fix_cells(void)
{
  int i,j,l,clone;
  cell *p, *q;
  ivektor coord;   /* unused locals dcpu/to_coord removed */

  /* apply periodic boundary conditions */
  do_boundaries();

  /* for each cell in bulk */
  for (i=cellmin.x; i < cellmax.x; ++i)
    for (j=cellmin.y; j < cellmax.y; ++j) {
      p = PTR_2D_V(cell_array, i, j, cell_dim);

      /* loop over atoms in cell */
      l=0;
      while( l < p->n ) {
        coord = cell_coord( ORT(p,l,X), ORT(p,l,Y) );
        q = PTR_2D_VV(cell_array,coord,cell_dim);

        /* if it's in the wrong cell, move it to the right cell;
           MOVE_ATOM replaces slot l with the cell's last atom, so l is
           only advanced when the atom stays put */
        if (p != q) {
          MOVE_ATOM(q, p, l);
#ifdef CLONE
          if (l < p->n - nclones)
            for (clone=1; clone<nclones; clone++)
              MOVE_ATOM(q, p, l+clone);
          else /* we are dealing with the last in the stack */
            for (clone=1; clone<nclones; clone++)
              MOVE_ATOM(q, p, l);
#endif
        }
        else ++l;
      }
    }
}
eam.c
/******************************************************************************* Copyright (c) 2016 Advanced Micro Devices, Inc. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. *******************************************************************************/ /// \file /// Compute forces for the Embedded Atom Model (EAM). /// /// The Embedded Atom Model (EAM) is a widely used model of atomic /// interactions in simple metals. 
///
/// http://en.wikipedia.org/wiki/Embedded_atom_model
///
/// In the EAM, the total potential energy is written as a sum of a pair
/// potential and the embedding energy, F:
///
/// \f[
///   U = \sum_{ij} \varphi(r_{ij}) + \sum_i F({\bar\rho_i})
/// \f]
///
/// The pair potential \f$\varphi_{ij}\f$ is a two-body inter-atomic
/// potential, similar to the Lennard-Jones potential, and
/// \f$F(\bar\rho)\f$ is interpreted as the energy required to embed an
/// atom in an electron field with density \f$\bar\rho\f$. The local
/// electron density at site i is calculated by summing the "effective
/// electron density" due to all neighbors of atom i:
///
/// \f[
///    \bar\rho_i = \sum_j \rho_j(r_{ij})
/// \f]
///
/// The force on atom i, \f${\bf F}_i\f$ is given by
///
/// \f{eqnarray*}{
///    {\bf F}_i & = & -\nabla_i \sum_{jk} U(r_{jk})\\
///        & = & - \sum_j\left\{
///    \varphi'(r_{ij}) +
///   [F'(\bar\rho_i) + F'(\bar\rho_j)]\rho'(r_{ij})
///   \right\} \hat{r}_{ij}
/// \f}
///
/// where primes indicate the derivative of a function with respect to
/// its argument and \f$\hat{r}_{ij}\f$ is a unit vector in the
/// direction from atom i to atom j.
///
/// The form of this force expression has two significant consequences.
/// First, unlike with a simple pair potential, it is not possible to
/// compute the potential energy and the forces on the atoms in a single
/// loop over the pairs. The terms involving \f$ F'(\bar\rho) \f$
/// cannot be calculated until \f$ \bar\rho \f$ is known, but
/// calculating \f$ \bar\rho \f$ requires a loop over the pairs. Hence
/// the EAM force routine contains three loops.
/// /// -# Loop over all pairs, compute the two-body /// interaction and the electron density at each atom /// -# Loop over all atoms, compute the embedding energy and its /// derivative for each atom /// -# Loop over all pairs, compute the embedding /// energy contribution to the force and add to the two-body force /// /// The second loop over pairs doubles the data motion requirement /// relative to a simple pair potential. /// /// The second consequence of the force expression is that computing the /// forces on all atoms requires additional communication beyond the /// coordinates of all remote atoms within the cutoff distance. This is /// again because of the terms involving \f$ F'(\bar\rho_j) \f$. If /// atom j is a remote atom, the local task cannot compute \f$ /// \bar\rho_j \f$. (Such a calculation would require all the neighbors /// of atom j, some of which can be up to 2 times the cutoff distance /// away from a local atom---outside the typical halo exchange range.) /// /// To obtain the needed remote density we introduce a second halo /// exchange after loop number 2 to communicate \f$ F'(\bar\rho) \f$ for /// remote atoms. This provides the data we need to complete the third /// loop, but at the cost of introducing a communication operation in /// the middle of the force routine. /// /// At least two alternate methods can be used to deal with the remote /// density problem. One possibility is to extend the halo exchange /// radius for the atom exchange to twice the potential cutoff distance. /// This is likely undesirable due to large increase in communication /// volume. The other possibility is to accumulate partial force terms /// on the tasks where they can be computed. In this method, tasks will /// compute force contributions for remote atoms, then communicate the /// partial forces at the end of the halo exchange. 
/// This method has the
/// advantage that the communication is deferred until after the force
/// loops, but the disadvantage that three times as much data needs to
/// be sent (three components of the force vector instead of a single
/// scalar \f$ F'(\bar\rho) \f$).

#include "eam.h"

#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <assert.h>
#include <omp.h>

#include "constants.h"
#include "memUtils.h"
#include "parallel.h"
#include "linkCells.h"
#include "CoMDTypes.h"
#include "performanceTimers.h"
#include "haloExchange.h"

#define MAX(A,B) ((A) > (B) ? (A) : (B))

/// Handles interpolation of tabular data.
///
/// \see initInterpolationObject
/// \see interpolate
typedef struct InterpolationObjectSt
{
   int n;          //!< the number of values in the table
   real_t x0;      //!< the starting ordinate range
   real_t invDx;   //!< the inverse of the table spacing
   real_t* values; //!< the abscissa values
} InterpolationObject;

/// Derived struct for an EAM potential.
/// Uses table lookups for function evaluation.
/// Polymorphic with BasePotential.
/// \see BasePotential
typedef struct EamPotentialSt
{
   real_t cutoff;       //!< potential cutoff distance in Angstroms
   real_t mass;         //!< mass of atoms in internal units
   real_t lat;          //!< lattice spacing (angs) of unit cell
   char latticeType[8]; //!< lattice type, e.g. FCC, BCC, etc.
   char name[3];        //!< element name
   int atomicNo;        //!< atomic number
   int (*force)(SimFlat* s); //!< function pointer to force routine
   void (*print)(FILE* file, BasePotential* pot);
   void (*destroy)(BasePotential** pot); //!< destruction of the potential
   InterpolationObject* phi; //!< Pair energy
   InterpolationObject* rho; //!< Electron Density
   InterpolationObject* f;   //!< Embedding Energy

   real_t* rhobar;  //!< per atom storage for rhobar
   real_t* dfEmbed; //!< per atom storage for derivative of Embedding
   // The halo exchange below is created lazily on the first eamForce call
   // (see eamForce), since it needs the domain/link-cell decomposition.
   HaloExchange* forceExchange;
   ForceExchangeData* forceExchangeData;
} EamPotential;

// EAM functionality
static int eamForce(SimFlat* s);
static void eamPrint(FILE* file, BasePotential* pot);
static void eamDestroy(BasePotential** pot);
static void eamBcastPotential(EamPotential* pot);

// Table interpolation functionality
static InterpolationObject* initInterpolationObject(
   int n, real_t x0, real_t dx, real_t* data);
static void destroyInterpolationObject(InterpolationObject** table);
// interpolate is called from inside OpenMP target regions, so it must be
// compiled for the device as well as the host.
#pragma omp declare target
static void interpolate(int n, real_t x0, real_t invDx, real_t* values,
                        real_t r, real_t* f, real_t* df);
#pragma omp end declare target
static void bcastInterpolationObject(InterpolationObject** table);
static void printTableData(InterpolationObject* table, const char* fileName);

// Read potential tables from files.
static void eamReadSetfl(EamPotential* pot, const char* dir, const char* potName);
static void eamReadFuncfl(EamPotential* pot, const char* dir, const char* potName);

static void fileNotFound(const char* callSite, const char* filename);
static void notAlloyReady(const char* callSite);
static void typeNotSupported(const char* callSite, const char* type);

/// Allocate and initialize the EAM potential data structure.
///
/// \param [in] dir  The directory in which potential table files are found.
/// \param [in] file The name of the potential table file.
/// \param [in] type The file format of the potential file (setfl or funcfl).
BasePotential* initEamPot(const char* dir, const char* file, const char* type)
{
   EamPotential* pot = comdMalloc(sizeof(EamPotential));
   assert(pot);
   pot->force = eamForce;
   pot->print = eamPrint;
   pot->destroy = eamDestroy;
   pot->phi = NULL;
   pot->rho = NULL;
   pot->f = NULL;

   // Initialization of the next three items requires information about
   // the parallel decomposition and link cells that isn't available
   // when the potential is initialized.  Hence, we defer their
   // initialization until the first time we call the force routine.
   pot->dfEmbed = NULL;
   pot->rhobar = NULL;
   pot->forceExchange = NULL;

   // Only rank 0 reads the table file; eamBcastPotential then distributes
   // the data to all other ranks.
   if (getMyRank() == 0)
   {
      if (strcmp(type, "setfl" ) == 0)
         eamReadSetfl(pot, dir, file);
      else if (strcmp(type,"funcfl") == 0)
         eamReadFuncfl(pot, dir, file);
      else
         typeNotSupported("initEamPot", type);
   }
   eamBcastPotential(pot);

   return (BasePotential*) pot;
}

/// Calculate potential energy and forces for the EAM potential.
///
/// Three steps are required:
///
///   -# Loop over all atoms and their neighbors, compute the two-body
///   interaction and the electron density at each atom
///   -# Loop over all atoms, compute the embedding energy and its
///   derivative for each atom
///   -# Loop over all atoms and their neighbors, compute the embedding
///   energy contribution to the force and add to the two-body force
///
int eamForce(SimFlat* s)
{
   EamPotential* pot = (EamPotential*) s->pot;
   assert(pot);

   int nTotalBoxes = s->boxes->nTotalBoxes;
   int maxTotalAtoms = MAXATOMS*nTotalBoxes;

   // set up halo exchange and internal storage on first call to forces.
   if (pot->forceExchange == NULL)
   {
      pot->dfEmbed = comdMalloc(maxTotalAtoms*sizeof(real_t));
      pot->rhobar  = comdMalloc(maxTotalAtoms*sizeof(real_t));
      pot->forceExchange = initForceHaloExchange(s->domain, s->boxes);
      pot->forceExchangeData = comdMalloc(sizeof(ForceExchangeData));
      pot->forceExchangeData->dfEmbed = pot->dfEmbed;
      pot->forceExchangeData->boxes = s->boxes;
   }

   real_t rCut2 = pot->cutoff*pot->cutoff;
   real_t etot = 0.;
   // zero forces / energy / rho /rhoprime
   int nNbrBoxes = 27;

   // Get local (plain pointer / scalar) aliases for the OpenMP 4 target
   // map clauses below; struct members cannot be mapped directly.
   int nLocalBoxes = s->boxes->nLocalBoxes;
   int *nAtoms = s->boxes->nAtoms;
   int *nbrBoxes = (int *)s->boxes->nbrBoxes;
   real_t *rhobar = pot->rhobar;
   real_t *dfEmbed = pot->dfEmbed;
   real_t *r = (real_t *) s->atoms->r;
   real_t *f = (real_t *) s->atoms->f;
   real_t *U = s->atoms->U;
   InterpolationObject *phi = pot->phi;
   int phi_n = phi->n;
   real_t phi_x0 = phi->x0;
   real_t phi_invDx = phi->invDx;
   real_t *vphi = phi->values;
   InterpolationObject *rho = pot->rho;
   int rho_n = rho->n;
   real_t rho_x0 = rho->x0;
   real_t rho_invDx = rho->invDx;
   real_t *vrho = rho->values;
   InterpolationObject *fio = pot->f;
   int fio_n = fio->n;
   real_t fio_x0 = fio->x0;
   real_t fio_invDx = fio->invDx;
   real_t *vfio = fio->values;

   // allocate an array for storing team reduction results
   real_t *ePotTeam = (real_t *)calloc(nLocalBoxes, sizeof(real_t));

   // Zero forces, energies, rhobar and dfEmbed on the device.
   #pragma omp target data map(to: maxTotalAtoms) \
                           map(from: rhobar[:maxTotalAtoms], \
                                     dfEmbed[:maxTotalAtoms], \
                                     f[:maxTotalAtoms*3], \
                                     U[:maxTotalAtoms])
   #pragma omp target
   #pragma omp parallel for
   for (int ii=0; ii<maxTotalAtoms; ii++)
   {
      for (int m=0; m<3; m++)
         f[ii*3+m] = 0.;
      U[ii] = 0.;
      dfEmbed[ii] = 0.;
      rhobar[ii] = 0.;
   }

   // First pass: pair energy/force and electron density accumulation.
   // One team per local box; threads cover the 27 neighbor boxes.
   // NOTE(review): this map clause uses nbrBoxes[:maxTotalAtoms] while the
   // third pass below maps nbrBoxes[:nTotalBoxes*27]; the array is indexed
   // as nbrBoxes[iBox*27+jTmp], so [:maxTotalAtoms] looks like an over-map
   // of the neighbor table -- confirm against linkCells' allocation.
   // loop over local boxes
   #pragma omp target data map(to: nLocalBoxes,nNbrBoxes, rCut2, \
                                   nAtoms[:nTotalBoxes], \
                                   nbrBoxes[:maxTotalAtoms], \
                                   phi_n, phi_x0, phi_invDx, \
                                   rho_n, rho_x0, rho_invDx, \
                                   vphi[:phi_n],vrho[:rho_n]) \
                           map(tofrom: U[:maxTotalAtoms], \
                                       r[:maxTotalAtoms*3],f[:maxTotalAtoms*3], \
                                       rhobar[:maxTotalAtoms], \
                                       ePotTeam[:nLocalBoxes])
   #pragma omp target teams distribute num_teams(nLocalBoxes) thread_limit(27)
   for (int iBox=0; iBox<nLocalBoxes; iBox++)
   {
      int nIBox = nAtoms[iBox];
      real_t ePot1 = 0.0;
      // loop over neighbor boxes of iBox (some may be halo boxes)
      #pragma omp parallel for reduction(+:ePot1)
      for (int jTmp=0; jTmp<nNbrBoxes; jTmp++)
      {
         int jBox = nbrBoxes[iBox * 27 + jTmp];
         int nJBox = nAtoms[jBox];
         // loop over atoms in iBox
         for (int iOff=MAXATOMS*iBox; iOff<(iBox*MAXATOMS+nIBox); iOff++)
         {
            // loop over atoms in jBox
            for (int jOff=MAXATOMS*jBox; jOff<(jBox*MAXATOMS+nJBox); jOff++)
            {
               real3 dr;
               real_t r2 = 0.0;
               for (int k=0; k<3; k++)
               {
                  dr[k]=r[iOff*3+k]-r[jOff*3+k];
                  r2+=dr[k]*dr[k];
               }
               // r2 > 0.0 excludes an atom's interaction with itself
               if(r2 <= rCut2 && r2 > 0.0)
               {
                  real_t rr = sqrt(r2);
                  real_t phiTmp, dPhi, rhoTmp, dRho;
                  interpolate(phi_n, phi_x0, phi_invDx, vphi, rr, &phiTmp, &dPhi);
                  interpolate(rho_n, rho_x0, rho_invDx, vrho, rr, &rhoTmp, &dRho);
                  // atomics: several neighbor-box threads may update iOff
                  for (int k=0; k<3; k++)
                  {
                     #pragma omp atomic update
                     f[iOff*3+k] -= dPhi*dr[k]/rr;
                  }
                  // Calculate energy contribution (0.5 since each pair is
                  // visited from both sides)
                  #pragma omp atomic update
                  U[iOff] += 0.5*phiTmp;
                  ePot1 += 0.5*phiTmp;
                  // accumulate rhobar for each atom
                  #pragma omp atomic update
                  rhobar[iOff] += rhoTmp;
               }
            } // loop over atoms in jBox
         } // loop over atoms in iBox
      } // loop over neighbor boxes
      ePotTeam[iBox] = ePot1;
   } // loop over local boxes

   // finish the reduction
   for(int i = 0; i < nLocalBoxes; i++)
      etot += ePotTeam[i];

   // Second pass: compute Embedding Energy F(rhobar) and its derivative.
   // loop over all local boxes
   #pragma omp target data map(tofrom: U[:maxTotalAtoms], \
                                       dfEmbed[:maxTotalAtoms]) \
                           map(from: ePotTeam[:nLocalBoxes]) \
                           map(to: nLocalBoxes, nAtoms[:nTotalBoxes], \
                                   fio_n,fio_x0,fio_invDx, \
                                   rhobar[:maxTotalAtoms], \
                                   vfio[:fio_n])
   #pragma omp target teams distribute num_teams(nLocalBoxes) thread_limit(27)
   for (int iBox=0; iBox<nLocalBoxes; iBox++)
   {
      int nIBox = nAtoms[iBox];
      real_t ePot1 = 0.0;
      // loop over atoms in iBox
      #pragma omp parallel for reduction(+:ePot1)
      for (int iOff=MAXATOMS*iBox; iOff<(MAXATOMS*iBox+nIBox); iOff++)
      {
         real_t fEmbed, ldfEmbed;
         interpolate(fio_n, fio_x0, fio_invDx, vfio, rhobar[iOff],
                     &fEmbed, &ldfEmbed);
         dfEmbed[iOff] = ldfEmbed; // save derivative for halo exchange
         #pragma omp atomic update
         U[iOff] += fEmbed;
         ePot1 += fEmbed;
      }
      ePotTeam[iBox] = ePot1;
   }

   // finish the reduction
   for(int i = 0; i < nLocalBoxes; i++)
      etot += ePotTeam[i];

   // exchange derivative of the embedding energy with respect to rhobar
   // (remote atoms' F'(rhobar) cannot be computed locally; see file header)
   startTimer(eamHaloTimer);
   haloExchange(pot->forceExchange, pot->forceExchangeData);
   stopTimer(eamHaloTimer);

   // Third pass: embedding-energy contribution to the forces.
   // loop over local boxes
   #pragma omp target data map(tofrom: f[:maxTotalAtoms*3]) \
                           map(to: rho_n,rho_x0,rho_invDx,nLocalBoxes, \
                                   nAtoms[:nTotalBoxes], \
                                   nbrBoxes[:nTotalBoxes*27], \
                                   nNbrBoxes, rCut2, \
                                   r[:maxTotalAtoms*3], \
                                   dfEmbed[:maxTotalAtoms], \
                                   vrho[:rho_n])
   #pragma omp target teams distribute num_teams(nLocalBoxes) thread_limit(27)
   for (int iBox=0; iBox<nLocalBoxes; iBox++)
   {
      int nIBox = nAtoms[iBox];
      // loop over neighbor boxes of iBox (some may be halo boxes)
      #pragma omp parallel for
      for (int jTmp=0; jTmp<nNbrBoxes; jTmp++)
      {
         int jBox = nbrBoxes[iBox *27 + jTmp];
         int nJBox = nAtoms[jBox];
         // loop over atoms in iBox
         for (int iOff=MAXATOMS*iBox; iOff<(MAXATOMS*iBox+nIBox); iOff++)
         {
            // loop over atoms in jBox
            for (int jOff=MAXATOMS*jBox; jOff<(MAXATOMS*jBox+nJBox); jOff++)
            {
               real_t r2 = 0.0;
               real3 dr;
               for (int k=0; k<3; k++)
               {
                  dr[k]=r[iOff*3+k]-r[jOff*3+k];
                  r2+=dr[k]*dr[k];
               }
               if(r2 <= rCut2 && r2 > 0.0)
               {
                  // local r shadows the position array within this scope
                  real_t r = sqrt(r2);
                  real_t rhoTmp, dRho;
                  interpolate(rho_n, rho_x0, rho_invDx, vrho, r, &rhoTmp, &dRho);
                  for (int k=0; k<3; k++)
                  {
                     #pragma omp atomic update
                     f[iOff*3+k] -= (dfEmbed[iOff]+dfEmbed[jOff])*dRho*dr[k]/r;
                  }
               }
            } // loop over atoms in jBox
         } // loop over atoms in iBox
      } // loop over neighbor boxes
   } // loop over local boxes

   s->ePotential = (real_t) etot;

   return 0;
}

/// Print a summary of the potential parameters (in human units).
void eamPrint(FILE* file, BasePotential* pot)
{
   EamPotential *eamPot = (EamPotential*) pot;
   fprintf(file, " Potential type : EAM\n");
   fprintf(file, " Species name : %s\n", eamPot->name);
   fprintf(file, " Atomic number : %d\n", eamPot->atomicNo);
   fprintf(file, " Mass : "FMT1" amu\n",
           eamPot->mass/amuToInternalMass); // print in amu
   fprintf(file, " Lattice type : %s\n", eamPot->latticeType);
   fprintf(file, " Lattice spacing : "FMT1" Angstroms\n", eamPot->lat);
   fprintf(file, " Cutoff : "FMT1" Angstroms\n", eamPot->cutoff);
}

/// Release all storage owned by the potential and NULL the caller's pointer.
void eamDestroy(BasePotential** pPot)
{
   if ( ! pPot ) return;
   EamPotential* pot = *(EamPotential**)pPot;
   if ( ! pot ) return;
   destroyInterpolationObject(&(pot->phi));
   destroyInterpolationObject(&(pot->rho));
   destroyInterpolationObject(&(pot->f));
   destroyHaloExchange(&(pot->forceExchange));
   comdFree(pot);
   *pPot = NULL;
   return;
}

/// Broadcasts an EamPotential from rank 0 to all other ranks.
/// If the table coefficients are read from a file only rank 0 does the
/// read.  Hence we need to broadcast the potential to all other ranks.
void eamBcastPotential(EamPotential* pot)
{
   assert(pot);

   // Pack the scalar members into one buffer so a single broadcast suffices.
   struct
   {
      real_t cutoff, mass, lat;
      char latticeType[8];
      char name[3];
      int atomicNo;
   } buf;

   if (getMyRank() == 0)
   {
      buf.cutoff   = pot->cutoff;
      buf.mass     = pot->mass;
      buf.lat      = pot->lat;
      buf.atomicNo = pot->atomicNo;
      strcpy(buf.latticeType, pot->latticeType);
      strcpy(buf.name, pot->name);
   }
   bcastParallel(&buf, sizeof(buf), 0);
   pot->cutoff   = buf.cutoff;
   pot->mass     = buf.mass;
   pot->lat      = buf.lat;
   pot->atomicNo = buf.atomicNo;
   strcpy(pot->latticeType, buf.latticeType);
   strcpy(pot->name, buf.name);

   bcastInterpolationObject(&pot->phi);
   bcastInterpolationObject(&pot->rho);
   bcastInterpolationObject(&pot->f);
}

/// Builds a structure to store interpolation data for a tabular
/// function.  Interpolation must be supported on the range
/// \f$[x_0, x_n]\f$, where \f$x_n = n*dx\f$.
///
/// \see interpolate
/// \see bcastInterpolationObject
/// \see destroyInterpolationObject
///
/// \param [in] n    number of values in the table.
/// \param [in] x0   minimum ordinate value of the table.
/// \param [in] dx spacing of the ordinate values. /// \param [in] data abscissa values. An array of size n. InterpolationObject* initInterpolationObject( int n, real_t x0, real_t dx, real_t* data) { InterpolationObject* table = (InterpolationObject *)comdMalloc(sizeof(InterpolationObject)) ; assert(table); table->values = (real_t*)comdCalloc(1, (n+3)*sizeof(real_t)); assert(table->values); table->values++; table->n = n; table->invDx = 1.0/dx; table->x0 = x0; for (int ii=0; ii<n; ++ii) table->values[ii] = data[ii]; table->values[-1] = table->values[0]; table->values[n+1] = table->values[n] = table->values[n-1]; return table; } void destroyInterpolationObject(InterpolationObject** a) { if ( ! a ) return; if ( ! *a ) return; if ( (*a)->values) { (*a)->values--; comdFree((*a)->values); } comdFree(*a); *a = NULL; return; } /// Interpolate a table to determine f(r) and its derivative f'(r). /// /// The forces on the particle are much more sensitive to the derivative /// of the potential than on the potential itself. It is therefore /// absolutely essential that the interpolated derivatives are smooth /// and continuous. This function uses simple quadratic interpolation /// to find f(r). Since quadric interpolants don't have smooth /// derivatives, f'(r) is computed using a 4 point finite difference /// stencil. /// /// Interpolation is used heavily by the EAM force routine so this /// function is a potential performance hot spot. Feel free to /// reimplement this function (and initInterpolationObject if necessay) /// with any higher performing implementation of interpolation, as long /// as the alternate implmentation that has the required smoothness /// properties. Cubic splines are one common alternate choice. /// /// \param [in] table Interpolation table. /// \param [in] r Point where function value is needed. /// \param [out] f The interpolated value of f(r). /// \param [out] df The interpolated value of df(r)/dr. 
#pragma omp declare target static void interpolate(int n, real_t x0, real_t invDx, real_t* values, real_t r, real_t* f, real_t* df) { const real_t* tt = values; // alias if ( r < x0 ) r = x0; r = (r-x0)*(invDx) ; int ii = (int)floor(r); if (ii > n) { ii = n; r = n / invDx; } // reset r to fractional distance r = r - floor(r); real_t g1 = tt[ii+1] - tt[ii-1]; real_t g2 = tt[ii+2] - tt[ii]; *f = tt[ii] + 0.5*r*(g1 + r*(tt[ii+1] + tt[ii-1] - 2.0*tt[ii]) ); *df = 0.5*(g1 + r*(g2-g1))*invDx; } #pragma omp end declare target /// Broadcasts an InterpolationObject from rank 0 to all other ranks. /// /// It is commonly the case that the data needed to create the /// interpolation table is available on only one task (for example, only /// one task has read the data from a file). Broadcasting the table /// eliminates the need to put broadcast code in multiple table readers. /// /// \see eamBcastPotential void bcastInterpolationObject(InterpolationObject** table) { struct { int n; real_t x0, invDx; } buf; if (getMyRank() == 0) { buf.n = (*table)->n; buf.x0 = (*table)->x0; buf.invDx = (*table)->invDx; } bcastParallel(&buf, sizeof(buf), 0); if (getMyRank() != 0) { assert(*table == NULL); *table = comdMalloc(sizeof(InterpolationObject)); (*table)->n = buf.n; (*table)->x0 = buf.x0; (*table)->invDx = buf.invDx; (*table)->values = comdMalloc(sizeof(real_t) * (buf.n+3) ); (*table)->values++; } int valuesSize = sizeof(real_t) * ((*table)->n+3); bcastParallel((*table)->values-1, valuesSize, 0); } void printTableData(InterpolationObject* table, const char* fileName) { if (!printRank()) return; FILE* potData; potData = fopen(fileName,"w"); real_t dR = 1.0/table->invDx; for (int i = 0; i<table->n; i++) { real_t r = table->x0+i*dR; fprintf(potData, "%d %e %e\n", i, r, table->values[i]); } fclose(potData); } /// Reads potential data from a setfl file and populates /// corresponding members and InterpolationObjects in an EamPotential. 
/// /// setfl is a file format for tabulated potential functions used by /// the original EAM code DYNAMO. A setfl file contains EAM /// potentials for multiple elements. /// /// The contents of a setfl file are: /// /// | Line Num | Description /// | :------: | :---------- /// | 1 - 3 | comments /// | 4 | ntypes type1 type2 ... typen /// | 5 | nrho drho nr dr rcutoff /// | F, rho | Following line 5 there is a block for each atom type with F, and rho. /// | b1 | ielem(i) amass(i) latConst(i) latType(i) /// | b2 | embedding function values F(rhobar) starting at rhobar=0 /// | ... | (nrho values. Multiple values per line allowed.) /// | bn | electron density, starting at r=0 /// | ... | (nr values. Multiple values per line allowed.) /// | repeat | Return to b1 for each atom type. /// | phi | phi_ij for (1,1), (2,1), (2,2), (3,1), (3,2), (3,3), (4,1), ..., /// | p1 | pair potential between type i and type j, starting at r=0 /// | ... | (nr values. Multiple values per line allowed.) /// | repeat | Return to p1 for each phi_ij /// /// Where: /// - ntypes : number of element types in the potential /// - nrho : number of points the embedding energy F(rhobar) /// - drho : table spacing for rhobar /// - nr : number of points for rho(r) and phi(r) /// - dr : table spacing for r in Angstroms /// - rcutoff : cut-off distance in Angstroms /// - ielem(i) : atomic number for element(i) /// - amass(i) : atomic mass for element(i) in AMU /// - latConst(i) : lattice constant for element(i) in Angstroms /// - latType(i) : lattice type for element(i) /// /// setfl format stores r*phi(r), so we need to converted to the pair /// potential phi(r). In the file, phi(r)*r is in eV*Angstroms. /// NB: phi is not defined for r = 0 /// /// F(rhobar) is in eV. 
///
void eamReadSetfl(EamPotential* pot, const char* dir, const char* potName)
{
   char tmp[4096];
   sprintf(tmp, "%s/%s", dir, potName);

   FILE* potFile = fopen(tmp, "r");
   if (potFile == NULL)
      fileNotFound("eamReadSetfl", tmp);

   // read the first 3 lines (comments)
   fgets(tmp, sizeof(tmp), potFile);
   fgets(tmp, sizeof(tmp), potFile);
   fgets(tmp, sizeof(tmp), potFile);

   // line 4: number of element types; this code supports only one.
   fgets(tmp, sizeof(tmp), potFile);
   int nElems;
   sscanf(tmp, "%d", &nElems);
   if( nElems != 1 )
      notAlloyReady("eamReadSetfl");

   // line 5: table sizes, spacings, and cutoff
   int nRho, nR;
   double dRho, dR, cutoff;
   //  The same cutoff is used by all alloys, NB: cutoff = nR * dR is redundant
   fgets(tmp, sizeof(tmp), potFile);
   sscanf(tmp, "%d %le %d %le %le", &nRho, &dRho, &nR, &dR, &cutoff);
   pot->cutoff = cutoff;

   // **** THIS CODE IS RESTRICTED TO ONE ELEMENT
   // Per-atom header
   fgets(tmp, sizeof(tmp), potFile);
   int nAtomic;
   double mass, lat;
   char latticeType[8];
   // fix: bound the %s conversion so a long lattice-type token cannot
   // overflow latticeType[8].
   sscanf(tmp, "%d %le %le %7s", &nAtomic, &mass, &lat, latticeType);
   pot->atomicNo = nAtomic;
   pot->lat = lat;
   pot->mass = mass * amuToInternalMass;  // file has mass in AMU.
   strcpy(pot->latticeType, latticeType);

   // allocate read buffer large enough for either table
   int bufSize = MAX(nRho, nR);
   real_t* buf = comdMalloc(bufSize * sizeof(real_t));
   real_t x0 = 0.0;

   // Read embedding energy F(rhobar)
   for (int ii=0; ii<nRho; ++ii)
      fscanf(potFile, FMT1, buf+ii);
   pot->f = initInterpolationObject(nRho, x0, dRho, buf);

   // Read electron density rho(r)
   for (int ii=0; ii<nR; ++ii)
      fscanf(potFile, FMT1, buf+ii);
   pot->rho = initInterpolationObject(nR, x0, dR, buf);

   // Read phi(r)*r and convert to phi(r); the file stores r*phi(r) and
   // phi is undefined at r = 0.
   for (int ii=0; ii<nR; ++ii)
      fscanf(potFile, FMT1, buf+ii);
   for (int ii=1; ii<nR; ++ii)
   {
      real_t r = x0 + ii*dR;
      buf[ii] /= r;
   }
   buf[0] = buf[1] + (buf[1] - buf[2]); // Linear interpolation to get phi[0].
   pot->phi = initInterpolationObject(nR, x0, dR, buf);

   comdFree(buf);
   fclose(potFile); // fix: file handle was previously leaked

   // write to text file for comparison, currently commented out
   /* printPot(pot->f, "SetflDataF.txt"); */
   /* printPot(pot->rho, "SetflDataRho.txt"); */
   /* printPot(pot->phi, "SetflDataPhi.txt"); */
}

/// Reads potential data from a funcfl file and populates
/// corresponding members and InterpolationObjects in an EamPotential.
///
/// funcfl is a file format for tabulated potential functions used by
/// the original EAM code DYNAMO.  A funcfl file contains an EAM
/// potential for a single element.
///
/// The contents of a funcfl file are:
///
/// | Line Num | Description
/// | :------: | :----------
/// | 1        | comments
/// | 2        | elem amass latConstant latType
/// | 3        | nrho   drho   nr   dr    rcutoff
/// | 4        | embedding function values F(rhobar) starting at rhobar=0
/// |   ...    | (nrho values. Multiple values per line allowed.)
/// | x'       | electrostatic interaction Z(r) starting at r=0
/// |   ...    | (nr values. Multiple values per line allowed.)
/// | y'       | electron density values rho(r) starting at r=0
/// |   ...    | (nr values. Multiple values per line allowed.)
///
/// Where:
///    -  elem          : atomic number for this element
///    -  amass         : atomic mass for this element in AMU
///    -  latConstant   : lattice constant for this element in Angstroms
///    -  latticeType   : lattice type for this element (e.g. FCC)
///    -  nrho          : number of values for the embedding function, F(rhobar)
///    -  drho          : table spacing for rhobar
///    -  nr            : number of values for Z(r) and rho(r)
///    -  dr            : table spacing for r in Angstroms
///    -  rcutoff       : potential cut-off distance in Angstroms
///
/// funcfl format stores the "electrostatic interaction" Z(r).  This needs
/// to be converted to the pair potential phi(r) using the formula
/// \f[phi = Z(r) * Z(r) / r\f]
/// NB: phi is not defined for r = 0
///
/// Z(r) is in atomic units (i.e., sqrt[Hartree * bohr]) so it is
/// necessary to convert to eV.
///
/// F(rhobar) is in eV.
///
void eamReadFuncfl(EamPotential* pot, const char* dir, const char* potName)
{
   char tmp[4096];

   sprintf(tmp, "%s/%s", dir, potName);
   FILE* potFile = fopen(tmp, "r");
   if (potFile == NULL)
      fileNotFound("eamReadFuncfl", tmp);

   // line 1: element name
   fgets(tmp, sizeof(tmp), potFile);
   char name[3];
   // fix: bound the %s conversion so a long element name cannot
   // overflow name[3] (2 chars + NUL).
   sscanf(tmp, "%2s", name);
   strcpy(pot->name, name);

   // line 2: atomic number, mass, lattice constant and type
   int nAtomic;
   double mass, lat;
   char latticeType[8];
   fgets(tmp,sizeof(tmp),potFile);
   // fix: bound the %s conversion (see eamReadSetfl).
   sscanf(tmp, "%d %le %le %7s", &nAtomic, &mass, &lat, latticeType);
   pot->atomicNo = nAtomic;
   pot->lat = lat;
   pot->mass = mass*amuToInternalMass;    // file has mass in AMU.
   strcpy(pot->latticeType, latticeType);

   // line 3: table sizes, spacings, and cutoff
   int nRho, nR;
   double dRho, dR, cutoff;
   fgets(tmp,sizeof(tmp),potFile);
   sscanf(tmp, "%d %le %d %le %le", &nRho, &dRho, &nR, &dR, &cutoff);
   pot->cutoff = cutoff;
   real_t x0 = 0.0; // tables start at zero.

   // allocate read buffer large enough for either table
   int bufSize = MAX(nRho, nR);
   real_t* buf = comdMalloc(bufSize * sizeof(real_t));

   // read embedding energy
   for (int ii=0; ii<nRho; ++ii)
      fscanf(potFile, FMT1, buf+ii);
   pot->f = initInterpolationObject(nRho, x0, dRho, buf);

   // read Z(r) and convert to phi(r) = Z(r)*Z(r)/r (in eV)
   for (int ii=0; ii<nR; ++ii)
      fscanf(potFile, FMT1, buf+ii);
   for (int ii=1; ii<nR; ++ii)
   {
      real_t r = x0 + ii*dR;
      buf[ii] *= buf[ii] / r;
      buf[ii] *= hartreeToEv * bohrToAngs; // convert to eV
   }
   buf[0] = buf[1] + (buf[1] - buf[2]); // linear interpolation to get phi[0].
   pot->phi = initInterpolationObject(nR, x0, dR, buf);

   // read electron density rho
   for (int ii=0; ii<nR; ++ii)
      fscanf(potFile, FMT1, buf+ii);
   pot->rho = initInterpolationObject(nR, x0, dR, buf);

   comdFree(buf);
   fclose(potFile); // fix: file handle was previously leaked

   /* printPot(pot->f,   "funcflDataF.txt"); */
   /* printPot(pot->rho, "funcflDataRho.txt"); */
   /* printPot(pot->phi, "funcflDataPhi.txt"); */
}

/// Report a missing potential file and abort the run.
void fileNotFound(const char* callSite, const char* filename)
{
   fprintf(screenOut,
           "%s: Can't open file %s.  Fatal Error.\n", callSite, filename);
   exit(-1);
}

/// Report an unsupported multi-species setfl file and abort the run.
void notAlloyReady(const char* callSite)
{
   fprintf(screenOut,
           "%s: CoMD 1.1 does not support alloys and cannot\n"
           "   read setfl files with multiple species.  Fatal Error.\n",
           callSite);
   exit(-1);
}

/// Report an unrecognized potential file format and abort the run.
void typeNotSupported(const char* callSite, const char* type)
{
   fprintf(screenOut,
           "%s: Potential type %s not supported. Fatal Error.\n",
           callSite, type);
   exit(-1);
}
fast_gaussian_blur_template.h
// Copyright (C) 2017-2022 Basile Fraboni
// Copyright (C) 2014 Ivan Kutskir (for the original fast blur implementation)
// All Rights Reserved
// You may use, distribute and modify this code under the
// terms of the MIT license. For further details please refer
// to : https://mit-license.org/
//
//#pragma once
#include <Arduino.h>
//!
//! \file fast_gaussian_blur_template.h
//! \author Basile Fraboni
//! \date 2017 - 2022
//!
//! \brief This contains a C++ implementation of a fast Gaussian blur algorithm in linear time.
//!
//! The image buffer is supposed to be of size w * h * c, with w its width, h its height,
//! and c its number of channels.
//! The default implementation only supports up to 4 channels images, but one can easily add support for any number of channels
//! using either specific template cases or a generic function that takes the number of channels as an explicit parameter.
//! This implementation is focused on learning and readability more than on performance.
//! The fast blur algorithm is performed with several box blur passes over an image.
//! The filter converges towards a true Gaussian blur after several passes (thanks TCL). In practice,
//! three passes are sufficient for good quality results.
//! For further details please refer to:
//!     - http://blog.ivank.net/fastest-gaussian-blur.html
//!     - https://www.peterkovesi.com/papers/FastGaussianSmoothing.pdf
//!     - https://github.com/bfraboni/FastGaussianBlur
//!
//! **Note:** The fast gaussian blur algorithm is not accurate on image boundaries.
//! It performs a diffusion of the signal with several independent passes, each pass depending
//! on the preceding one. Some of the diffused signal is lost near borders and results in a slight
//! loss of accuracy for the next pass. This problem can be solved by increasing the image support by
//! half the box kernel extent at each pass of the algorithm. The added padding would in this case
//! capture the diffusion and make the next pass accurate.
//!
//! On the contrary, true Gaussian blur does not suffer this problem since the whole diffusion process
//! is performed in one pass only.
//! The extra padding is not performed in this implementation, however we provide and discuss several border
//! policies resulting in different approximations and accuracies.
//!
//!
//! \brief Enumeration that describes border policies for filters.
//!
//! For a detailed description of border policies please refer to:
//! https://en.wikipedia.org/wiki/Kernel_(image_processing)#Edge_Handling
//!
//! \todo Add support for other border policies (wrap, mirror, constant)
enum BorderPolicy
{
    kExtend,
    kKernelCrop,
    // kWrap,
    // kMirror,
    // kConstant
};

//!
//! \brief This function performs a single separable horizontal box blur pass.
//!
//! To complete a box blur pass we need to do this operation two times, one horizontally
//! and one vertically. Templated by buffer data type T, buffer number of channels C, and border policy P.
//!
//! \param[in] in           source buffer
//! \param[in,out] out      target buffer
//! \param[in] w            image width
//! \param[in] h            image height
//! \param[in] r            box dimension
//!
template<typename T, int C, BorderPolicy P = kKernelCrop>
void horizontal_blur(const T * in, T * out, const int w, const int h, const int r)
{
    // Normalization factor for a full kernel of width 2r+1.
    float iarr = 1.f / (r+r+1);
    #pragma omp parallel for
    for(int i=0; i<h; i++)
    {
        // ti: write index, li: trailing (leaving) index, ri: leading index.
        int ti = i*w, li = ti, ri = ti+r;

        // fv/lv: first/last pixel values used to extend the borders.
        float fv[C], lv[C], val[C];
        for(int ch = 0; ch < C; ++ch)
        {
            fv[ch] =  P == kExtend ? in[ti*C+ch] : 0;       // unused with kcrop policy
            lv[ch] =  P == kExtend ? in[(ti+w-1)*C+ch] : 0; // unused with kcrop policy
            val[ch] = P == kExtend ? (r+1)*fv[ch] : 0;
        }

        // initial accumulation of the first r pixels
        for(int j=0; j<r; j++)
        for(int ch = 0; ch < C; ++ch)
        {
            val[ch] += in[(ti+j)*C+ch];
        }

        // left border - filter kernel is incomplete
        // (kKernelCrop divides by the actual window size r+j+1)
        for(int j=0; j<=r; j++, ri++, ti++)
        for(int ch = 0; ch < C; ++ch)
        {
            val[ch] += P == kExtend ? in[ri*C+ch] - fv[ch] : in[ri*C+ch];
            out[ti*C+ch] = P == kExtend ? val[ch]*iarr : val[ch]/(r+j+1);
        }

        // center of the image - filter kernel is complete
        for(int j=r+1; j<w-r; j++, ri++, ti++, li++)
        for(int ch = 0; ch < C; ++ch)
        {
            val[ch] += in[ri*C+ch] - in[li*C+ch];
            out[ti*C+ch] = val[ch]*iarr;
        }

        // right border - filter kernel is incomplete
        // (kKernelCrop divides by the actual window size r+w-j)
        for(int j=w-r; j<w; j++, ti++, li++)
        for(int ch = 0; ch < C; ++ch)
        {
            val[ch] += P == kExtend ? lv[ch] - in[li*C+ch] : -in[li*C+ch];
            out[ti*C+ch] = P == kExtend ? val[ch]*iarr : val[ch]/(r+w-j);
        }
    }
}

//!
//! \brief Utility template dispatcher function for horizontal_blur. Templated by buffer data type T.
//!
//! \param[in] in           source buffer
//! \param[in,out] out      target buffer
//! \param[in] w            image width
//! \param[in] h            image height
//! \param[in] c            image channels
//! \param[in] r            box dimension
//!
template<typename T>
void horizontal_blur(const T * in, T * out, const int w, const int h, const int c, const int r)
{
    // Dispatch the runtime channel count to a compile-time specialization.
    switch(c)
    {
        case 1: horizontal_blur<T,1>(in, out, w, h, r); break;
        case 2: horizontal_blur<T,2>(in, out, w, h, r); break;
        case 3: horizontal_blur<T,3>(in, out, w, h, r); break;
        case 4: horizontal_blur<T,4>(in, out, w, h, r); break;
        default: printf("horizontal_blur over %d channels is not supported yet. Add a specific case if possible or fall back to the generic version.\n", c); break;
        // default: horizontal_blur<T>(in, out, w, h, c, r); break;
    }
}

//!
//! \brief This function performs a 2D transposition of an image.
//!
//! The transposition is done per
//! block to reduce the number of cache misses and improve cache coherency for large image buffers.
//! Templated by buffer data type T and buffer number of channels C.
//!
//! \param[in] in           source buffer
//! \param[in,out] out      target buffer
//! \param[in] w            image width
//! \param[in] h            image height
//!
template<typename T, int C>
void flip_block(const T * in, T * out, const int w, const int h)
{
    // tile size chosen so a tile of C-channel pixels stays cache resident
    constexpr int block = 256/C;
    #pragma omp parallel for collapse(2)
    for(int x= 0; x < w; x+= block)
    for(int y= 0; y < h; y+= block)
    {
        const T * p = in + y*w*C + x*C;
        T * q = out + y*C + x*h*C;
        // clip the tile at the right/bottom image borders
        // (qualified std::min: the unqualified `min` only compiled via a
        // `using namespace std` elsewhere)
        const int blockx= std::min(w, x+block) - x;
        const int blocky= std::min(h, y+block) - y;
        for(int xx= 0; xx < blockx; xx++)
        {
            for(int yy= 0; yy < blocky; yy++)
            {
                // copy one pixel (C interleaved channels)
                for(int k= 0; k < C; k++)
                    q[k]= p[k];
                p+= w*C;
                q+= C;
            }
            p+= -blocky*w*C + C;
            q+= -blocky*C + h*C;
        }
    }
}

//!
//! \brief Utility template dispatcher function for flip_block. Templated by buffer data type T.
//!
//! \param[in]     in  source buffer
//! \param[in,out] out target buffer
//! \param[in]     w   image width
//! \param[in]     h   image height
//! \param[in]     c   image channels
//!
template<typename T>
void flip_block(const T * in, T * out, const int w, const int h, const int c)
{
    switch(c)
    {
        case 1: flip_block<T,1>(in, out, w, h); break;
        case 2: flip_block<T,2>(in, out, w, h); break;
        case 3: flip_block<T,3>(in, out, w, h); break;
        case 4: flip_block<T,4>(in, out, w, h); break;
        default: printf("flip_block over %d channels is not supported yet. Add a specific case if possible or fall back to the generic version.\n", c); break;
        // default: flip_block<T>(in, out, w, h, c); break;
    }
}

//!
//! \brief This function converts the standard deviation of
//! Gaussian blur into a box radius for each box blur pass.
//!
//! For further details please refer to :
//! - https://www.peterkovesi.com/papers/FastGaussianSmoothing.pdf
//!
//! \param[out] boxes box radii for kernel sizes of 2*boxes[i]+1
//! \param[in]  sigma Gaussian standard deviation
//! \param[in]  n     number of box blur passes
//!
void sigma_to_box_radius(int boxes[], const float sigma, const int n)
{
    // ideal width of a single averaging filter (Kovesi's w_ideal)
    float w_ideal = sqrt((12*sigma*sigma/n)+1);
    int w_low = w_ideal;            // truncation == floor for positive values
    if(w_low % 2 == 0) w_low--;     // widths must be odd
    int w_up = w_low + 2;           // next odd width above

    // ideal number of passes that should use the lower width
    float m_ideal = (12*sigma*sigma - n*w_low*w_low - 4*n*w_low - 3*n)/(-4*w_low - 4);
    int m_count = m_ideal + 0.5f;   // round-to-nearest via +0.5 and integer cast

    // first m_count passes use w_low, the rest w_up; convert widths 2r+1 -> radii r
    for(int i = 0; i < n; i++)
        boxes[i] = ((i < m_count ? w_low : w_up) - 1) / 2;
}

//!
//! \brief This function performs a fast Gaussian blur. Templated by buffer data type T and number of passes N.
//!
//! Applying several box blurs tends towards a true Gaussian blur (thanks TCL). Three passes are sufficient
//! for good results. Templated by buffer data type T and number of passes N. The input buffer is also used
//! as temporary and modified during the process hence it can not be constant.
//!
//! Usually the process should alternate between horizontal and vertical passes
//! as many times as we want box blur passes. However thanks to box blur properties
//! the separable passes can be performed in any order without changing the result.
//! Hence for performance purposes the algorithm is:
//! - apply N times horizontal blur (horizontal passes)
//! - flip the image buffer (transposition)
//! - apply N times horizontal blur (vertical passes)
//! - flip the image buffer (transposition)
//!
//! We provide two versions of the function:
//! - generic N passes (in which more std::swap are used)
//! - specialized 3 passes only
//!
//! \param[in,out] in     source buffer reference ptr
//! \param[in,out] out    target buffer reference ptr
//! \param[in]     w      image width
//! \param[in]     h      image height
//! \param[in]     c      image channels
//! \param[in]     sigma  Gaussian standard deviation
//!
//! Generic N-pass fast Gaussian blur: N horizontal box blurs, transpose,
//! N more horizontal box blurs (i.e. vertical passes), transpose back.
//!
//! NOTE(review): `in` and `out` are passed BY VALUE here, so the std::swap
//! calls only exchange the local pointer copies. Tracing the 2*N blur writes
//! plus the 2 flips shows the final flip always writes into the buffer the
//! CALLER passed as `in` (for any N); upstream variants of this code take
//! `T*&` references instead — confirm against the call sites.
template<typename T, unsigned int N>
void fast_gaussian_blur(T * in, T * out, const int w, const int h, const int c, const float sigma)
{
    // compute box kernel sizes (one radius per pass)
    int boxes[N];
    sigma_to_box_radius(boxes, sigma, N);

    // perform N horizontal blur passes, ping-ponging between the two buffers
    for(int i = 0; i < N; ++i)
    {
        horizontal_blur(in, out, w, h, c, boxes[i]);
        std::swap(in, out);
    }

    // flip buffer (transpose) so the vertical passes become horizontal ones
    flip_block(in, out, w, h, c);
    std::swap(in, out);

    // perform N horizontal blur passes on flipped image (note swapped w/h)
    for(int i = 0; i < N; ++i)
    {
        horizontal_blur(in, out, h, w, c, boxes[i]);
        std::swap(in, out);
    }

    // flip buffer back to the original orientation
    flip_block(in, out, h, w, c);
}

//!
//! \brief Specialized 3 passes of separable fast box blur with less std::swap. Templated by buffer data type T.
//!
//! Applying several times box blur tends towards a true Gaussian blur (thanks TCL). Three passes are sufficient
//! for good results. Templated by buffer data type T and number of passes N. The input buffer is also used
//! as temporary and modified during the process hence it can not be constant.
//!
//! Usually the process should alternate between horizontal and vertical passes
//! as many times as we want box blur passes. However thanks to box blur properties
//! the separable passes can be performed in any order without changing the result.
//! Hence for performance purposes the algorithm is:
//! - apply N times horizontal blur (horizontal passes)
//! - flip the image buffer (transposition)
//! - apply N times horizontal blur (vertical passes)
//! - flip the image buffer (transposition)
//!
//! We provide two versions of the function:
//! - generic N passes (in which more std::swap are used)
//! - specialized 3 passes only
//!
//! \param[in,out] in     source buffer reference ptr
//! \param[in,out] out    target buffer reference ptr
//! \param[in]     w      image width
//! \param[in]     h      image height
//! \param[in]     c      image channels
//! \param[in]     sigma  Gaussian standard deviation
//!
//! Specialized 3-pass variant: the buffer roles are alternated explicitly so
//! only one (ineffective, see note below) std::swap remains at the end.
template<typename T>
void fast_gaussian_blur(T * in, T * out, const int w, const int h, const int c, const float sigma)
{
    // compute box kernel sizes (one radius per pass)
    int boxes[3];
    sigma_to_box_radius(boxes, sigma, 3);

    // perform 3 horizontal blur passes (result of the third lands in `out`)
    horizontal_blur(in, out, w, h, c, boxes[0]);
    horizontal_blur(out, in, w, h, c, boxes[1]);
    horizontal_blur(in, out, w, h, c, boxes[2]);

    // flip buffer (transpose) into `in`
    flip_block(out, in, w, h, c);

    // perform 3 horizontal blur passes on flipped image (vertical passes)
    horizontal_blur(in, out, h, w, c, boxes[0]);
    horizontal_blur(out, in, h, w, c, boxes[1]);
    horizontal_blur(in, out, h, w, c, boxes[2]);

    // flip buffer back; the final result is written into `in`
    flip_block(out, in, h, w, c);

    // NOTE(review): `in` and `out` are local pointer copies (pass-by-value), so
    // this swap has NO effect for the caller; the blurred image is in the buffer
    // the caller passed as `in`. The upstream version of this routine takes
    // `T*&` references, where this swap does update the caller's pointers —
    // confirm against the call sites.
    std::swap(in, out);
}

//!
//! \brief Utility template dispatcher function for fast_gaussian_blur. Templated by buffer data type T.
//!
//! This is the main exposed function and the one that should be used in programs.
//!
//! \todo Make border policies an argument of this function.
//!
//! \param[in,out] in     source buffer reference ptr
//! \param[in,out] out    target buffer reference ptr
//! \param[in]     w      image width
//! \param[in]     h      image height
//! \param[in]     c      image channels
//! \param[in]     sigma  Gaussian standard deviation
//! \param[in]     n      number of passes, should be > 0
//!
//! Dispatch on the runtime pass count so each instantiation gets a
//! compile-time N; n == 3 routes to the hand-specialized version.
template<typename T>
void fast_gaussian_blur(T * in, T * out, const int w, const int h, const int c, const float sigma, const unsigned int n)
{
    switch(n)
    {
        case 1: fast_gaussian_blur<T,1>(in, out, w, h, c, sigma); break;
        case 2: fast_gaussian_blur<T,2>(in, out, w, h, c, sigma); break;
        case 3: fast_gaussian_blur<T>(in, out, w, h, c, sigma); break; // specialized 3 passes version
        case 4: fast_gaussian_blur<T,4>(in, out, w, h, c, sigma); break;
        case 5: fast_gaussian_blur<T,5>(in, out, w, h, c, sigma); break;
        case 6: fast_gaussian_blur<T,6>(in, out, w, h, c, sigma); break;
        case 7: fast_gaussian_blur<T,7>(in, out, w, h, c, sigma); break;
        case 8: fast_gaussian_blur<T,8>(in, out, w, h, c, sigma); break;
        case 9: fast_gaussian_blur<T,9>(in, out, w, h, c, sigma); break;
        case 10: fast_gaussian_blur<T,10>(in, out, w, h, c, sigma); break;
        // %u (not %d): n is unsigned int, so the %d conversion was undefined
        default: printf("fast_gaussian_blur with %u passes is not supported yet. Add a specific case if possible or fall back to the generic version.\n", n); break;
        // default: fast_gaussian_blur<T,10>(in, out, w, h, c, sigma, n); break;
    }
}
GB_binop__islt_uint8.c
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__islt_uint8)
// A.*B function (eWiseMult):       GB (_AemultB_08__islt_uint8)
// A.*B function (eWiseMult):       GB (_AemultB_02__islt_uint8)
// A.*B function (eWiseMult):       GB (_AemultB_04__islt_uint8)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__islt_uint8)
// A*D function (colscale):         GB (_AxD__islt_uint8)
// D*A function (rowscale):         GB (_DxB__islt_uint8)
// C+=B function (dense accum):     GB (_Cdense_accumB__islt_uint8)
// C+=b function (dense accum):     GB (_Cdense_accumb__islt_uint8)
// C+=A+B function (dense ewise3):  GB ((none))
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__islt_uint8)
// C=scalar+B                       GB (_bind1st__islt_uint8)
// C=scalar+B'                      GB (_bind1st_tran__islt_uint8)
// C=A+scalar                       GB (_bind2nd__islt_uint8)
// C=A'+scalar                      GB (_bind2nd_tran__islt_uint8)

// C type:   uint8_t
// A type:   uint8_t
// A pattern? 0
// B type:   uint8_t
// B pattern? 0

// BinaryOp: cij = (aij < bij)
// (ISLT: "is less than", result 0 or 1 stored as uint8_t)

#define GB_ATYPE \
    uint8_t

#define GB_BTYPE \
    uint8_t

#define GB_CTYPE \
    uint8_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    uint8_t aij = GBX (Ax, pA, A_iso)

// true if values of A are not used
#define GB_A_IS_PATTERN \
    0 \

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    uint8_t bij = GBX (Bx, pB, B_iso)

// true if values of B are not used
#define GB_B_IS_PATTERN \
    0 \

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    uint8_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = (x < y) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
// (the GxB_NO_* symbols come from GB_control.h and allow this kernel to be
// compiled out)
#define GB_DISABLE \
    (GxB_NO_ISLT || GxB_NO_UINT8 || GxB_NO_ISLT_UINT8)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// (ISLT is none of these, so this variant is compiled out.)

void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

void GB (_Cdense_ewise3_noaccum__islt_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__islt_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing,
    const int B_ntasks,
    const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__islt_uint8)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type uint8_t
        uint8_t bwork = (*((uint8_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__islt_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t *restrict Cx = (uint8_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__islt_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t *restrict Cx = (uint8_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__islt_uint8)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    // alpha/beta are only read by the template when is_eWiseUnion is true
    uint8_t alpha_scalar ;
    uint8_t beta_scalar ;
    if (is_eWiseUnion)
    {
        alpha_scalar = (*((uint8_t *) alpha_scalar_in)) ;
        beta_scalar  = (*((uint8_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__islt_uint8)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__islt_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__islt_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__islt_uint8)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__islt_uint8)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t *Cx = (uint8_t *) Cx_output ;
    uint8_t   x = (*((uint8_t *) x_input)) ;
    uint8_t *Bx = (uint8_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip entries not present in the bitmap Bb (if any)
        if (!GBB (Bb, p)) continue ;
        uint8_t bij = GBX (Bx, p, false) ;
        Cx [p] = (x < bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__islt_uint8)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    uint8_t *Cx = (uint8_t *) Cx_output ;
    uint8_t *Ax = (uint8_t *) Ax_input ;
    uint8_t   y = (*((uint8_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap Ab (if any)
        if (!GBB (Ab, p)) continue ;
        uint8_t aij = GBX (Ax, p, false) ;
        Cx [p] = (aij < y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                   \
{                                           \
    uint8_t aij = GBX (Ax, pA, false) ;     \
    Cx [pC] = (x < aij) ;                   \
}

GrB_Info GB (_bind1st_tran__islt_uint8)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint8_t

    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t x = (*((const uint8_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif

    // restore GB_ATYPE for any subsequent use
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint8_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                   \
{                                           \
    uint8_t aij = GBX (Ax, pA, false) ;     \
    Cx [pC] = (aij < y) ;                   \
}

GrB_Info GB (_bind2nd_tran__islt_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t y = (*((const uint8_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

// closes #ifndef GBCUDA_DEV
#endif
irbuilder_unroll_partial_heuristic_constant_for.c
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --function-signature --include-generated-funcs // RUN: %clang_cc1 -fopenmp-enable-irbuilder -verify -fopenmp -fopenmp-version=51 -x c -triple x86_64-unknown-unknown -emit-llvm %s -o - | FileCheck %s // expected-no-diagnostics // REQUIRES: x86-registered-target // TODO: The unroll-factor heuristic might be able to use the information that the trip count is constant, but currently is not able to determine that. #ifndef HEADER #define HEADER double sind(double); // CHECK-LABEL: define {{.*}}@unroll_partial_heuristic_constant_for( // CHECK-NEXT: [[ENTRY:.*]]: // CHECK-NEXT: %[[A_ADDR:.+]] = alloca float*, align 8 // CHECK-NEXT: %[[B_ADDR:.+]] = alloca float*, align 8 // CHECK-NEXT: %[[C_ADDR:.+]] = alloca float*, align 8 // CHECK-NEXT: %[[D_ADDR:.+]] = alloca float*, align 8 // CHECK-NEXT: %[[E_ADDR:.+]] = alloca float*, align 8 // CHECK-NEXT: %[[OFFSET_ADDR:.+]] = alloca float, align 4 // CHECK-NEXT: %[[I:.+]] = alloca i32, align 4 // CHECK-NEXT: %[[AGG_CAPTURED:.+]] = alloca %struct.anon, align 8 // CHECK-NEXT: %[[AGG_CAPTURED1:.+]] = alloca %struct.anon.0, align 4 // CHECK-NEXT: %[[DOTCOUNT_ADDR:.+]] = alloca i32, align 4 // CHECK-NEXT: %[[P_LASTITER:.+]] = alloca i32, align 4 // CHECK-NEXT: %[[P_LOWERBOUND:.+]] = alloca i32, align 4 // CHECK-NEXT: %[[P_UPPERBOUND:.+]] = alloca i32, align 4 // CHECK-NEXT: %[[P_STRIDE:.+]] = alloca i32, align 4 // CHECK-NEXT: store float* %[[A:.+]], float** %[[A_ADDR]], align 8 // CHECK-NEXT: store float* %[[B:.+]], float** %[[B_ADDR]], align 8 // CHECK-NEXT: store float* %[[C:.+]], float** %[[C_ADDR]], align 8 // CHECK-NEXT: store float* %[[D:.+]], float** %[[D_ADDR]], align 8 // CHECK-NEXT: store float* %[[E:.+]], float** %[[E_ADDR]], align 8 // CHECK-NEXT: store float %[[OFFSET:.+]], float* %[[OFFSET_ADDR]], align 4 // CHECK-NEXT: store i32 0, i32* %[[I]], align 4 // CHECK-NEXT: %[[TMP0:.+]] = getelementptr inbounds %struct.anon, %struct.anon* 
%[[AGG_CAPTURED]], i32 0, i32 0 // CHECK-NEXT: store i32* %[[I]], i32** %[[TMP0]], align 8 // CHECK-NEXT: %[[TMP1:.+]] = getelementptr inbounds %struct.anon.0, %struct.anon.0* %[[AGG_CAPTURED1]], i32 0, i32 0 // CHECK-NEXT: %[[TMP2:.+]] = load i32, i32* %[[I]], align 4 // CHECK-NEXT: store i32 %[[TMP2]], i32* %[[TMP1]], align 4 // CHECK-NEXT: call void @__captured_stmt(i32* %[[DOTCOUNT_ADDR]], %struct.anon* %[[AGG_CAPTURED]]) // CHECK-NEXT: %[[DOTCOUNT:.+]] = load i32, i32* %[[DOTCOUNT_ADDR]], align 4 // CHECK-NEXT: br label %[[OMP_LOOP_PREHEADER:.+]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_LOOP_PREHEADER]]: // CHECK-NEXT: %[[TMP3:.+]] = udiv i32 %[[DOTCOUNT]], 4 // CHECK-NEXT: %[[TMP4:.+]] = urem i32 %[[DOTCOUNT]], 4 // CHECK-NEXT: %[[TMP5:.+]] = icmp ne i32 %[[TMP4]], 0 // CHECK-NEXT: %[[TMP6:.+]] = zext i1 %[[TMP5]] to i32 // CHECK-NEXT: %[[OMP_FLOOR0_TRIPCOUNT:.+]] = add nuw i32 %[[TMP3]], %[[TMP6]] // CHECK-NEXT: br label %[[OMP_FLOOR0_PREHEADER:.+]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_FLOOR0_PREHEADER]]: // CHECK-NEXT: store i32 0, i32* %[[P_LOWERBOUND]], align 4 // CHECK-NEXT: %[[TMP7:.+]] = sub i32 %[[OMP_FLOOR0_TRIPCOUNT]], 1 // CHECK-NEXT: store i32 %[[TMP7]], i32* %[[P_UPPERBOUND]], align 4 // CHECK-NEXT: store i32 1, i32* %[[P_STRIDE]], align 4 // CHECK-NEXT: %[[OMP_GLOBAL_THREAD_NUM:.+]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @1) // CHECK-NEXT: call void @__kmpc_for_static_init_4u(%struct.ident_t* @1, i32 %[[OMP_GLOBAL_THREAD_NUM]], i32 34, i32* %[[P_LASTITER]], i32* %[[P_LOWERBOUND]], i32* %[[P_UPPERBOUND]], i32* %[[P_STRIDE]], i32 1, i32 1) // CHECK-NEXT: %[[TMP8:.+]] = load i32, i32* %[[P_LOWERBOUND]], align 4 // CHECK-NEXT: %[[TMP9:.+]] = load i32, i32* %[[P_UPPERBOUND]], align 4 // CHECK-NEXT: %[[TMP10:.+]] = sub i32 %[[TMP9]], %[[TMP8]] // CHECK-NEXT: %[[TMP11:.+]] = add i32 %[[TMP10]], 1 // CHECK-NEXT: br label %[[OMP_FLOOR0_HEADER:.+]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_FLOOR0_HEADER]]: // CHECK-NEXT: %[[OMP_FLOOR0_IV:.+]] = phi 
i32 [ 0, %[[OMP_FLOOR0_PREHEADER]] ], [ %[[OMP_FLOOR0_NEXT:.+]], %[[OMP_FLOOR0_INC:.+]] ] // CHECK-NEXT: br label %[[OMP_FLOOR0_COND:.+]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_FLOOR0_COND]]: // CHECK-NEXT: %[[OMP_FLOOR0_CMP:.+]] = icmp ult i32 %[[OMP_FLOOR0_IV]], %[[TMP11]] // CHECK-NEXT: br i1 %[[OMP_FLOOR0_CMP]], label %[[OMP_FLOOR0_BODY:.+]], label %[[OMP_FLOOR0_EXIT:.+]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_FLOOR0_BODY]]: // CHECK-NEXT: %[[TMP12:.+]] = add i32 %[[OMP_FLOOR0_IV]], %[[TMP8]] // CHECK-NEXT: %[[TMP13:.+]] = icmp eq i32 %[[TMP12]], %[[OMP_FLOOR0_TRIPCOUNT]] // CHECK-NEXT: %[[TMP14:.+]] = select i1 %[[TMP13]], i32 %[[TMP4]], i32 4 // CHECK-NEXT: br label %[[OMP_TILE0_PREHEADER:.+]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_TILE0_PREHEADER]]: // CHECK-NEXT: br label %[[OMP_TILE0_HEADER:.+]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_TILE0_HEADER]]: // CHECK-NEXT: %[[OMP_TILE0_IV:.+]] = phi i32 [ 0, %[[OMP_TILE0_PREHEADER]] ], [ %[[OMP_TILE0_NEXT:.+]], %[[OMP_TILE0_INC:.+]] ] // CHECK-NEXT: br label %[[OMP_TILE0_COND:.+]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_TILE0_COND]]: // CHECK-NEXT: %[[OMP_TILE0_CMP:.+]] = icmp ult i32 %[[OMP_TILE0_IV]], %[[TMP14]] // CHECK-NEXT: br i1 %[[OMP_TILE0_CMP]], label %[[OMP_TILE0_BODY:.+]], label %[[OMP_TILE0_EXIT:.+]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_TILE0_BODY]]: // CHECK-NEXT: %[[TMP15:.+]] = mul nuw i32 4, %[[TMP12]] // CHECK-NEXT: %[[TMP16:.+]] = add nuw i32 %[[TMP15]], %[[OMP_TILE0_IV]] // CHECK-NEXT: br label %[[OMP_LOOP_BODY:.+]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_LOOP_BODY]]: // CHECK-NEXT: call void @__captured_stmt.1(i32* %[[I]], i32 %[[TMP16]], %struct.anon.0* %[[AGG_CAPTURED1]]) // CHECK-NEXT: %[[TMP17:.+]] = load float*, float** %[[B_ADDR]], align 8 // CHECK-NEXT: %[[TMP18:.+]] = load i32, i32* %[[I]], align 4 // CHECK-NEXT: %[[IDXPROM:.+]] = sext i32 %[[TMP18]] to i64 // CHECK-NEXT: %[[ARRAYIDX:.+]] = getelementptr inbounds float, float* %[[TMP17]], i64 %[[IDXPROM]] // CHECK-NEXT: %[[TMP19:.+]] = load float, 
float* %[[ARRAYIDX]], align 4 // CHECK-NEXT: %[[CONV:.+]] = fpext float %[[TMP19]] to double // CHECK-NEXT: %[[CALL:.+]] = call double @sind(double noundef %[[CONV]]) // CHECK-NEXT: %[[TMP20:.+]] = load float*, float** %[[C_ADDR]], align 8 // CHECK-NEXT: %[[TMP21:.+]] = load i32, i32* %[[I]], align 4 // CHECK-NEXT: %[[IDXPROM2:.+]] = sext i32 %[[TMP21]] to i64 // CHECK-NEXT: %[[ARRAYIDX3:.+]] = getelementptr inbounds float, float* %[[TMP20]], i64 %[[IDXPROM2]] // CHECK-NEXT: %[[TMP22:.+]] = load float, float* %[[ARRAYIDX3]], align 4 // CHECK-NEXT: %[[CONV4:.+]] = fpext float %[[TMP22]] to double // CHECK-NEXT: %[[MUL:.+]] = fmul double %[[CALL]], %[[CONV4]] // CHECK-NEXT: %[[TMP23:.+]] = load float*, float** %[[D_ADDR]], align 8 // CHECK-NEXT: %[[TMP24:.+]] = load i32, i32* %[[I]], align 4 // CHECK-NEXT: %[[IDXPROM5:.+]] = sext i32 %[[TMP24]] to i64 // CHECK-NEXT: %[[ARRAYIDX6:.+]] = getelementptr inbounds float, float* %[[TMP23]], i64 %[[IDXPROM5]] // CHECK-NEXT: %[[TMP25:.+]] = load float, float* %[[ARRAYIDX6]], align 4 // CHECK-NEXT: %[[CONV7:.+]] = fpext float %[[TMP25]] to double // CHECK-NEXT: %[[MUL8:.+]] = fmul double %[[MUL]], %[[CONV7]] // CHECK-NEXT: %[[TMP26:.+]] = load float*, float** %[[E_ADDR]], align 8 // CHECK-NEXT: %[[TMP27:.+]] = load i32, i32* %[[I]], align 4 // CHECK-NEXT: %[[IDXPROM9:.+]] = sext i32 %[[TMP27]] to i64 // CHECK-NEXT: %[[ARRAYIDX10:.+]] = getelementptr inbounds float, float* %[[TMP26]], i64 %[[IDXPROM9]] // CHECK-NEXT: %[[TMP28:.+]] = load float, float* %[[ARRAYIDX10]], align 4 // CHECK-NEXT: %[[CONV11:.+]] = fpext float %[[TMP28]] to double // CHECK-NEXT: %[[MUL12:.+]] = fmul double %[[MUL8]], %[[CONV11]] // CHECK-NEXT: %[[TMP29:.+]] = load float, float* %[[OFFSET_ADDR]], align 4 // CHECK-NEXT: %[[CONV13:.+]] = fpext float %[[TMP29]] to double // CHECK-NEXT: %[[ADD:.+]] = fadd double %[[MUL12]], %[[CONV13]] // CHECK-NEXT: %[[TMP30:.+]] = load float*, float** %[[A_ADDR]], align 8 // CHECK-NEXT: %[[TMP31:.+]] = load i32, i32* 
%[[I]], align 4 // CHECK-NEXT: %[[IDXPROM14:.+]] = sext i32 %[[TMP31]] to i64 // CHECK-NEXT: %[[ARRAYIDX15:.+]] = getelementptr inbounds float, float* %[[TMP30]], i64 %[[IDXPROM14]] // CHECK-NEXT: %[[TMP32:.+]] = load float, float* %[[ARRAYIDX15]], align 4 // CHECK-NEXT: %[[CONV16:.+]] = fpext float %[[TMP32]] to double // CHECK-NEXT: %[[ADD17:.+]] = fadd double %[[CONV16]], %[[ADD]] // CHECK-NEXT: %[[CONV18:.+]] = fptrunc double %[[ADD17]] to float // CHECK-NEXT: store float %[[CONV18]], float* %[[ARRAYIDX15]], align 4 // CHECK-NEXT: br label %[[OMP_TILE0_INC]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_TILE0_INC]]: // CHECK-NEXT: %[[OMP_TILE0_NEXT]] = add nuw i32 %[[OMP_TILE0_IV]], 1 // CHECK-NEXT: br label %[[OMP_TILE0_HEADER]], !llvm.loop ![[LOOP3:[0-9]+]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_TILE0_EXIT]]: // CHECK-NEXT: br label %[[OMP_TILE0_AFTER:.+]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_TILE0_AFTER]]: // CHECK-NEXT: br label %[[OMP_FLOOR0_INC]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_FLOOR0_INC]]: // CHECK-NEXT: %[[OMP_FLOOR0_NEXT]] = add nuw i32 %[[OMP_FLOOR0_IV]], 1 // CHECK-NEXT: br label %[[OMP_FLOOR0_HEADER]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_FLOOR0_EXIT]]: // CHECK-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @1, i32 %[[OMP_GLOBAL_THREAD_NUM]]) // CHECK-NEXT: %[[OMP_GLOBAL_THREAD_NUM19:.+]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @1) // CHECK-NEXT: call void @__kmpc_barrier(%struct.ident_t* @2, i32 %[[OMP_GLOBAL_THREAD_NUM19]]) // CHECK-NEXT: br label %[[OMP_FLOOR0_AFTER:.+]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_FLOOR0_AFTER]]: // CHECK-NEXT: br label %[[OMP_LOOP_AFTER:.+]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_LOOP_AFTER]]: // CHECK-NEXT: ret void // CHECK-NEXT: } void unroll_partial_heuristic_constant_for(float *a, float *b, float *c, float *d, float *e, float offset) { #pragma omp for #pragma omp unroll partial for (int i = 0; i < 128; i++) { a[i] += sind(b[i]) * c[i] * d[i] * e[i] + offset; } } #endif // HEADER // CHECK-LABEL: 
define {{.*}}@__captured_stmt( // CHECK-NEXT: [[ENTRY:.*]]: // CHECK-NEXT: %[[DISTANCE_ADDR:.+]] = alloca i32*, align 8 // CHECK-NEXT: %[[__CONTEXT_ADDR:.+]] = alloca %struct.anon*, align 8 // CHECK-NEXT: %[[DOTSTART:.+]] = alloca i32, align 4 // CHECK-NEXT: %[[DOTSTOP:.+]] = alloca i32, align 4 // CHECK-NEXT: %[[DOTSTEP:.+]] = alloca i32, align 4 // CHECK-NEXT: store i32* %[[DISTANCE:.+]], i32** %[[DISTANCE_ADDR]], align 8 // CHECK-NEXT: store %struct.anon* %[[__CONTEXT:.+]], %struct.anon** %[[__CONTEXT_ADDR]], align 8 // CHECK-NEXT: %[[TMP0:.+]] = load %struct.anon*, %struct.anon** %[[__CONTEXT_ADDR]], align 8 // CHECK-NEXT: %[[TMP1:.+]] = getelementptr inbounds %struct.anon, %struct.anon* %[[TMP0]], i32 0, i32 0 // CHECK-NEXT: %[[TMP2:.+]] = load i32*, i32** %[[TMP1]], align 8 // CHECK-NEXT: %[[TMP3:.+]] = load i32, i32* %[[TMP2]], align 4 // CHECK-NEXT: store i32 %[[TMP3]], i32* %[[DOTSTART]], align 4 // CHECK-NEXT: store i32 128, i32* %[[DOTSTOP]], align 4 // CHECK-NEXT: store i32 1, i32* %[[DOTSTEP]], align 4 // CHECK-NEXT: %[[TMP4:.+]] = load i32, i32* %[[DOTSTART]], align 4 // CHECK-NEXT: %[[TMP5:.+]] = load i32, i32* %[[DOTSTOP]], align 4 // CHECK-NEXT: %[[CMP:.+]] = icmp slt i32 %[[TMP4]], %[[TMP5]] // CHECK-NEXT: br i1 %[[CMP]], label %[[COND_TRUE:.+]], label %[[COND_FALSE:.+]] // CHECK-EMPTY: // CHECK-NEXT: [[COND_TRUE]]: // CHECK-NEXT: %[[TMP6:.+]] = load i32, i32* %[[DOTSTOP]], align 4 // CHECK-NEXT: %[[TMP7:.+]] = load i32, i32* %[[DOTSTART]], align 4 // CHECK-NEXT: %[[SUB:.+]] = sub nsw i32 %[[TMP6]], %[[TMP7]] // CHECK-NEXT: %[[TMP8:.+]] = load i32, i32* %[[DOTSTEP]], align 4 // CHECK-NEXT: %[[DIV:.+]] = udiv i32 %[[SUB]], %[[TMP8]] // CHECK-NEXT: br label %[[COND_END:.+]] // CHECK-EMPTY: // CHECK-NEXT: [[COND_FALSE]]: // CHECK-NEXT: br label %[[COND_END]] // CHECK-EMPTY: // CHECK-NEXT: [[COND_END]]: // CHECK-NEXT: %[[COND:.+]] = phi i32 [ %[[DIV]], %[[COND_TRUE]] ], [ 0, %[[COND_FALSE]] ] // CHECK-NEXT: %[[TMP9:.+]] = load i32*, i32** 
%[[DISTANCE_ADDR]], align 8 // CHECK-NEXT: store i32 %[[COND]], i32* %[[TMP9]], align 4 // CHECK-NEXT: ret void // CHECK-NEXT: } // CHECK-LABEL: define {{.*}}@__captured_stmt.1( // CHECK-NEXT: [[ENTRY:.*]]: // CHECK-NEXT: %[[LOOPVAR_ADDR:.+]] = alloca i32*, align 8 // CHECK-NEXT: %[[LOGICAL_ADDR:.+]] = alloca i32, align 4 // CHECK-NEXT: %[[__CONTEXT_ADDR:.+]] = alloca %struct.anon.0*, align 8 // CHECK-NEXT: store i32* %[[LOOPVAR:.+]], i32** %[[LOOPVAR_ADDR]], align 8 // CHECK-NEXT: store i32 %[[LOGICAL:.+]], i32* %[[LOGICAL_ADDR]], align 4 // CHECK-NEXT: store %struct.anon.0* %[[__CONTEXT:.+]], %struct.anon.0** %[[__CONTEXT_ADDR]], align 8 // CHECK-NEXT: %[[TMP0:.+]] = load %struct.anon.0*, %struct.anon.0** %[[__CONTEXT_ADDR]], align 8 // CHECK-NEXT: %[[TMP1:.+]] = getelementptr inbounds %struct.anon.0, %struct.anon.0* %[[TMP0]], i32 0, i32 0 // CHECK-NEXT: %[[TMP2:.+]] = load i32, i32* %[[TMP1]], align 4 // CHECK-NEXT: %[[TMP3:.+]] = load i32, i32* %[[LOGICAL_ADDR]], align 4 // CHECK-NEXT: %[[MUL:.+]] = mul i32 1, %[[TMP3]] // CHECK-NEXT: %[[ADD:.+]] = add i32 %[[TMP2]], %[[MUL]] // CHECK-NEXT: %[[TMP4:.+]] = load i32*, i32** %[[LOOPVAR_ADDR]], align 8 // CHECK-NEXT: store i32 %[[ADD]], i32* %[[TMP4]], align 4 // CHECK-NEXT: ret void // CHECK-NEXT: } // CHECK: ![[META0:[0-9]+]] = !{i32 1, !"wchar_size", i32 4} // CHECK: ![[META1:[0-9]+]] = !{i32 7, !"openmp", i32 51} // CHECK: ![[META2:[0-9]+]] = // CHECK: ![[LOOP3]] = distinct !{![[LOOP3]], ![[LOOPPROP4:[0-9]+]], ![[LOOPPROP5:[0-9]+]]} // CHECK: ![[LOOPPROP4]] = !{!"llvm.loop.unroll.enable"} // CHECK: ![[LOOPPROP5]] = !{!"llvm.loop.unroll.count", i32 4}
fixed_version.c
#include <stdio.h>

/* Demo: fill a 10-element array so that T[idx] == idx, with the loop
 * iterations distributed across OpenMP threads. Each iteration writes a
 * distinct slot, so no synchronization beyond the parallel-for is needed. */
int main(void) {
    int T[10];

    // initialize the shared array T in parallel
    #pragma omp parallel for shared(T)
    for (int idx = 0; idx < 10; idx++) {
        T[idx] = idx;
    }

    return 0;
}
llg.c
#include "common_clib.h" /* The right hand side of the LLG equation for the CVOde solver. This can be * used both for the micromagnetic and atomistic codes since m or S are unitless * and the prefactors keep the same structure. * * The LLG equation has the structure: * ( * is for dot or scalar product) * * dm -gamma * ---- = -------- ( m X H_eff + a * m X ( m x H_eff ) ) * dt 2 * ( 1 + a ) * * where a is the Gilbert damping constant, gamma is the gyromagnetic * ratio ( gamma = 1.76e11 for a free electron; for the micromagnetic * case, we use gamma_0 = mu_0 * gamma = 2.21e5, for a free electron), * m is the magnetisation vector and H_eff is the effective field. * * In our calculation, we usually compute: * m x (m x H_eff) = ( m * H_eff) m - (m * m) H_eff * Then, if we define the perpendicular component of the effective * field as: * H_perp = H_eff - ( m * H_eff) m * we can write * dm -gamma * ---- = -------- ( m X H_perp - a * H_perp ) * dt 2 * ( 1 + a ) * since m X m = 0 and, theoretically, m * m = 1 (at zero temperature m has * fixed length). However, for the second term to the right hand side, * proportional to a, it is better to keep the m X ( m x H_eff ) term with (m * * m) for stability purposes, thus we use * * H_perp = (m * m) H_eff - ( m * H_eff) m * for both terms. * * Additionally, to preserve the magnetisation length, we need to correct the * dm / dt term for every time step, adding the following term to the right * hand size expression of the LLG: * dm dm 2 * ---- ---> ---- + c * ( 1 - m ) m * dt dt * with _________ * / 2 * c = 6 * / ( dm ) * / ( ---- ) * \/ ( dt ) * The correction must be introduced since numerically, m can change length * during a step of the integration (which would not occur if the integration * step is infinitely small), deviating from the real answer. If we just * rescaled m, we would have to recompute the effective field (m changes) and * also the solution would present jumps due to the rescaling. 
With the term
 * specified above, m stays close to 1, giving a more continuous solution, and
 * also the term stays as zero if m is exactly 1 (notice that if m increases,
 * the correction is negative and it decreases m's length; similarly if m
 * decreases). The prefactor c must be specified because the correction term
 * must be sufficiently strong to affect the solution. Accordingly, we can
 * think of dm/dt as a kind of velocity that is proportional to the rate of
 * change of m; hence, using its magnitude, the correction is stronger for
 * large deviations and weaker for small deviations. The factor 6 is added
 * ad hoc, and seems to work well when computing the solutions, but its
 * specification still requires a more rigorous proof. It is worth mentioning
 * that the norm of dm/dt changes the time scaling by a factor proportional to
 * 1/t; therefore, in the future we could try to estimate its influence with
 * more mathematical/numerical rigour and analyse an optimal value for the
 * prefactor (6).
*/ void llg_rhs(double *restrict dm_dt, double *restrict m, double *restrict h, double *restrict alpha, int *restrict pins, double gamma, int n, int do_precession, double default_c) { int i, j, k; double coeff, mm, mh, c; double hpi, hpj, hpk; #pragma omp parallel for private(i,j,k,coeff,mm, mh, c, hpi,hpj,hpk) for (int id = 0; id < n; id++) { // Indexes for the 3 components of the spin (magnetic moment) // at the i-th lattice (mesh) site --> x, y, z i = 3 * id; j = i + 1; k = i + 2; // Pinned spins do not follow the dynamical equation if (pins[id] > 0) { dm_dt[i] = 0; dm_dt[j] = 0; dm_dt[k] = 0; continue; } coeff = -gamma / (1.0 + alpha[id] * alpha[id]); // Dot products mm = m[i] * m[i] + m[j] * m[j] + m[k] * m[k]; mh = m[i] * h[i] + m[j] * h[j] + m[k] * h[k]; // Usually, m is normalised, i.e., mm=1; // so hp = mm.h - mh.m = -m x (m x h) // We set here the perpendicular componenet of the field // but using the (m * m) product hpi = mm * h[i] - mh * m[i]; hpj = mm * h[j] - mh * m[j]; hpk = mm * h[k] - mh * m[k]; // IMPORTANT: do not ignore mm !! // What we've found is that if we igonre mm, i.e. using // hpi = h[i] - mh * m[i]; // hpj = h[j] - mh * m[j]; // hpk = h[k] - mh * m[k]; // the micromagnetic standard problem 4 failed to converge (?) 
// // NOTE (Fri 08 Jul 2016 13:58): In fact, the problem converges but with 2 less // decimals of accuracy, compared with the OOMMF calculation double mth0 = 0, mth1 = 0, mth2 = 0; // The first term: m x H_eff = m x H_perp if (do_precession){ mth0 = cross_x(m[i], m[j], m[k], hpi, hpj, hpk); mth1 = cross_y(m[i], m[j], m[k], hpi, hpj, hpk); mth2 = cross_z(m[i], m[j], m[k], hpi, hpj, hpk); } // The RHS term of the LLG equation dm_dt[i] = coeff * (mth0 - hpi * alpha[id]); dm_dt[j] = coeff * (mth1 - hpj * alpha[id]); dm_dt[k] = coeff * (mth2 - hpk * alpha[id]); // In future, we will try the new method to integrate the LLG equation, // A mixed mid-point Runge-Kutta like scheme for the integration of // Landau-Lifshitz equation Journal of Applied Physics 115, 17D101 // (2014) if possible, we can combine it with adaptive step size, don't // know how to do but it's worth a try. if (default_c < 0){ c = 6 * sqrt(dm_dt[i] * dm_dt[i] + dm_dt[j] * dm_dt[j] + dm_dt[k] * dm_dt[k] ); } else { c = default_c; } //printf("%0.15g %0.15g\n", c, default_c); // Correct the RHS term to keep m normalised dm_dt[i] += c * (1 - mm) * m[i]; dm_dt[j] += c * (1 - mm) * m[j]; dm_dt[k] += c * (1 - mm) * m[k]; } } void llg_rhs_jtimes(double *restrict jtn, double *restrict m, double *restrict h, double *restrict mp, double *restrict hp, double *restrict alpha, int *restrict pins, double gamma, int n, int do_precession, double default_c) { //#pragma omp parallel for private(i,j,k,coeff,mm, mh, c, hpi,hpj,hpk) for (int id = 0; id < n; id++) { int i = 3 * id; int j = i + 1; int k = i + 2; if (pins[i]>0){ continue; } double coeff = -gamma/(1.0+alpha[i]*alpha[i]); if (do_precession){ jtn[i] = coeff*(cross_x(mp[i],mp[j],mp[k],h[i],h[j],h[k])+cross_x(m[i],m[j],m[k],hp[i],hp[j],hp[k])); jtn[j] = coeff*(cross_y(mp[i],mp[j],mp[k],h[i],h[j],h[k])+cross_y(m[i],m[j],m[k],hp[i],hp[j],hp[k])); jtn[k] = coeff*(cross_z(mp[i],mp[j],mp[k],h[i],h[j],h[k])+cross_z(m[i],m[j],m[k],hp[i],hp[j],hp[k])); }else{ jtn[i] = 0; 
jtn[j] = 0; jtn[k] = 0; } double mm = m[i]*m[i] + m[j]*m[j] + m[k]*m[k]; double mh = m[i]*h[i] + m[j]*h[j] + m[k]*h[k]; double mhp = m[i]*hp[i] + m[j]*hp[j] + m[k]*hp[k]; double mph = mp[i]*h[i] + mp[j]*h[j] + mp[k]*h[k]; double mmp = m[i]*mp[i] + m[j]*mp[j] + m[k]*mp[k]; jtn[i] += alpha[i]*coeff*((mph+mhp)*m[i]+mh*mp[i]-2*mmp*h[i]-mm*hp[i]); jtn[j] += alpha[i]*coeff*((mph+mhp)*m[j]+mh*mp[j]-2*mmp*h[j]-mm*hp[j]); jtn[k] += alpha[i]*coeff*((mph+mhp)*m[k]+mh*mp[k]-2*mmp*h[k]-mm*hp[k]); if (default_c>0){ jtn[i] += default_c *((1-mm)*mp[i]-2*mmp*m[i]); jtn[j] += default_c *((1-mm)*mp[j]-2*mmp*m[j]); jtn[k] += default_c *((1-mm)*mp[k]-2*mmp*m[k]); } } }
GB_binop__times_int8.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_mkl.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB_AaddB__times_int8 // A.*B function (eWiseMult): GB_AemultB__times_int8 // A*D function (colscale): GB_AxD__times_int8 // D*A function (rowscale): GB_DxB__times_int8 // C+=B function (dense accum): GB_Cdense_accumB__times_int8 // C+=b function (dense accum): GB_Cdense_accumb__times_int8 // C+=A+B function (dense ewise3): GB_Cdense_ewise3_accum__times_int8 // C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__times_int8 // C=scalar+B GB_bind1st__times_int8 // C=scalar+B' GB_bind1st_tran__times_int8 // C=A+scalar GB_bind2nd__times_int8 // C=A'+scalar GB_bind2nd_tran__times_int8 // C type: int8_t // A type: int8_t // B,b type: int8_t // BinaryOp: cij = (aij * bij) #define GB_ATYPE \ int8_t #define GB_BTYPE \ int8_t #define GB_CTYPE \ int8_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int8_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ int8_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int8_t t // cij = Ax [pA] 
#define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y) \ z = (x * y) ; // op is second #define GB_OP_IS_SECOND \ 0 // op is plus_fp32 or plus_fp64 #define GB_OP_IS_PLUS_REAL \ 0 // op is minus_fp32 or minus_fp64 #define GB_OP_IS_MINUS_REAL \ 0 // GB_cblas_*axpy gateway routine, if it exists for this operator and type: #define GB_CBLAS_AXPY \ (none) // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_TIMES || GxB_NO_INT8 || GxB_NO_TIMES_INT8) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
// Auto-generated kernel wrappers for the TIMES operator on int8: each
// function configures the macros above and pulls in a shared template.
void GB_Cdense_ewise3_accum__times_int8
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_ewise3_noaccum__times_int8
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_accumB__times_int8
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_accumb__times_int8
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int8_t
        int8_t bwork = (*((int8_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): unreachable duplicate return, emitted by the generator
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB_AxD__times_int8
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t *GB_RESTRICT Cx = (int8_t *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB_DxB__times_int8
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t *GB_RESTRICT Cx = (int8_t *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

GrB_Info GB_AaddB__times_int8
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_add_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

GrB_Info GB_AemultB__times_int8
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB_bind1st__times_int8
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t *Cx = (int8_t *) Cx_output ;
    int8_t x = (*((int8_t *) x_input)) ;
    int8_t *Bx = (int8_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        int8_t bij = Bx [p] ;
        Cx [p] = (x * bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB_bind2nd__times_int8
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    int8_t *Cx = (int8_t *) Cx_output ;
    int8_t *Ax = (int8_t *) Ax_input ;
    int8_t y = (*((int8_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        int8_t aij = Ax [p] ;
        Cx [p] = (aij * y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA)       \
{                               \
    int8_t aij = Ax [pA] ;      \
    Cx [pC] = (x * aij) ;       \
}

GrB_Info GB_bind1st_tran__times_int8
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef GB_ATYPE
    #define GB_ATYPE \
    int8_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t x = (*((const int8_t *) x_input)) ;
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code following this function
    #undef GB_ATYPE
    #define GB_ATYPE \
    int8_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA)       \
{                               \
    int8_t aij = Ax [pA] ;      \
    Cx [pC] = (aij * y) ;       \
}

GrB_Info GB_bind2nd_tran__times_int8
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t y = (*((const int8_t *) y_input)) ;
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
GB_nvec_nonempty.c
//------------------------------------------------------------------------------ // GB_nvec_nonempty: count the number of non-empty vectors //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // All pending tuples are ignored. If a vector has all zombies it is still // counted as non-empty. #include "GB.h" GB_PUBLIC // accessed by the MATLAB tests in GraphBLAS/Test only int64_t GB_nvec_nonempty // return # of non-empty vectors ( const GrB_Matrix A, // input matrix to examine GB_Context Context ) { //-------------------------------------------------------------------------- // check inputs //-------------------------------------------------------------------------- ASSERT (A != NULL) ; ASSERT (GB_ZOMBIES_OK (A)) ; ASSERT (GB_JUMBLED_OK (A)) ; ASSERT (GB_PENDING_OK (A)) ; //-------------------------------------------------------------------------- // trivial cases //-------------------------------------------------------------------------- if (GB_IS_FULL (A) || GB_IS_BITMAP (A)) { // A is full or bitmap; nvec_nonempty depends only on the dimensions return ((A->vlen == 0) ? 
0 : A->vdim) ; } if (GB_NNZ (A) == 0) { // A is sparse or hypersparse, with no entries return (0) ; } //-------------------------------------------------------------------------- // determine the number of threads to use //-------------------------------------------------------------------------- int64_t anvec = A->nvec ; GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ; int nthreads = GB_nthreads (anvec, chunk, nthreads_max) ; //-------------------------------------------------------------------------- // count the non-empty columns //-------------------------------------------------------------------------- int64_t nvec_nonempty = 0 ; const int64_t *GB_RESTRICT Ap = A->p ; int64_t k ; #pragma omp parallel for num_threads(nthreads) schedule(static) \ reduction(+:nvec_nonempty) for (k = 0 ; k < anvec ; k++) { if (Ap [k] < Ap [k+1]) nvec_nonempty++ ; } ASSERT (nvec_nonempty >= 0 && nvec_nonempty <= A->vdim) ; //-------------------------------------------------------------------------- // return result //-------------------------------------------------------------------------- return (nvec_nonempty) ; }
graph_generator.c
/* Copyright (C) 2009-2010 The Trustees of Indiana University.             */
/*                                                                         */
/* Use, modification and distribution is subject to the Boost Software     */
/* License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at */
/* http://www.boost.org/LICENSE_1_0.txt)                                   */
/*                                                                         */
/*  Authors: Jeremiah Willcock                                             */
/*           Andrew Lumsdaine                                              */

#include <stdlib.h>
#include <stdint.h>
#include <assert.h>
#ifndef __STDC_FORMAT_MACROS
#define __STDC_FORMAT_MACROS
#endif
#include <inttypes.h>

#include "user_settings.h"
#include "splittable_mrg.h"
#include "graph_generator.h"

/* Initiator settings: for faster random number generation, the initiator
 * probabilities are defined as fractions (a = INITIATOR_A_NUMERATOR /
 * INITIATOR_DENOMINATOR, b = c = INITIATOR_BC_NUMERATOR /
 * INITIATOR_DENOMINATOR, d = 1 - a - b - c. */
#define INITIATOR_A_NUMERATOR 5700
#define INITIATOR_BC_NUMERATOR 1900
#define INITIATOR_DENOMINATOR 10000

/* If this macro is defined to a non-zero value, use SPK_NOISE_LEVEL /
 * INITIATOR_DENOMINATOR as the noise parameter to use in introducing noise
 * into the graph parameters.  The approach used is from "A Hitchhiker's Guide
 * to Choosing Parameters of Stochastic Kronecker Graphs" by C. Seshadhri, Ali
 * Pinar, and Tamara G. Kolda (http://arxiv.org/abs/1102.5046v1), except that
 * the adjustment here is chosen based on the current level being processed
 * rather than being chosen randomly. */
#define SPK_NOISE_LEVEL 0
/* #define SPK_NOISE_LEVEL 1000 -- in INITIATOR_DENOMINATOR units */

/* Pick one of the four Kronecker initiator quadrants (0..3) according to the
 * (possibly noise-adjusted) initiator probabilities. */
static int generate_4way_bernoulli(mrg_state* st, int level, int nlevels) {
  /* Generate a pseudorandom number in the range [0, INITIATOR_DENOMINATOR)
   * without modulo bias. */
  static const uint32_t limit = (UINT32_C(0xFFFFFFFF) % INITIATOR_DENOMINATOR);
  uint32_t val = mrg_get_uint_orig(st);
  if (/* Unlikely */ val < limit) {
    /* Rejection sampling: redraw until out of the biased low range */
    do {
      val = mrg_get_uint_orig(st);
    } while (val < limit);
  }
#if SPK_NOISE_LEVEL == 0
  int spk_noise_factor = 0;
#else
  /* Deterministic per-level noise, in INITIATOR_DENOMINATOR units */
  int spk_noise_factor = 2 * SPK_NOISE_LEVEL * level / nlevels - SPK_NOISE_LEVEL;
#endif
  int adjusted_bc_numerator = INITIATOR_BC_NUMERATOR + spk_noise_factor;
  val %= INITIATOR_DENOMINATOR;
  /* Walk the cumulative distribution: b, then c, then a, else d */
  if (val < adjusted_bc_numerator) return 1;
  val -= adjusted_bc_numerator;
  if (val < adjusted_bc_numerator) return 2;
  val -= adjusted_bc_numerator;
#if SPK_NOISE_LEVEL == 0
  if (val < INITIATOR_A_NUMERATOR) return 0;
#else
  /* Rescale a so that a + b + c + d still sums to the denominator */
  if (val < INITIATOR_A_NUMERATOR * (INITIATOR_DENOMINATOR - 2 * INITIATOR_BC_NUMERATOR) / (INITIATOR_DENOMINATOR - 2 * adjusted_bc_numerator)) return 0;
#endif
  return 3;
}

/* Reverse bits in a number; this should be optimized for performance
 * (including using bit- or byte-reverse intrinsics if your platform has
 * them). */
static inline uint64_t bitreverse(uint64_t x) {
#if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 3)
#define USE_GCC_BYTESWAP /* __builtin_bswap* are in 4.3 but not 4.2 */
#endif

#ifdef FAST_64BIT_ARITHMETIC

  /* 64-bit code */
#ifdef USE_GCC_BYTESWAP
  x = __builtin_bswap64(x);
#else
  /* Byte swap via shift/mask halving steps */
  x = (x >> 32) | (x << 32);
  x = ((x >> 16) & UINT64_C(0x0000FFFF0000FFFF)) | ((x & UINT64_C(0x0000FFFF0000FFFF)) << 16);
  x = ((x >> 8) & UINT64_C(0x00FF00FF00FF00FF)) | ((x & UINT64_C(0x00FF00FF00FF00FF)) << 8);
#endif
  /* Reverse bits within each byte */
  x = ((x >> 4) & UINT64_C(0x0F0F0F0F0F0F0F0F)) | ((x & UINT64_C(0x0F0F0F0F0F0F0F0F)) << 4);
  x = ((x >> 2) & UINT64_C(0x3333333333333333)) | ((x & UINT64_C(0x3333333333333333)) << 2);
  x = ((x >> 1) & UINT64_C(0x5555555555555555)) | ((x & UINT64_C(0x5555555555555555)) << 1);
  return x;

#else

  /* 32-bit code: process the two halves independently, then swap them */
  uint32_t h = (uint32_t)(x >> 32);
  uint32_t l = (uint32_t)(x & UINT32_MAX);
#ifdef USE_GCC_BYTESWAP
  h = __builtin_bswap32(h);
  l = __builtin_bswap32(l);
#else
  h = (h >> 16) | (h << 16);
  l = (l >> 16) | (l << 16);
  h = ((h >> 8) & UINT32_C(0x00FF00FF)) | ((h & UINT32_C(0x00FF00FF)) << 8);
  l = ((l >> 8) & UINT32_C(0x00FF00FF)) | ((l & UINT32_C(0x00FF00FF)) << 8);
#endif
  h = ((h >> 4) & UINT32_C(0x0F0F0F0F)) | ((h & UINT32_C(0x0F0F0F0F)) << 4);
  l = ((l >> 4) & UINT32_C(0x0F0F0F0F)) | ((l & UINT32_C(0x0F0F0F0F)) << 4);
  h = ((h >> 2) & UINT32_C(0x33333333)) | ((h & UINT32_C(0x33333333)) << 2);
  l = ((l >> 2) & UINT32_C(0x33333333)) | ((l & UINT32_C(0x33333333)) << 2);
  h = ((h >> 1) & UINT32_C(0x55555555)) | ((h & UINT32_C(0x55555555)) << 1);
  l = ((l >> 1) & UINT32_C(0x55555555)) | ((l & UINT32_C(0x55555555)) << 1);
  return ((uint64_t)l << 32) | h; /* Swap halves */

#endif
}

/* Apply a permutation to scramble vertex numbers; a randomly generated
 * permutation is not used because applying it at scale is too expensive. */
static inline int64_t scramble(int64_t v0, int lgN, uint64_t val0, uint64_t val1) {
  uint64_t v = (uint64_t)v0;
  /* Two rounds of multiply + bit-reverse, keyed by val0/val1; the low bits
   * forced on by the OR constants keep the multipliers odd (invertible
   * mod 2^64) */
  v += val0 + val1;
  v *= (val0 | UINT64_C(0x4519840211493211));
  v = (bitreverse(v) >> (64 - lgN));
  assert ((v >> lgN) == 0);
  v *= (val1 | UINT64_C(0x3050852102C843A5));
  v = (bitreverse(v) >> (64 - lgN));
  assert ((v >> lgN) == 0);
  return (int64_t)v;
}

/* Make a single graph edge using a pre-set MRG state.
 */
static void make_one_edge(int64_t nverts, int level, int lgN, mrg_state* st, packed_edge* result, uint64_t val0, uint64_t val1) {
  /* Descend the Kronecker recursion: each iteration picks one quadrant of
   * the current adjacency sub-matrix and halves the vertex range, until a
   * single (src, tgt) cell remains. */
  int64_t base_src = 0, base_tgt = 0;
  while (nverts > 1) {
    int square = generate_4way_bernoulli(st, level, lgN);
    int src_offset = square / 2;
    int tgt_offset = square % 2;
    assert (base_src <= base_tgt);
    if (base_src == base_tgt) {
      /* Clip-and-flip for undirected graph */
      if (src_offset > tgt_offset) {
        int temp = src_offset;
        src_offset = tgt_offset;
        tgt_offset = temp;
      }
    }
    nverts /= 2;
    ++level;
    base_src += nverts * src_offset;
    base_tgt += nverts * tgt_offset;
  }
  /* Scramble both endpoints before writing so vertex ids look random */
  write_edge(result,
             scramble(base_src, lgN, val0, val1),
             scramble(base_tgt, lgN, val0, val1));
}

/* Generate a range of edges (from start_edge to end_edge of the total graph),
 * writing into elements [0, end_edge - start_edge) of the edges array.  This
 * code is parallel on OpenMP and XMT; it must be used with
 * separately-implemented SPMD parallelism for MPI. */
void generate_kronecker_range(
       const uint_fast32_t seed[5] /* All values in [0, 2^31 - 1), not all zero */,
       int logN /* In base 2 */,
       int64_t start_edge, int64_t end_edge,
       packed_edge* edges) {
  mrg_state state;
  int64_t nverts = (int64_t)1 << logN;
  int64_t ei;
  mrg_seed(&state, seed);
  uint64_t val0, val1; /* Values for scrambling */
  {
    /* Derive the two scramble keys from a skipped-ahead copy of the seed
     * state, so they are independent of the per-edge streams below */
    mrg_state new_state = state;
    mrg_skip(&new_state, 50, 7, 0);
    val0 = mrg_get_uint_orig(&new_state);
    val0 *= UINT64_C(0xFFFFFFFF);
    val0 += mrg_get_uint_orig(&new_state);
    val1 = mrg_get_uint_orig(&new_state);
    val1 *= UINT64_C(0xFFFFFFFF);
    val1 += mrg_get_uint_orig(&new_state);
  }
#ifdef _OPENMP
#pragma omp parallel for
#endif
#ifdef __MTA__
#pragma mta assert parallel
#pragma mta block schedule
#endif
  for (ei = start_edge; ei < end_edge; ++ei) {
    /* Each edge uses its own skip-ahead stream, so iterations are
     * independent and the result is deterministic regardless of thread
     * scheduling */
    mrg_state new_state = state;
    mrg_skip(&new_state, 0, ei, 0);
    make_one_edge(nverts, 0, logN, &new_state, edges + (ei - start_edge), val0, val1);
  }
}
GB_unaryop__ainv_uint64_int64.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).
// NOTE(review): changes belong in the generator scripts, not in this
// instantiation for AINV (additive inverse) with uint64 output, int64 input.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function: GB_unop__ainv_uint64_int64
// op(A') function: GB_tran__ainv_uint64_int64

// C type:   uint64_t
// A type:   int64_t
// cast:     uint64_t cij = (uint64_t) aij
// unaryop:  cij = -aij

#define GB_ATYPE \
    int64_t

#define GB_CTYPE \
    uint64_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int64_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = -x ;

// casting
#define GB_CASTING(z, x) \
    uint64_t z = (uint64_t) x ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    GB_GETA (aij, Ax, pA) ;         \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ;           \
    GB_OP (GB_CX (pC), x) ;         \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_AINV || GxB_NO_UINT64 || GxB_NO_INT64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop__ainv_uint64_int64
(
    uint64_t *restrict Cx,
    const int64_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_tran__ainv_uint64_int64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
matrixstrassen.h
//================================================================================== // BSD 2-Clause License // // Copyright (c) 2014-2022, NJIT, Duality Technologies Inc. and other contributors // // All rights reserved. // // Author TPOC: contact@openfhe.org // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // // 1. Redistributions of source code must retain the above copyright notice, this // list of conditions and the following disclaimer. // // 2. Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" // AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE // FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, // OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
//==================================================================================

/*
  matrix strassen operations
 */

#ifndef LBCRYPTO_MATH_MATRIXSTRASSEN_H
#define LBCRYPTO_MATH_MATRIXSTRASSEN_H

#include <assert.h>
#include <memory>
#include <vector>

#include "math/matrix.h"

namespace lbcrypto {

/**
 * Matrix class whose Mult() uses the CAPS (Communication-Avoiding Parallel
 * Strassen) algorithm instead of the naive O(n^3) product.
 *
 * NOTE(review): Add/Sub/ScalarMult/ExtractRow dereference the stored entries
 * (*data[i][j]) while Equal compares them directly; this presumes Element is
 * pointer-like (see zeroUniquePtr below) — confirm against the .cpp
 * implementation before relying on it for value-type Elements.
 */
template <class Element>
class MatrixStrassen {  // TODO : public Serializable {
public:
    // row-major storage: data[row][col]
    typedef std::vector<std::vector<Element>> data_t;
    // flattened (linearized) storage used by the CAPS kernels
    typedef std::vector<Element> lineardata_t;
    typedef typename std::vector<Element>::iterator it_lineardata_t;
    // factory returning a freshly allocated zero element
    typedef std::function<Element(void)> alloc_func;

    /**
     * Constructor that initializes matrix values using a zero allocator
     *
     * @param allocZero lambda function for zero initialization.
     * @param rows number of rows.
     * @param cols number of columns.
     */
    MatrixStrassen(alloc_func allocZero, size_t rows, size_t cols) : data(), rows(rows), cols(cols), allocZero(allocZero) {
        data.resize(rows);
        for (auto row = data.begin(); row != data.end(); ++row) {
            for (size_t col = 0; col < cols; ++col) {
                row->push_back(allocZero());
            }
        }
    }

    /**
     * Constructor that initializes matrix values using a distribution
     * generation allocator
     *
     * @param allocZero lambda function for zero initialization (used for
     * initializing derived matrix objects)
     * @param rows number of rows.
     * @param cols number of columns.
     * @param allocGen lambda function for initialization using a distribution
     * generator.
     */
    MatrixStrassen(alloc_func allocZero, size_t rows, size_t cols, alloc_func allocGen);

    /**
     * Constructor of an empty matrix; SetSize must be called on this matrix to
     * use it.  Basically this exists to support deserializing.
     *
     * @param allocZero lambda function for zero initialization.
     */
    explicit MatrixStrassen(alloc_func allocZero) : data(), rows(0), cols(0), allocZero(allocZero) {}

    // Resize an empty matrix and zero-fill it; throws if already sized.
    void SetSize(size_t rows, size_t cols) {
        if (this->rows != 0 || this->cols != 0) {
            OPENFHE_THROW(not_available_error, "You cannot SetSize on a non-empty matrix");
        }
        this->rows = rows;
        this->cols = cols;

        data.resize(rows);
        for (auto row = data.begin(); row != data.end(); ++row) {
            for (size_t col = 0; col < cols; ++col) {
                row->push_back(allocZero());
            }
        }
    }

    /**
     * Copy constructor (deep copy of the element data)
     *
     * @param other the matrix object to be copied
     */
    MatrixStrassen(const MatrixStrassen<Element>& other) : data(), rows(other.rows), cols(other.cols), allocZero(other.allocZero) {
        deepCopyData(other.data);
    }

    /**
     * Assignment operator
     *
     * @param other the matrix object whose values are to be copied
     * @return the resulting matrix
     */
    inline MatrixStrassen<Element>& operator=(const MatrixStrassen<Element>& other);

    /**
     * In-place change of the current matrix to a matrix of all ones
     *
     * @return the resulting matrix
     */
    inline MatrixStrassen<Element>& Ones();

    /**
     * Fill matrix using the same element
     *
     * @param val the element the matrix is filled by
     * @return the resulting matrix
     */
    inline MatrixStrassen<Element>& Fill(const Element& val);

    /**
     * In-place change of the current matrix to the identity matrix
     *
     * @return the resulting matrix
     */
    inline MatrixStrassen<Element>& Identity();

    /**
     * Sets the first row to be powers of the base (powers of two by default)
     *
     * @return the resulting matrix
     */
    inline MatrixStrassen<Element> GadgetVector(int32_t base = 2) const;

    /**
     * Computes the infinity norm
     *
     * @return the norm in double format
     */
    inline double Norm() const;

    /**
     * Operator for matrix multiplication (delegates to the Strassen Mult)
     *
     * @param other the multiplier matrix
     * @return the result of multiplication
     */
    inline MatrixStrassen<Element> operator*(MatrixStrassen<Element> const& other) const {
        return Mult(other);
    }

    /**
     * Multiplication of matrix by a scalar
     *
     * @param other the multiplier element
     * @return the result of multiplication
     */
    inline MatrixStrassen<Element> ScalarMult(Element const& other) const {
        MatrixStrassen<Element> result(*this);
        // NOTE(review): loop counters are int32_t compared against size_t
        // members; original code kept as-is for OpenMP-2 index requirements.
#pragma omp parallel for
        for (int32_t col = 0; col < result.cols; ++col) {
            for (int32_t row = 0; row < result.rows; ++row) {
                *result.data[row][col] = *result.data[row][col] * other;
            }
        }
        return result;
    }

    /**
     * Operator for scalar multiplication
     *
     * @param other the multiplier element
     * @return the result of multiplication
     */
    inline MatrixStrassen<Element> operator*(Element const& other) const {
        return ScalarMult(other);
    }

    /**
     * Equality check (dimensions and element-wise comparison)
     *
     * @param other the matrix object to compare to
     * @return the boolean result
     */
    inline bool Equal(MatrixStrassen<Element> const& other) const {
        if (rows != other.rows || cols != other.cols) {
            return false;
        }
        for (size_t i = 0; i < rows; ++i) {
            for (size_t j = 0; j < cols; ++j) {
                if (data[i][j] != other.data[i][j]) {
                    return false;
                }
            }
        }
        return true;
    }

    /**
     * Operator for equality check
     *
     * @param other the matrix object to compare to
     * @return the boolean result
     */
    inline bool operator==(MatrixStrassen<Element> const& other) const {
        return Equal(other);
    }

    /**
     * Operator for non-equality check
     *
     * @param other the matrix object to compare to
     * @return the boolean result
     */
    inline bool operator!=(MatrixStrassen<Element> const& other) const {
        return !Equal(other);
    }

    /**
     * Get property to access the data as a vector of vectors
     *
     * @return the data as vector of vectors
     */
    const data_t& GetData() const {
        return data;
    }

    /**
     * Get property to access the number of rows in the matrix
     *
     * @return the number of rows
     */
    size_t GetRows() const {
        return rows;
    }

    /**
     * Get property to access the number of columns in the matrix
     *
     * @return the number of columns
     */
    size_t GetCols() const {
        return cols;
    }

    /**
     * Get property to access the zero allocator for the matrix
     *
     * @return the lambda function corresponding to the element zero allocator
     */
    alloc_func GetAllocator() const {
        return allocZero;
    }

    /**
     * Sets the evaluation or coefficient representation for all ring elements
     * that support the SetFormat method
     *
     * @param format the enum value corresponding to coefficient or evaluation
     * representation
     */
    void SetFormat(Format format);

    /**
     * MatrixStrassen addition
     *
     * @param other the matrix to be added
     * @return the resulting matrix
     */
    inline MatrixStrassen<Element> Add(MatrixStrassen<Element> const& other) const {
        if (rows != other.rows || cols != other.cols) {
            OPENFHE_THROW(math_error, "Addition operands have incompatible dimensions");
        }
        MatrixStrassen<Element> result(*this);
#pragma omp parallel for
        for (int32_t j = 0; j < cols; ++j) {
            for (int32_t i = 0; i < rows; ++i) {
                *result.data[i][j] += *other.data[i][j];
            }
        }
        return result;
    }

    /**
     * Operator for matrix addition
     *
     * @param other the matrix to be added
     * @return the resulting matrix
     */
    inline MatrixStrassen<Element> operator+(MatrixStrassen<Element> const& other) const {
        return this->Add(other);
    }

    /**
     * Operator for in-place addition
     *
     * @param other the matrix to be added
     * @return the resulting matrix (same object)
     */
    inline MatrixStrassen<Element>& operator+=(MatrixStrassen<Element> const& other);

    /**
     * MatrixStrassen subtraction
     *
     * @param other the matrix to be subtracted
     * @return the resulting matrix
     */
    inline MatrixStrassen<Element> Sub(MatrixStrassen<Element> const& other) const {
        if (rows != other.rows || cols != other.cols) {
            OPENFHE_THROW(math_error, "Subtraction operands have incompatible dimensions");
        }
        // unlike Add, result starts from zeros and is filled element-wise
        MatrixStrassen<Element> result(allocZero, rows, other.cols);
#pragma omp parallel for
        for (int32_t j = 0; j < cols; ++j) {
            for (int32_t i = 0; i < rows; ++i) {
                *result.data[i][j] = *data[i][j] - *other.data[i][j];
            }
        }
        return result;
    }

    /**
     * Operator for matrix subtraction
     *
     * @param other the matrix to be subtracted
     * @return the resulting matrix
     */
    inline MatrixStrassen<Element> operator-(MatrixStrassen<Element> const& other) const {
        return this->Sub(other);
    }

    /**
     * Operator for in-place matrix subtraction
     *
     * @param other the matrix to be subtracted
     * @return the resulting matrix (same object)
     */
    inline MatrixStrassen<Element>& operator-=(MatrixStrassen<Element> const& other);

    /**
     * MatrixStrassen transposition
     *
     * @return the resulting matrix
     */
    inline MatrixStrassen<Element> Transpose() const;

    // YSP The signature of this method needs to be changed in the future
    /**
     * MatrixStrassen determinant - found using Laplace formula with complexity
     * O(d!), where d is the dimension
     *
     * @param result where the result is stored
     */
    inline void Determinant(Element* result) const;

    /**
     * Cofactor matrix - the matrix of determinants of the minors A_{ij}
     * multiplied by -1^{i+j}
     *
     * @return the cofactor matrix for the given matrix
     */
    inline MatrixStrassen<Element> CofactorMatrixStrassen() const;

    /**
     * Add rows to bottom of the matrix
     *
     * @param other the matrix to be added to the bottom of current matrix
     * @return the resulting matrix
     */
    inline MatrixStrassen<Element>& VStack(MatrixStrassen<Element> const& other);

    /**
     * Add columns to the right of the matrix
     *
     * @param other the matrix to be added to the right of current matrix
     * @return the resulting matrix
     */
    inline MatrixStrassen<Element>& HStack(MatrixStrassen<Element> const& other);

    /**
     * MatrixStrassen indexing operator - writeable instance of the element
     *
     * @param row row index
     * @param col column index
     * @return the element at the index
     */
    inline Element& operator()(size_t row, size_t col) {
        return data[row][col];
    }

    /**
     * MatrixStrassen indexing operator - read-only instance of the element
     *
     * @param row row index
     * @param col column index
     * @return the element at the index
     */
    inline Element const& operator()(size_t row, size_t col) const {
        return data[row][col];
    }

    /**
     * MatrixStrassen row extractor (returns a 1 x cols matrix)
     *
     * @param row row index
     * @return the row at the index
     */
    inline MatrixStrassen<Element> ExtractRow(size_t row) const {
        MatrixStrassen<Element> result(this->allocZero, 1, this->cols);
        int i = 0;
        for (auto elem = this->GetData()[row].begin(); elem != this->GetData()[row].end(); ++elem) {
            result(0, i) = **elem;
            i++;
        }
        return result;
        // return *this;
    }

    /**
     * Call switch format for each (ring) element
     */
    inline void SwitchFormat();

    /**
     * MatrixStrassen multiplication (CAPS Strassen)
     *
     * @param other the multiplier matrix
     * @param nrec number of Strassen recursion levels (0 = decided internally)
     * @param pad padding size (-1 = decided internally)
     * @return the result of multiplication
     */
    MatrixStrassen<Element> Mult(const MatrixStrassen<Element>& other, int nrec = 0, int pad = -1) const;

    /*
     * Multiply the matrix by a vector whose elements are all 1's.  This causes
     * the elements of each row of the matrix to be added and placed into the
     * corresponding position in the output vector.
     */
    MatrixStrassen<Element> MultByUnityVector() const;

    /*
     * Multiply the matrix by a vector of random 1's and 0's, which is the same
     * as adding select elements in each row together.  Return a vector that is
     * a rows x 1 matrix.
     */
    MatrixStrassen<Element> MultByRandomVector(std::vector<int> ranvec) const;

private:
    // CAPS layout descriptor for one (possibly recursive) matrix block
    struct MatDescriptor {
        int lda;          // leading dimension
        int nrec;         // number of Strassen recursion levels
        int nproc;        // total processor count
        int nprocr;       // processors per row
        int nprocc;       // processors per column
        int nproc_summa;  // processors used by the SUMMA base case
        int bs;           // block size
    };

    const int DESC_SIZE = 7;  // number of ints that make up a MatDescriptor
    const int rank = 0, base = 0;

    mutable data_t data;
    size_t rows;
    mutable int rowpad = 0;  // rows of padding added for Strassen recursion
    size_t cols;
    mutable int colpad = 0;  // columns of padding added for Strassen recursion
    alloc_func allocZero;
    mutable char* pattern = nullptr;
    // operation counters maintained by the CAPS kernels
    mutable int numAdd = 0;
    mutable int numMult = 0;
    mutable int numSub = 0;
    mutable MatDescriptor desc;
    // cached zero element; name suggests Element was once a unique_ptr type
    mutable Element zeroUniquePtr = allocZero();
    mutable int NUM_THREADS = 1;

    // C = A * B on linearized data, dispatching between recursion and base case
    void multiplyInternalCAPS(it_lineardata_t A, it_lineardata_t B, it_lineardata_t C, MatDescriptor desc, it_lineardata_t work) const;
    // one depth-first Strassen recursion step
    void strassenDFSCAPS(it_lineardata_t A, it_lineardata_t B, it_lineardata_t C, MatDescriptor desc, it_lineardata_t workPassThrough) const;
    // base-case block multiplication
    void block_multiplyCAPS(it_lineardata_t A, it_lineardata_t B, it_lineardata_t C, MatDescriptor d, it_lineardata_t workPassThrough) const;
    // convert between the 2-D data member and the flat CAPS layout
    void LinearizeDataCAPS(lineardata_t* lineardataPtr) const;
    void UnlinearizeDataCAPS(lineardata_t* lineardataPtr) const;
    int getRank() const;
    void verifyDescriptor(MatDescriptor desc);
    long long numEntriesPerProc(MatDescriptor desc) const;  // NOLINT

    // deep copy of data - used for copy constructor
    void deepCopyData(data_t const& src);
    void getData(const data_t& Adata, const data_t& Bdata, const data_t& Cdata, int row, int inner, int col) const;
    // element-wise helpers over linearized blocks
    void smartSubtractionCAPS(it_lineardata_t result, it_lineardata_t A, it_lineardata_t B) const;
    void smartAdditionCAPS(it_lineardata_t result, it_lineardata_t A, it_lineardata_t B) const;
    void addMatricesCAPS(int numEntries, it_lineardata_t C, it_lineardata_t A, it_lineardata_t B) const;
    void addSubMatricesCAPS(int numEntries, it_lineardata_t T1, it_lineardata_t S11, it_lineardata_t S12, it_lineardata_t T2, it_lineardata_t S21, it_lineardata_t S22) const;
    void subMatricesCAPS(int numEntries, it_lineardata_t C, it_lineardata_t A, it_lineardata_t B) const;
    void tripleAddMatricesCAPS(int numEntries, it_lineardata_t T1, it_lineardata_t S11, it_lineardata_t S12, it_lineardata_t T2, it_lineardata_t S21, it_lineardata_t S22, it_lineardata_t T3, it_lineardata_t S31, it_lineardata_t S32) const;
    void tripleSubMatricesCAPS(int numEntries, it_lineardata_t T1, it_lineardata_t S11, it_lineardata_t S12, it_lineardata_t T2, it_lineardata_t S21, it_lineardata_t S22, it_lineardata_t T3, it_lineardata_t S31, it_lineardata_t S32) const;
    // data (re)distribution helpers inherited from the distributed CAPS code
    void distributeFrom1ProcCAPS(MatDescriptor desc, it_lineardata_t O, it_lineardata_t I) const;
    void collectTo1ProcCAPS(MatDescriptor desc, it_lineardata_t O, it_lineardata_t I) const;
    void sendBlockCAPS(int rank, int target, it_lineardata_t O, int bs, int source, it_lineardata_t I, int ldi) const;
    void receiveBlockCAPS(int rank, int target, it_lineardata_t O, int bs, int source, it_lineardata_t I, int ldo) const;
    void distributeFrom1ProcRecCAPS(MatDescriptor desc, it_lineardata_t O, it_lineardata_t I, int ldi) const;
    void collectTo1ProcRecCAPS(MatDescriptor desc, it_lineardata_t O, it_lineardata_t I, int ldo) const;
};

/**
 * Operator for scalar multiplication of matrix
 *
 * @param e element
 * @param M matrix
 * @return the resulting matrix
 */
template <class Element>
inline MatrixStrassen<Element> operator*(Element const& e, MatrixStrassen<Element> const& M) {
    return M.ScalarMult(e);
}

/**
 * Generates a matrix of rotations.  See pages 7-8 of
 * https://eprint.iacr.org/2013/297
 *
 * @param inMat the matrix of power-of-2 cyclotomic ring elements to be rotated
 * @return the resulting matrix of big binary integers
 */
inline MatrixStrassen<BigInteger> Rotate(MatrixStrassen<Poly> const& inMat);

/**
 * Each element becomes a square matrix with columns of that element's
 * rotations in coefficient form.  See pages 7-8 of
 * https://eprint.iacr.org/2013/297
 *
 * @param inMat the matrix of power-of-2 cyclotomic ring elements to be rotated
 * @return the resulting matrix of big binary integers
 */
inline MatrixStrassen<BigVector> RotateVecResult(MatrixStrassen<Poly> const& inMat);

/**
 * Stream output operator
 *
 * @param os stream
 * @param m matrix to be outputted
 * @return the chained stream
 */
template <class Element>
inline std::ostream& operator<<(std::ostream& os, const MatrixStrassen<Element>& m);

/**
 * Gives the Cholesky decomposition of the input matrix.
 * The assumption is that the covariance matrix does not have large
 * coefficients because it is formed by discrete gaussians e and s; this
 * implies int32_t can be used.  This algorithm can be further improved - see
 * the Darmstadt paper section 4.4 http://eprint.iacr.org/2013/297.pdf
 *
 * @param input the matrix for which the Cholesky decomposition is to be
 * computed
 * @return the resulting matrix of floating-point numbers
 */
inline MatrixStrassen<double> Cholesky(const MatrixStrassen<int32_t>& input);

/**
 * Convert a matrix of integers from BigInteger to int32_t
 * Convert from Z_q to [-q/2, q/2]
 *
 * @param input the input matrix
 * @param modulus the ring modulus
 * @return the resulting matrix of int32_t
 */
inline MatrixStrassen<int32_t> ConvertToInt32(const MatrixStrassen<BigInteger>& input, const BigInteger& modulus);

/**
 * Convert a matrix of BigVector to int32_t
 * Convert from Z_q to [-q/2, q/2]
 *
 * @param input the input matrix
 * @param modulus the ring modulus
 * @return the resulting matrix of int32_t
 */
inline MatrixStrassen<int32_t> ConvertToInt32(const MatrixStrassen<BigVector>& input, const BigInteger& modulus);

/**
 * Split a vector of int32_t into a vector of ring elements with ring dimension
 * n
 *
 * @param other the input matrix
 * @param n the ring dimension
 * @param params Poly element params
 * @return the resulting matrix of Poly
 */
inline MatrixStrassen<Poly> SplitInt32IntoPolyElements(MatrixStrassen<int32_t> const& other, size_t n, const std::shared_ptr<ILParams> params);

/**
 * Another method for splitting a vector of int32_t into a vector of ring
 * elements with ring dimension n
 *
 * @param other the input matrix
 * @param n the ring dimension
 * @param params Poly element params
 * @return the resulting matrix of Poly
 */
inline MatrixStrassen<Poly> SplitInt32AltIntoPolyElements(MatrixStrassen<int32_t> const& other, size_t n, const std::shared_ptr<ILParams> params);

}  // namespace lbcrypto

#endif  // LBCRYPTO_MATH_MATRIXSTRASSEN_H
constitute.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % CCCC OOO N N SSSSS TTTTT IIIII TTTTT U U TTTTT EEEEE % % C O O NN N SS T I T U U T E % % C O O N N N ESSS T I T U U T EEE % % C O O N NN SS T I T U U T E % % CCCC OOO N N SSSSS T IIIII T UUU T EEEEE % % % % % % MagickCore Methods to Consitute an Image % % % % Software Design % % Cristy % % October 1998 % % % % % % Copyright 1999-2019 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % */ /* Include declarations. 
*/ #include "MagickCore/studio.h" #include "MagickCore/attribute.h" #include "MagickCore/blob.h" #include "MagickCore/blob-private.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/cache.h" #include "MagickCore/client.h" #include "MagickCore/coder-private.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/constitute.h" #include "MagickCore/constitute-private.h" #include "MagickCore/delegate.h" #include "MagickCore/geometry.h" #include "MagickCore/identify.h" #include "MagickCore/image-private.h" #include "MagickCore/list.h" #include "MagickCore/magick.h" #include "MagickCore/memory_.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/option.h" #include "MagickCore/pixel.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/policy.h" #include "MagickCore/profile.h" #include "MagickCore/profile-private.h" #include "MagickCore/property.h" #include "MagickCore/quantum.h" #include "MagickCore/resize.h" #include "MagickCore/resource_.h" #include "MagickCore/semaphore.h" #include "MagickCore/statistic.h" #include "MagickCore/stream.h" #include "MagickCore/string_.h" #include "MagickCore/string-private.h" #include "MagickCore/timer.h" #include "MagickCore/token.h" #include "MagickCore/transform.h" #include "MagickCore/utility.h" #include "MagickCore/utility-private.h" /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C o n s t i t u t e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ConstituteImage() returns an image from the pixel data you supply. % The pixel data must be in scanline order top-to-bottom. The data can be % char, short int, int, float, or double. Float and double require the % pixels to be normalized [0..1], otherwise [0..QuantumRange]. 
For example, to
%  create a 640x480 image from unsigned red-green-blue character data, use:
%
%      image = ConstituteImage(640,480,"RGB",CharPixel,pixels,&exception);
%
%  The format of the ConstituteImage method is:
%
%      Image *ConstituteImage(const size_t columns,const size_t rows,
%        const char *map,const StorageType storage,const void *pixels,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o columns: width in pixels of the image.
%
%    o rows: height in pixels of the image.
%
%    o map: This string reflects the expected ordering of the pixel array.
%      It can be any combination or order of R = red, G = green, B = blue,
%      A = alpha (0 is transparent), O = opacity (0 is opaque), C = cyan,
%      Y = yellow, M = magenta, K = black, I = intensity (for grayscale),
%      P = pad.
%
%    o storage: Define the data type of the pixels.  Float and double types
%      are expected to be normalized [0..1] otherwise [0..QuantumRange].
%      Choose from these types: CharPixel, DoublePixel, FloatPixel,
%      IntegerPixel, LongPixel, QuantumPixel, or ShortPixel.
%
%    o pixels: This array of values contain the pixel components as defined by
%      map and type.  You must preallocate this array where the expected
%      length varies depending on the values of width, height, map, and type.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ConstituteImage(const size_t columns,const size_t rows,
  const char *map,const StorageType storage,const void *pixels,
  ExceptionInfo *exception)
{
  Image
    *image;

  MagickBooleanType
    status;

  register ssize_t
    i;

  size_t
    length;

  /*
    Allocate image structure.
  */
  assert(map != (const char *) NULL);
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",map);
  assert(pixels != (void *) NULL);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  image=AcquireImage((ImageInfo *) NULL,exception);
  if (image == (Image *) NULL)
    return((Image *) NULL);
  /*
    Infer alpha trait and colorspace from the channel letters in the map.
  */
  length=strlen(map);
  for (i=0; i < (ssize_t) length; i++)
  {
    switch (map[i])
    {
      case 'a':
      case 'A':
      case 'O':
      case 'o':
      {
        image->alpha_trait=BlendPixelTrait;
        break;
      }
      case 'C':
      case 'c':
      case 'm':
      case 'M':
      case 'Y':
      case 'y':
      case 'K':
      case 'k':
      {
        image->colorspace=CMYKColorspace;
        break;
      }
      case 'I':
      case 'i':
      {
        image->colorspace=GRAYColorspace;
        break;
      }
      default:
      {
        /* a single unrecognized channel letter is treated as grayscale */
        if (length == 1)
          image->colorspace=GRAYColorspace;
        break;
      }
    }
  }
  status=SetImageExtent(image,columns,rows,exception);
  if (status == MagickFalse)
    return(DestroyImageList(image));
  status=ResetImagePixels(image,exception);
  if (status == MagickFalse)
    return(DestroyImageList(image));
  status=ImportImagePixels(image,0,0,columns,rows,map,storage,pixels,exception);
  if (status == MagickFalse)
    image=DestroyImage(image);
  return(image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   P i n g I m a g e                                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  PingImage() returns all the properties of an image or image sequence
%  except for the pixels.  It is much faster and consumes far less memory
%  than ReadImage().  On failure, a NULL image is returned and exception
%  describes the reason for the failure.
%
%  The format of the PingImage method is:
%
%      Image *PingImage(const ImageInfo *image_info,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image_info: Ping the image defined by the file or filename members of
%      this structure.
%
%    o exception: return any errors or warnings in this structure.
%
*/

#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif

/*
  Stream handler for PingImage(): accepts each scanline and discards it,
  reporting the full column count so decoding proceeds without storing pixels.
*/
static size_t PingStream(const Image *magick_unused(image),
  const void *magick_unused(pixels),const size_t columns)
{
  magick_unreferenced(image);
  magick_unreferenced(pixels);

  return(columns);
}

#if defined(__cplusplus) || defined(c_plusplus)
}
#endif

MagickExport Image *PingImage(const ImageInfo *image_info,
  ExceptionInfo *exception)
{
  Image
    *image;

  ImageInfo
    *ping_info;

  assert(image_info != (ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  assert(exception != (ExceptionInfo *) NULL);
  /*
    Read through a pixel-discarding stream with the ping flag set.
  */
  ping_info=CloneImageInfo(image_info);
  ping_info->ping=MagickTrue;
  image=ReadStream(ping_info,&PingStream,exception);
  if (image != (Image *) NULL)
    {
      ResetTimer(&image->timer);
      if (ping_info->verbose != MagickFalse)
        (void) IdentifyImage(image,stdout,MagickFalse,exception);
    }
  ping_info=DestroyImageInfo(ping_info);
  return(image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   P i n g I m a g e s                                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  PingImages() pings one or more images and returns them as an image list.
%
%  The format of the PingImage method is:
%
%      Image *PingImages(ImageInfo *image_info,const char *filename,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image_info: the image info.
%
%    o filename: the image filename.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *PingImages(ImageInfo *image_info,const char *filename,
  ExceptionInfo *exception)
{
  char
    ping_filename[MagickPathExtent];

  Image
    *image,
    *images;

  ImageInfo
    *read_info;

  /*
    Ping image list from a file.
  */
  assert(image_info != (ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  assert(exception != (ExceptionInfo *) NULL);
  (void) SetImageOption(image_info,"filename",filename);
  (void) CopyMagickString(image_info->filename,filename,MagickPathExtent);
  (void) InterpretImageFilename(image_info,(Image *) NULL,image_info->filename,
    (int) image_info->scene,ping_filename,exception);
  if (LocaleCompare(ping_filename,image_info->filename) != 0)
    {
      ExceptionInfo
        *sans;

      ssize_t
        extent,
        scene;

      /*
        Images of the form image-%d.png[1-5].
      */
      read_info=CloneImageInfo(image_info);
      sans=AcquireExceptionInfo();
      (void) SetImageInfo(read_info,0,sans);
      sans=DestroyExceptionInfo(sans);
      if (read_info->number_scenes == 0)
        {
          read_info=DestroyImageInfo(read_info);
          return(PingImage(image_info,exception));
        }
      (void) CopyMagickString(ping_filename,read_info->filename,
        MagickPathExtent);
      images=NewImageList();
      extent=(ssize_t) (read_info->scene+read_info->number_scenes);
      /*
        Ping each scene in the requested range; failed scenes are skipped.
      */
      for (scene=(ssize_t) read_info->scene; scene < (ssize_t) extent; scene++)
      {
        (void) InterpretImageFilename(image_info,(Image *) NULL,ping_filename,
          (int) scene,read_info->filename,exception);
        image=PingImage(read_info,exception);
        if (image == (Image *) NULL)
          continue;
        AppendImageToList(&images,image);
      }
      read_info=DestroyImageInfo(read_info);
      return(images);
    }
  return(PingImage(image_info,exception));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   R e a d I m a g e                                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ReadImage() reads an image or image sequence from a file or file handle.
%  The method returns a NULL if there is a memory shortage or if the image
%  cannot be read.  On failure, a NULL image is returned and exception
%  describes the reason for the failure.
%
%  The format of the ReadImage method is:
%
%      Image *ReadImage(const ImageInfo *image_info,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image_info: Read the image defined by the file or filename members of
%      this structure.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  Returns MagickTrue if policy permits the coder to exercise the given
  rights; otherwise records a PolicyError and returns MagickFalse.
*/
static MagickBooleanType IsCoderAuthorized(const char *coder,
  const PolicyRights rights,ExceptionInfo *exception)
{
  if (IsRightsAuthorized(CoderPolicyDomain,rights,coder) == MagickFalse)
    {
      errno=EPERM;
      (void) ThrowMagickException(exception,GetMagickModule(),PolicyError,
        "NotAuthorized","`%s'",coder);
      return(MagickFalse);
    }
  return(MagickTrue);
}

MagickExport Image *ReadImage(const ImageInfo *image_info,
  ExceptionInfo *exception)
{
  char
    filename[MagickPathExtent],
    magick[MagickPathExtent],
    magick_filename[MagickPathExtent];

  const char
    *value;

  const DelegateInfo
    *delegate_info;

  const MagickInfo
    *magick_info;

  DecodeImageHandler
    *decoder;

  ExceptionInfo
    *sans_exception;

  GeometryInfo
    geometry_info;

  Image
    *image,
    *next;

  ImageInfo
    *read_info;

  MagickBooleanType
    status;

  MagickStatusType
    flags;

  /*
    Determine image type from filename prefix or suffix (e.g. image.jpg).
  */
  assert(image_info != (ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  assert(image_info->filename != (char *) NULL);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  assert(exception != (ExceptionInfo *) NULL);
  read_info=CloneImageInfo(image_info);
  (void) CopyMagickString(magick_filename,read_info->filename,MagickPathExtent);
  (void) SetImageInfo(read_info,0,exception);
  (void) CopyMagickString(filename,read_info->filename,MagickPathExtent);
  (void) CopyMagickString(magick,read_info->magick,MagickPathExtent);
  /*
    Call appropriate image reader based on image type.
*/ sans_exception=AcquireExceptionInfo(); magick_info=GetMagickInfo(read_info->magick,sans_exception); sans_exception=DestroyExceptionInfo(sans_exception); if (magick_info != (const MagickInfo *) NULL) { if (GetMagickEndianSupport(magick_info) == MagickFalse) read_info->endian=UndefinedEndian; else if ((image_info->endian == UndefinedEndian) && (GetMagickRawSupport(magick_info) != MagickFalse)) { unsigned long lsb_first; lsb_first=1; read_info->endian=(*(char *) &lsb_first) == 1 ? LSBEndian : MSBEndian; } } if ((magick_info != (const MagickInfo *) NULL) && (GetMagickDecoderSeekableStream(magick_info) != MagickFalse)) { image=AcquireImage(read_info,exception); (void) CopyMagickString(image->filename,read_info->filename, MagickPathExtent); status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception); if (status == MagickFalse) { read_info=DestroyImageInfo(read_info); image=DestroyImage(image); return((Image *) NULL); } if (IsBlobSeekable(image) == MagickFalse) { /* Coder requires a seekable stream. */ *read_info->filename='\0'; status=ImageToFile(image,read_info->filename,exception); if (status == MagickFalse) { (void) CloseBlob(image); read_info=DestroyImageInfo(read_info); image=DestroyImage(image); return((Image *) NULL); } read_info->temporary=MagickTrue; } (void) CloseBlob(image); image=DestroyImage(image); } image=NewImageList(); decoder=GetImageDecoder(magick_info); if (decoder == (DecodeImageHandler *) NULL) { delegate_info=GetDelegateInfo(read_info->magick,(char *) NULL,exception); if (delegate_info == (const DelegateInfo *) NULL) { (void) SetImageInfo(read_info,0,exception); (void) CopyMagickString(read_info->filename,filename, MagickPathExtent); magick_info=GetMagickInfo(read_info->magick,exception); decoder=GetImageDecoder(magick_info); } } if (decoder != (DecodeImageHandler *) NULL) { /* Call appropriate image reader based on image type. 
*/ if (GetMagickDecoderThreadSupport(magick_info) == MagickFalse) LockSemaphoreInfo(magick_info->semaphore); status=IsCoderAuthorized(read_info->magick,ReadPolicyRights,exception); image=(Image *) NULL; if (status != MagickFalse) image=decoder(read_info,exception); if (GetMagickDecoderThreadSupport(magick_info) == MagickFalse) UnlockSemaphoreInfo(magick_info->semaphore); } else { delegate_info=GetDelegateInfo(read_info->magick,(char *) NULL,exception); if (delegate_info == (const DelegateInfo *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), MissingDelegateError,"NoDecodeDelegateForThisImageFormat","`%s'", read_info->magick); if (read_info->temporary != MagickFalse) (void) RelinquishUniqueFileResource(read_info->filename); read_info=DestroyImageInfo(read_info); return((Image *) NULL); } /* Let our decoding delegate process the image. */ image=AcquireImage(read_info,exception); if (image == (Image *) NULL) { read_info=DestroyImageInfo(read_info); return((Image *) NULL); } (void) CopyMagickString(image->filename,read_info->filename, MagickPathExtent); *read_info->filename='\0'; if (GetDelegateThreadSupport(delegate_info) == MagickFalse) LockSemaphoreInfo(delegate_info->semaphore); status=InvokeDelegate(read_info,image,read_info->magick,(char *) NULL, exception); if (GetDelegateThreadSupport(delegate_info) == MagickFalse) UnlockSemaphoreInfo(delegate_info->semaphore); image=DestroyImageList(image); read_info->temporary=MagickTrue; if (status != MagickFalse) (void) SetImageInfo(read_info,0,exception); magick_info=GetMagickInfo(read_info->magick,exception); decoder=GetImageDecoder(magick_info); if (decoder == (DecodeImageHandler *) NULL) { if (IsPathAccessible(read_info->filename) != MagickFalse) (void) ThrowMagickException(exception,GetMagickModule(), MissingDelegateError,"NoDecodeDelegateForThisImageFormat","`%s'", read_info->magick); else ThrowFileException(exception,FileOpenError,"UnableToOpenFile", read_info->filename); 
read_info=DestroyImageInfo(read_info); return((Image *) NULL); } /* Call appropriate image reader based on image type. */ if (GetMagickDecoderThreadSupport(magick_info) == MagickFalse) LockSemaphoreInfo(magick_info->semaphore); status=IsCoderAuthorized(read_info->magick,ReadPolicyRights,exception); image=(Image *) NULL; if (status != MagickFalse) image=(decoder)(read_info,exception); if (GetMagickDecoderThreadSupport(magick_info) == MagickFalse) UnlockSemaphoreInfo(magick_info->semaphore); } if (read_info->temporary != MagickFalse) { (void) RelinquishUniqueFileResource(read_info->filename); read_info->temporary=MagickFalse; if (image != (Image *) NULL) (void) CopyMagickString(image->filename,filename,MagickPathExtent); } if (image == (Image *) NULL) { read_info=DestroyImageInfo(read_info); return(image); } if (exception->severity >= ErrorException) (void) LogMagickEvent(ExceptionEvent,GetMagickModule(), "Coder (%s) generated an image despite an error (%d), " "notify the developers",image->magick,exception->severity); if (IsBlobTemporary(image) != MagickFalse) (void) RelinquishUniqueFileResource(read_info->filename); if ((IsSceneGeometry(read_info->scenes,MagickFalse) != MagickFalse) && (GetImageListLength(image) != 1)) { Image *clones; clones=CloneImages(image,read_info->scenes,exception); if (clones != (Image *) NULL) { image=DestroyImageList(image); image=GetFirstImageInList(clones); } } for (next=image; next != (Image *) NULL; next=GetNextImageInList(next)) { char magick_path[MagickPathExtent], *property, timestamp[MagickPathExtent]; const char *option; const StringInfo *profile; ssize_t option_type; next->taint=MagickFalse; GetPathComponent(magick_filename,MagickPath,magick_path); if (*magick_path == '\0' && *next->magick == '\0') (void) CopyMagickString(next->magick,magick,MagickPathExtent); (void) CopyMagickString(next->magick_filename,magick_filename, MagickPathExtent); if (IsBlobTemporary(image) != MagickFalse) (void) 
CopyMagickString(next->filename,filename,MagickPathExtent); if (next->magick_columns == 0) next->magick_columns=next->columns; if (next->magick_rows == 0) next->magick_rows=next->rows; value=GetImageProperty(next,"exif:Orientation",exception); if (value == (char *) NULL) value=GetImageProperty(next,"tiff:Orientation",exception); if (value != (char *) NULL) { next->orientation=(OrientationType) StringToLong(value); (void) DeleteImageProperty(next,"tiff:Orientation"); (void) DeleteImageProperty(next,"exif:Orientation"); } value=GetImageProperty(next,"exif:XResolution",exception); if (value != (char *) NULL) { geometry_info.rho=next->resolution.x; geometry_info.sigma=1.0; flags=ParseGeometry(value,&geometry_info); if (geometry_info.sigma != 0) next->resolution.x=geometry_info.rho/geometry_info.sigma; if (strchr(value,',') != (char *) NULL) next->resolution.x=geometry_info.rho+geometry_info.sigma/1000.0; (void) DeleteImageProperty(next,"exif:XResolution"); } value=GetImageProperty(next,"exif:YResolution",exception); if (value != (char *) NULL) { geometry_info.rho=next->resolution.y; geometry_info.sigma=1.0; flags=ParseGeometry(value,&geometry_info); if (geometry_info.sigma != 0) next->resolution.y=geometry_info.rho/geometry_info.sigma; if (strchr(value,',') != (char *) NULL) next->resolution.y=geometry_info.rho+geometry_info.sigma/1000.0; (void) DeleteImageProperty(next,"exif:YResolution"); } value=GetImageProperty(next,"exif:ResolutionUnit",exception); if (value == (char *) NULL) value=GetImageProperty(next,"tiff:ResolutionUnit",exception); if (value != (char *) NULL) { option_type=ParseCommandOption(MagickResolutionOptions,MagickFalse, value); if (option_type >= 0) next->units=(ResolutionType) option_type; (void) DeleteImageProperty(next,"exif:ResolutionUnit"); (void) DeleteImageProperty(next,"tiff:ResolutionUnit"); } if (next->page.width == 0) next->page.width=next->columns; if (next->page.height == 0) next->page.height=next->rows; 
option=GetImageOption(read_info,"caption"); if (option != (const char *) NULL) { property=InterpretImageProperties(read_info,next,option,exception); (void) SetImageProperty(next,"caption",property,exception); property=DestroyString(property); } option=GetImageOption(read_info,"comment"); if (option != (const char *) NULL) { property=InterpretImageProperties(read_info,next,option,exception); (void) SetImageProperty(next,"comment",property,exception); property=DestroyString(property); } option=GetImageOption(read_info,"label"); if (option != (const char *) NULL) { property=InterpretImageProperties(read_info,next,option,exception); (void) SetImageProperty(next,"label",property,exception); property=DestroyString(property); } if (LocaleCompare(next->magick,"TEXT") == 0) (void) ParseAbsoluteGeometry("0x0+0+0",&next->page); if ((read_info->extract != (char *) NULL) && (read_info->stream == (StreamHandler) NULL)) { RectangleInfo geometry; SetGeometry(next,&geometry); flags=ParseAbsoluteGeometry(read_info->extract,&geometry); if ((next->columns != geometry.width) || (next->rows != geometry.height)) { if (((flags & XValue) != 0) || ((flags & YValue) != 0)) { Image *crop_image; crop_image=CropImage(next,&geometry,exception); if (crop_image != (Image *) NULL) ReplaceImageInList(&next,crop_image); } else if (((flags & WidthValue) != 0) || ((flags & HeightValue) != 0)) { Image *size_image; flags=ParseRegionGeometry(next,read_info->extract,&geometry, exception); size_image=ResizeImage(next,geometry.width,geometry.height, next->filter,exception); if (size_image != (Image *) NULL) ReplaceImageInList(&next,size_image); } } } profile=GetImageProfile(next,"icc"); if (profile == (const StringInfo *) NULL) profile=GetImageProfile(next,"icm"); profile=GetImageProfile(next,"iptc"); if (profile == (const StringInfo *) NULL) profile=GetImageProfile(next,"8bim"); (void) FormatMagickTime((time_t) GetBlobProperties(next)->st_mtime, MagickPathExtent,timestamp); (void) 
SetImageProperty(next,"date:modify",timestamp,exception); (void) FormatMagickTime((time_t) GetBlobProperties(next)->st_ctime, MagickPathExtent,timestamp); (void) SetImageProperty(next,"date:create",timestamp,exception); option=GetImageOption(image_info,"delay"); if (option != (const char *) NULL) { flags=ParseGeometry(option,&geometry_info); if ((flags & GreaterValue) != 0) { if (next->delay > (size_t) floor(geometry_info.rho+0.5)) next->delay=(size_t) floor(geometry_info.rho+0.5); } else if ((flags & LessValue) != 0) { if (next->delay < (size_t) floor(geometry_info.rho+0.5)) next->ticks_per_second=(ssize_t) floor(geometry_info.sigma+0.5); } else next->delay=(size_t) floor(geometry_info.rho+0.5); if ((flags & SigmaValue) != 0) next->ticks_per_second=(ssize_t) floor(geometry_info.sigma+0.5); } option=GetImageOption(image_info,"dispose"); if (option != (const char *) NULL) { option_type=ParseCommandOption(MagickDisposeOptions,MagickFalse, option); if (option_type >= 0) next->dispose=(DisposeType) option_type; } if (read_info->verbose != MagickFalse) (void) IdentifyImage(next,stderr,MagickFalse,exception); image=next; } read_info=DestroyImageInfo(read_info); return(GetFirstImageInList(image)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e a d I m a g e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ReadImages() reads one or more images and returns them as an image list. % % The format of the ReadImage method is: % % Image *ReadImages(ImageInfo *image_info,const char *filename, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image_info: the image info. % % o filename: the image filename. % % o exception: return any errors or warnings in this structure. 
%
*/
MagickExport Image *ReadImages(ImageInfo *image_info,const char *filename,
  ExceptionInfo *exception)
{
  char
    read_filename[MagickPathExtent];

  Image
    *image,
    *images;

  ImageInfo
    *read_info;

  /*
    Read image list from a file.
  */
  assert(image_info != (ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  assert(exception != (ExceptionInfo *) NULL);
  /*
    Work on a private clone so the caller's ImageInfo is never mutated.
  */
  read_info=CloneImageInfo(image_info);
  *read_info->magick='\0';
  (void) SetImageOption(read_info,"filename",filename);
  (void) CopyMagickString(read_info->filename,filename,MagickPathExtent);
  (void) InterpretImageFilename(read_info,(Image *) NULL,filename,
    (int) read_info->scene,read_filename,exception);
  /*
    If expanding the printf-style filename changed it, the name carries a
    frame-number template and possibly a scene range.
  */
  if (LocaleCompare(read_filename,read_info->filename) != 0)
    {
      ExceptionInfo
        *sans;

      ssize_t
        extent,
        scene;

      /*
        Images of the form image-%d.png[1-5].  SetImageInfo errors are
        deliberately discarded here (sans): this probe only extracts the
        scene range.
      */
      sans=AcquireExceptionInfo();
      (void) SetImageInfo(read_info,0,sans);
      sans=DestroyExceptionInfo(sans);
      if (read_info->number_scenes != 0)
        {
          (void) CopyMagickString(read_filename,read_info->filename,
            MagickPathExtent);
          images=NewImageList();
          extent=(ssize_t) (read_info->scene+read_info->number_scenes);
          scene=(ssize_t) read_info->scene;
          /*
            Read each scene; unreadable frames are skipped (best effort),
            the remainder are appended into a single list.
          */
          for ( ; scene < (ssize_t) extent; scene++)
          {
            (void) InterpretImageFilename(image_info,(Image *) NULL,
              read_filename,(int) scene,read_info->filename,exception);
            image=ReadImage(read_info,exception);
            if (image == (Image *) NULL)
              continue;
            AppendImageToList(&images,image);
          }
          read_info=DestroyImageInfo(read_info);
          return(images);
        }
    }
  /*
    Plain filename: a single ReadImage() call (which may itself return a
    multi-frame list).
  */
  (void) CopyMagickString(read_info->filename,filename,MagickPathExtent);
  image=ReadImage(read_info,exception);
  read_info=DestroyImageInfo(read_info);
  return(image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   R e a d   I n l i n e   I m a g e                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
ReadInlineImage() reads a Base64-encoded inline image or image sequence.
%  The method returns a NULL if there is a memory shortage or if the image
%  cannot be read.  On failure, a NULL image is returned and exception
%  describes the reason for the failure.
%
%  The format of the ReadInlineImage method is:
%
%      Image *ReadInlineImage(const ImageInfo *image_info,const char *content,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image_info: the image info.
%
%    o content: the image encoded in Base64.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ReadInlineImage(const ImageInfo *image_info,
  const char *content,ExceptionInfo *exception)
{
  Image
    *image;

  ImageInfo
    *read_info;

  unsigned char
    *blob;

  size_t
    length;

  register const char
    *p;

  /*
    Skip over header (e.g. data:image/gif;base64,).  The payload starts
    after the first ','; a missing ',' means the content is malformed.
  */
  image=NewImageList();
  for (p=content; (*p != ',') && (*p != '\0'); p++) ;
  if (*p == '\0')
    ThrowReaderException(CorruptImageError,"CorruptImage");
  p++;
  length=0;
  /*
    Base64Decode allocates blob; it is released below on both the error
    path and the success path.
  */
  blob=Base64Decode(p,&length);
  if (length == 0)
    {
      blob=(unsigned char *) RelinquishMagickMemory(blob);
      ThrowReaderException(CorruptImageError,"CorruptImage");
    }
  read_info=CloneImageInfo(image_info);
  /* Disable any caller-installed progress monitor for the decode pass. */
  (void) SetImageInfoProgressMonitor(read_info,(MagickProgressMonitor) NULL,
    (void *) NULL);
  *read_info->filename='\0';
  *read_info->magick='\0';
  image=BlobToImage(read_info,blob,length,exception);
  blob=(unsigned char *) RelinquishMagickMemory(blob);
  read_info=DestroyImageInfo(read_info);
  return(image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   W r i t e   I m a g e                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  WriteImage() writes an image or an image sequence to a file or file handle.
%  If writing to a file on disk, the name is defined by the filename member
%  of the image structure.
WriteImage() returns MagickFalse if there is a
%  memory shortage or if the image cannot be written.  Check the exception
%  member of image to determine the cause for any failure.
%
%  The format of the WriteImage method is:
%
%      MagickBooleanType WriteImage(const ImageInfo *image_info,Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image_info: the image info.
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType WriteImage(const ImageInfo *image_info,
  Image *image,ExceptionInfo *exception)
{
  char
    filename[MagickPathExtent];

  const char
    *option;

  const DelegateInfo
    *delegate_info;

  const MagickInfo
    *magick_info;

  EncodeImageHandler
    *encoder;

  ExceptionInfo
    *sans_exception;

  ImageInfo
    *write_info;

  MagickBooleanType
    status,
    temporary;

  /*
    Determine image type from filename prefix or suffix (e.g. image.jpg).
  */
  assert(image_info != (ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  assert(image->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  sans_exception=AcquireExceptionInfo();
  /* Work on a private clone; image_info itself is never modified. */
  write_info=CloneImageInfo(image_info);
  (void) CopyMagickString(write_info->filename,image->filename,
    MagickPathExtent);
  (void) SetImageInfo(write_info,1,sans_exception);
  if (*write_info->magick == '\0')
    (void) CopyMagickString(write_info->magick,image->magick,MagickPathExtent);
  /* Remember the original filename so it can be restored before returning. */
  (void) CopyMagickString(filename,image->filename,MagickPathExtent);
  (void) CopyMagickString(image->filename,write_info->filename,
    MagickPathExtent);
  /*
    Call appropriate image writer based on image type.
  */
  magick_info=GetMagickInfo(write_info->magick,sans_exception);
  sans_exception=DestroyExceptionInfo(sans_exception);
  if (magick_info != (const MagickInfo *) NULL)
    {
      /*
        Coder without endian support: drop any endian request; raw coders
        with no explicit endian default to the host byte order.
      */
      if (GetMagickEndianSupport(magick_info) == MagickFalse)
        image->endian=UndefinedEndian;
      else
        if ((image_info->endian == UndefinedEndian) &&
            (GetMagickRawSupport(magick_info) != MagickFalse))
          {
            unsigned long
              lsb_first;

            lsb_first=1;
            image->endian=(*(char *) &lsb_first) == 1 ? LSBEndian : MSBEndian;
          }
    }
  (void) SyncImageProfiles(image);
  DisassociateImageStream(image);
  option=GetImageOption(image_info,"delegate:bimodal");
  if ((IsStringTrue(option) != MagickFalse) &&
      (write_info->page == (char *) NULL) &&
      (GetPreviousImageInList(image) == (Image *) NULL) &&
      (GetNextImageInList(image) == (Image *) NULL) &&
      (IsTaintImage(image) == MagickFalse))
    {
      delegate_info=GetDelegateInfo(image->magick,write_info->magick,exception);
      if ((delegate_info != (const DelegateInfo *) NULL) &&
          (GetDelegateMode(delegate_info) == 0) &&
          (IsPathAccessible(image->magick_filename) != MagickFalse))
        {
          /*
            Process image with bi-modal delegate.
          */
          (void) CopyMagickString(image->filename,image->magick_filename,
            MagickPathExtent);
          status=InvokeDelegate(write_info,image,image->magick,
            write_info->magick,exception);
          write_info=DestroyImageInfo(write_info);
          (void) CopyMagickString(image->filename,filename,MagickPathExtent);
          return(status);
        }
    }
  status=MagickFalse;
  temporary=MagickFalse;
  if ((magick_info != (const MagickInfo *) NULL) &&
      (GetMagickEncoderSeekableStream(magick_info) != MagickFalse))
    {
      char
        image_filename[MagickPathExtent];

      (void) CopyMagickString(image_filename,image->filename,MagickPathExtent);
      status=OpenBlob(image_info,image,WriteBinaryBlobMode,exception);
      (void) CopyMagickString(image->filename,image_filename,MagickPathExtent);
      if (status != MagickFalse)
        {
          if (IsBlobSeekable(image) == MagickFalse)
            {
              /*
                A seekable stream is required by the encoder: write to a
                unique temporary file first, then copy it out below.
              */
              write_info->adjoin=MagickTrue;
              (void) CopyMagickString(write_info->filename,image->filename,
                MagickPathExtent);
              (void) AcquireUniqueFilename(image->filename);
              temporary=MagickTrue;
            }
          (void) CloseBlob(image);
        }
    }
  encoder=GetImageEncoder(magick_info);
  if (encoder != (EncodeImageHandler *) NULL)
    {
      /*
        Call appropriate image writer based on image type.  Coders without
        thread support are serialized on their semaphore.
      */
      if (GetMagickEncoderThreadSupport(magick_info) == MagickFalse)
        LockSemaphoreInfo(magick_info->semaphore);
      status=IsCoderAuthorized(write_info->magick,WritePolicyRights,exception);
      if (status != MagickFalse)
        status=encoder(write_info,image,exception);
      if (GetMagickEncoderThreadSupport(magick_info) == MagickFalse)
        UnlockSemaphoreInfo(magick_info->semaphore);
    }
  else
    {
      delegate_info=GetDelegateInfo((char *) NULL,write_info->magick,exception);
      if (delegate_info != (DelegateInfo *) NULL)
        {
          /*
            Process the image with delegate.
          */
          *write_info->filename='\0';
          if (GetDelegateThreadSupport(delegate_info) == MagickFalse)
            LockSemaphoreInfo(delegate_info->semaphore);
          status=InvokeDelegate(write_info,image,(char *) NULL,
            write_info->magick,exception);
          if (GetDelegateThreadSupport(delegate_info) == MagickFalse)
            UnlockSemaphoreInfo(delegate_info->semaphore);
          (void) CopyMagickString(image->filename,filename,MagickPathExtent);
        }
      else
        {
          /*
            No encoder or delegate for the requested format: fall back to
            the image's own format, then the filename extension, before
            giving up with MissingDelegateError.
          */
          sans_exception=AcquireExceptionInfo();
          magick_info=GetMagickInfo(write_info->magick,sans_exception);
          sans_exception=DestroyExceptionInfo(sans_exception);
          if ((write_info->affirm == MagickFalse) &&
              (magick_info == (const MagickInfo *) NULL))
            {
              (void) CopyMagickString(write_info->magick,image->magick,
                MagickPathExtent);
              magick_info=GetMagickInfo(write_info->magick,exception);
            }
          encoder=GetImageEncoder(magick_info);
          if (encoder == (EncodeImageHandler *) NULL)
            {
              char
                extension[MagickPathExtent];

              GetPathComponent(image->filename,ExtensionPath,extension);
              if (*extension != '\0')
                magick_info=GetMagickInfo(extension,exception);
              else
                magick_info=GetMagickInfo(image->magick,exception);
              (void) CopyMagickString(image->filename,filename,
                MagickPathExtent);
              encoder=GetImageEncoder(magick_info);
            }
          if (encoder == (EncodeImageHandler *) NULL)
            {
              magick_info=GetMagickInfo(image->magick,exception);
              encoder=GetImageEncoder(magick_info);
              if (encoder == (EncodeImageHandler *) NULL)
                (void) ThrowMagickException(exception,GetMagickModule(),
                  MissingDelegateError,"NoEncodeDelegateForThisImageFormat",
                  "`%s'",write_info->magick);
            }
          if (encoder != (EncodeImageHandler *) NULL)
            {
              /*
                Call appropriate image writer based on image type.
              */
              if (GetMagickEncoderThreadSupport(magick_info) == MagickFalse)
                LockSemaphoreInfo(magick_info->semaphore);
              status=IsCoderAuthorized(write_info->magick,WritePolicyRights,
                exception);
              if (status != MagickFalse)
                status=encoder(write_info,image,exception);
              if (GetMagickEncoderThreadSupport(magick_info) == MagickFalse)
                UnlockSemaphoreInfo(magick_info->semaphore);
            }
        }
    }
  if (temporary != MagickFalse)
    {
      /*
        Copy temporary image file to permanent.
      */
      status=OpenBlob(write_info,image,ReadBinaryBlobMode,exception);
      if (status != MagickFalse)
        {
          (void) RelinquishUniqueFileResource(write_info->filename);
          status=ImageToFile(image,write_info->filename,exception);
        }
      (void) CloseBlob(image);
      (void) RelinquishUniqueFileResource(image->filename);
      (void) CopyMagickString(image->filename,write_info->filename,
        MagickPathExtent);
    }
  if ((LocaleCompare(write_info->magick,"info") != 0) &&
      (write_info->verbose != MagickFalse))
    (void) IdentifyImage(image,stdout,MagickFalse,exception);
  write_info=DestroyImageInfo(write_info);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   W r i t e   I m a g e s                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  WriteImages() writes an image sequence into one or more files.  While
%  WriteImage() can write an image sequence, it is limited to writing
%  the sequence into a single file using a format which supports multiple
%  frames.
WriteImages(), however, does not have this limitation, instead it
%  generates multiple output files if necessary (or when requested).  When
%  ImageInfo's adjoin flag is set to MagickFalse, the file name is expected
%  to include a printf-style formatting string for the frame number (e.g.
%  "image%02d.png").
%
%  The format of the WriteImages method is:
%
%      MagickBooleanType WriteImages(const ImageInfo *image_info,Image *images,
%        const char *filename,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image_info: the image info.
%
%    o images: the image list.
%
%    o filename: the image filename.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType WriteImages(const ImageInfo *image_info,
  Image *images,const char *filename,ExceptionInfo *exception)
{
#define WriteImageTag  "Write/Image"

  ExceptionInfo
    *sans_exception;

  ImageInfo
    *write_info;

  MagickBooleanType
    proceed;

  MagickOffsetType
    progress;

  MagickProgressMonitor
    progress_monitor;

  MagickSizeType
    number_images;

  MagickStatusType
    status;

  register Image
    *p;

  assert(image_info != (const ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  assert(images != (Image *) NULL);
  assert(images->signature == MagickCoreSignature);
  if (images->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
  assert(exception != (ExceptionInfo *) NULL);
  write_info=CloneImageInfo(image_info);
  *write_info->magick='\0';
  images=GetFirstImageInList(images);
  /* An explicit filename overrides the filename of every frame. */
  if (filename != (const char *) NULL)
    for (p=images; p != (Image *) NULL; p=GetNextImageInList(p))
      (void) CopyMagickString(p->filename,filename,MagickPathExtent);
  (void) CopyMagickString(write_info->filename,images->filename,
    MagickPathExtent);
  /* Format-detection errors are deliberately discarded here (sans). */
  sans_exception=AcquireExceptionInfo();
  (void) SetImageInfo(write_info,(unsigned int) GetImageListLength(images),
    sans_exception);
  sans_exception=DestroyExceptionInfo(sans_exception);
  if (*write_info->magick == '\0')
    (void) CopyMagickString(write_info->magick,images->magick,MagickPathExtent);
  p=images;
  for ( ; GetNextImageInList(p) != (Image *) NULL; p=GetNextImageInList(p))
  {
    register Image
      *next;

    next=GetNextImageInList(p);
    if (next == (Image *) NULL)
      break;
    /* A non-increasing scene means the numbering is inconsistent. */
    if (p->scene >= next->scene)
      {
        register ssize_t
          i;

        /*
          Generate consistent scene numbers.
        */
        i=(ssize_t) images->scene;
        for (p=images; p != (Image *) NULL; p=GetNextImageInList(p))
          p->scene=(size_t) i++;
        break;
      }
  }
  /*
    Write images.  status accumulates per-frame results via &=, so any
    failed frame makes the final result MagickFalse.
  */
  status=MagickTrue;
  progress_monitor=(MagickProgressMonitor) NULL;
  progress=0;
  number_images=GetImageListLength(images);
  for (p=images; p != (Image *) NULL; p=GetNextImageInList(p))
  {
    /* Suspend the per-image monitor; list-level progress is reported below. */
    if (number_images != 1)
      progress_monitor=SetImageProgressMonitor(p,(MagickProgressMonitor) NULL,
        p->client_data);
    status&=WriteImage(write_info,p,exception);
    if (number_images != 1)
      (void) SetImageProgressMonitor(p,progress_monitor,p->client_data);
    /* In adjoin mode the first WriteImage() call emits the whole list. */
    if (write_info->adjoin != MagickFalse)
      break;
    if (number_images != 1)
      {
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(p,WriteImageTag,progress,number_images);
        if (proceed == MagickFalse)
          break;
      }
  }
  write_info=DestroyImageInfo(write_info);
  return(status != 0 ? MagickTrue : MagickFalse);
}
bml_normalize_ellpack_typed.c
#include "../../macros.h"
#include "../../typed.h"
#include "../bml_allocate.h"
#include "../bml_normalize.h"
#include "../bml_parallel.h"
#include "../bml_types.h"
#include "bml_add_ellpack.h"
#include "bml_allocate_ellpack.h"
#include "bml_normalize_ellpack.h"
#include "bml_scale_ellpack.h"
#include "bml_types_ellpack.h"

#include <complex.h>
#include <float.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#ifdef _OPENMP
#include <omp.h>
#endif

/* Normalize an ellpack matrix using previously computed Gershgorin bounds.
 *
 * Rescales A in place as A' = (maxeval * I - A) / (maxeval - mineval), so
 * that the estimated spectrum is mapped into [0, 1].
 *
 * \ingroup normalize_group
 *
 * \param A The matrix
 * \param mineval Calculated min value
 * \param maxeval Calculated max value
 */
void TYPED_FUNC(
    bml_normalize_ellpack) (
    bml_matrix_ellpack_t * A,
    double mineval,
    double maxeval)
{
    /* Width of the Gershgorin interval. */
    double spread = maxeval - mineval;

    /* Negative scale flips the spectrum; the diagonal shift re-centers it. */
    REAL_T scale_factor = (REAL_T) - 1.0 / spread;
    double diagonal_shift = maxeval / spread;

    bml_scale_inplace_ellpack(&scale_factor, A);
    bml_add_identity_ellpack(A, diagonal_shift, 0.0);
}

/** Calculate Gershgorin bounds for an ellpack matrix.
* * \ingroup normalize_group * * \param A The matrix * \param nrows Number of rows to use * returns mineval Calculated min value * returns maxeval Calculated max value */ void *TYPED_FUNC( bml_gershgorin_ellpack) ( bml_matrix_ellpack_t * A) { REAL_T radius, absham, dvalue; double emin = DBL_MAX; double emax = DBL_MIN; double *eval = bml_allocate_memory(sizeof(double) * 2); int N = A->N; int M = A->M; int *A_nnz = (int *) A->nnz; int *A_index = (int *) A->index; int *A_localRowMin = A->domain->localRowMin; int *A_localRowMax = A->domain->localRowMax; int myRank = bml_getMyRank(); REAL_T rad[N]; REAL_T dval[N]; REAL_T *A_value = (REAL_T *) A->value; #pragma omp parallel for \ shared(N, M, A_nnz, A_index, A_value) \ shared(A_localRowMin, A_localRowMax, myRank) \ shared(rad, dval) \ private(absham, radius, dvalue) \ reduction(max:emax) \ reduction(min:emin) //for (int i = 0; i < N; i++) for (int i = A_localRowMin[myRank]; i < A_localRowMax[myRank]; i++) { radius = 0.0; dvalue = 0.0; for (int j = 0; j < A_nnz[i]; j++) { if (i == A_index[ROWMAJOR(i, j, N, M)]) dvalue = A_value[ROWMAJOR(i, j, N, M)]; else { absham = ABS(A_value[ROWMAJOR(i, j, N, M)]); radius += (double) absham; } } dval[i] = dvalue; rad[i] = radius; /* emax = (emax > REAL_PART(dvalue + radius) ? emax : REAL_PART(dvalue + radius)); emin = (emin < REAL_PART(dvalue - radius) ? 
emin : REAL_PART(dvalue - radius)); */ } //for (int i = 0; i < N; i++) for (int i = A_localRowMin[myRank]; i < A_localRowMax[myRank]; i++) { if (REAL_PART(dval[i] + rad[i]) > emax) emax = REAL_PART(dval[i] + rad[i]); if (REAL_PART(dval[i] - rad[i]) < emin) emin = REAL_PART(dval[i] - rad[i]); } //printf("%d: emin = %e emax = %e\n", myRank, emin, emax); #ifdef DO_MPI if (bml_getNRanks() > 1 && A->distribution_mode == distributed) { bml_minRealReduce(&emin); bml_maxRealReduce(&emax); } #endif eval[0] = emin; eval[1] = emax; //printf("Global %d: emin = %e emax = %e\n", myRank, emin, emax); return eval; } /** Calculate Gershgorin bounds for a partial ellpack matrix. * * \ingroup normalize_group * * \param A The matrix * \param nrows Number of rows to use * returns mineval Calculated min value * returns maxeval Calculated max value */ void *TYPED_FUNC( bml_gershgorin_partial_ellpack) ( bml_matrix_ellpack_t * A, int nrows) { REAL_T radius, absham, dvalue; double emin = DBL_MAX; double emax = DBL_MIN; double *eval = bml_allocate_memory(sizeof(double) * 2); int N = A->N; int M = A->M; int *A_nnz = (int *) A->nnz; int *A_index = (int *) A->index; REAL_T rad[N]; REAL_T dval[N]; REAL_T *A_value = (REAL_T *) A->value; #pragma omp parallel for \ shared(N, M, A_nnz, A_index, A_value) \ shared(rad, dval) \ private(absham, radius, dvalue) \ reduction(max:emax) \ reduction(min:emin) for (int i = 0; i < nrows; i++) { radius = 0.0; dvalue = 0.0; for (int j = 0; j < A_nnz[i]; j++) { if (i == A_index[ROWMAJOR(i, j, N, M)]) dvalue = A_value[ROWMAJOR(i, j, N, M)]; else { absham = ABS(A_value[ROWMAJOR(i, j, N, M)]); radius += (double) absham; } } dval[i] = dvalue; rad[i] = radius; } for (int i = 0; i < nrows; i++) { if (REAL_PART(dval[i] + rad[i]) > emax) emax = REAL_PART(dval[i] + rad[i]); if (REAL_PART(dval[i] - rad[i]) < emin) emin = REAL_PART(dval[i] - rad[i]); } eval[0] = emin; eval[1] = emax; return eval; }
GB_unop__identity_int32_int64.c
//------------------------------------------------------------------------------
// GB_unop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB (_unop_apply__identity_int32_int64)
// op(A') function:  GB (_unop_tran__identity_int32_int64)

// C type:   int32_t
// A type:   int64_t
// cast:     int32_t cij = (int32_t) aij
// unaryop:  cij = aij

#define GB_ATYPE \
    int64_t

#define GB_CTYPE \
    int32_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int64_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = x ;

// casting
#define GB_CAST(z, aij) \
    int32_t z = (int32_t) aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    int64_t aij = Ax [pA] ;         \
    /* Cx [pC] = op (cast (aij)) */ \
    int32_t z = (int32_t) aij ;     \
    Cx [pC] = z ;                   \
}

// true if operator is the identity op with no typecasting
// (identity here still typecasts int64_t -> int32_t, hence 0)
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
    0

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_INT32 || GxB_NO_INT64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_apply__identity_int32_int64)
(
    int32_t *Cx,                // Cx and Ax may be aliased
    const int64_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,                // number of entries in Ax (and Cx)
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;

    // TODO: if OP is ONE and uniform-valued matrices are exploited, then
    // do this in O(1) time

    if (Ab == NULL)
    {
        // dense/sparse case: every position in Ax holds an entry
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        GB_memcpy (Cx, Ax, anz * sizeof (int64_t), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            int64_t aij = Ax [p] ;
            int32_t z = (int32_t) aij ;
            Cx [p] = z ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        // only positions with Ab [p] != 0 hold an entry
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            int64_t aij = Ax [p] ;
            int32_t z = (int32_t) aij ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_tran__identity_int32_int64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the transpose kernel body is shared via textual inclusion; the macros
    // defined above specialize it for int64_t -> int32_t identity
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
Sema.h
//===--- Sema.h - Semantic Analysis & AST Building --------------*- C++ -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // // This file defines the Sema class, which performs semantic analysis and // builds ASTs. // //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_SEMA_SEMA_H #define LLVM_CLANG_SEMA_SEMA_H #include "clang/AST/Attr.h" #include "clang/AST/Availability.h" #include "clang/AST/ComparisonCategories.h" #include "clang/AST/DeclTemplate.h" #include "clang/AST/DeclarationName.h" #include "clang/AST/Expr.h" #include "clang/AST/ExprCXX.h" #include "clang/AST/ExprObjC.h" #include "clang/AST/ExternalASTSource.h" #include "clang/AST/LocInfoType.h" #include "clang/AST/MangleNumberingContext.h" #include "clang/AST/NSAPI.h" #include "clang/AST/PrettyPrinter.h" #include "clang/AST/StmtCXX.h" #include "clang/AST/TypeLoc.h" #include "clang/AST/TypeOrdering.h" #include "clang/Basic/ExpressionTraits.h" #include "clang/Basic/Module.h" #include "clang/Basic/OpenMPKinds.h" #include "clang/Basic/PragmaKinds.h" #include "clang/Basic/Specifiers.h" #include "clang/Basic/TemplateKinds.h" #include "clang/Basic/TypeTraits.h" #include "clang/Sema/AnalysisBasedWarnings.h" #include "clang/Sema/CleanupInfo.h" #include "clang/Sema/DeclSpec.h" #include "clang/Sema/ExternalSemaSource.h" #include "clang/Sema/IdentifierResolver.h" #include "clang/Sema/ObjCMethodList.h" #include "clang/Sema/Ownership.h" #include "clang/Sema/Scope.h" #include "clang/Sema/TypoCorrection.h" #include "clang/Sema/Weak.h" #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/Optional.h" #include "llvm/ADT/SetVector.h" #include "llvm/ADT/SmallBitVector.h" #include "llvm/ADT/SmallPtrSet.h" #include "llvm/ADT/SmallVector.h" 
#include "llvm/ADT/TinyPtrVector.h" #include <deque> #include <memory> #include <string> #include <vector> namespace llvm { class APSInt; template <typename ValueT> struct DenseMapInfo; template <typename ValueT, typename ValueInfoT> class DenseSet; class SmallBitVector; struct InlineAsmIdentifierInfo; } namespace clang { class ADLResult; class ASTConsumer; class ASTContext; class ASTMutationListener; class ASTReader; class ASTWriter; class ArrayType; class ParsedAttr; class BindingDecl; class BlockDecl; class CapturedDecl; class CXXBasePath; class CXXBasePaths; class CXXBindTemporaryExpr; typedef SmallVector<CXXBaseSpecifier*, 4> CXXCastPath; class CXXConstructorDecl; class CXXConversionDecl; class CXXDeleteExpr; class CXXDestructorDecl; class CXXFieldCollector; class CXXMemberCallExpr; class CXXMethodDecl; class CXXScopeSpec; class CXXTemporary; class CXXTryStmt; class CallExpr; class ClassTemplateDecl; class ClassTemplatePartialSpecializationDecl; class ClassTemplateSpecializationDecl; class VarTemplatePartialSpecializationDecl; class CodeCompleteConsumer; class CodeCompletionAllocator; class CodeCompletionTUInfo; class CodeCompletionResult; class CoroutineBodyStmt; class Decl; class DeclAccessPair; class DeclContext; class DeclRefExpr; class DeclaratorDecl; class DeducedTemplateArgument; class DependentDiagnostic; class DesignatedInitExpr; class Designation; class EnableIfAttr; class EnumConstantDecl; class Expr; class ExtVectorType; class FormatAttr; class FriendDecl; class FunctionDecl; class FunctionProtoType; class FunctionTemplateDecl; class ImplicitConversionSequence; typedef MutableArrayRef<ImplicitConversionSequence> ConversionSequenceList; class InitListExpr; class InitializationKind; class InitializationSequence; class InitializedEntity; class IntegerLiteral; class LabelStmt; class LambdaExpr; class LangOptions; class LocalInstantiationScope; class LookupResult; class MacroInfo; typedef ArrayRef<std::pair<IdentifierInfo *, SourceLocation>> 
ModuleIdPath; class ModuleLoader; class MultiLevelTemplateArgumentList; class NamedDecl; class ObjCCategoryDecl; class ObjCCategoryImplDecl; class ObjCCompatibleAliasDecl; class ObjCContainerDecl; class ObjCImplDecl; class ObjCImplementationDecl; class ObjCInterfaceDecl; class ObjCIvarDecl; template <class T> class ObjCList; class ObjCMessageExpr; class ObjCMethodDecl; class ObjCPropertyDecl; class ObjCProtocolDecl; class OMPThreadPrivateDecl; class OMPRequiresDecl; class OMPDeclareReductionDecl; class OMPDeclareSimdDecl; class OMPClause; struct OMPVarListLocTy; struct OverloadCandidate; class OverloadCandidateSet; class OverloadExpr; class ParenListExpr; class ParmVarDecl; class Preprocessor; class PseudoDestructorTypeStorage; class PseudoObjectExpr; class QualType; class StandardConversionSequence; class Stmt; class StringLiteral; class SwitchStmt; class TemplateArgument; class TemplateArgumentList; class TemplateArgumentLoc; class TemplateDecl; class TemplateInstantiationCallback; class TemplateParameterList; class TemplatePartialOrderingContext; class TemplateTemplateParmDecl; class Token; class TypeAliasDecl; class TypedefDecl; class TypedefNameDecl; class TypeLoc; class TypoCorrectionConsumer; class UnqualifiedId; class UnresolvedLookupExpr; class UnresolvedMemberExpr; class UnresolvedSetImpl; class UnresolvedSetIterator; class UsingDecl; class UsingShadowDecl; class ValueDecl; class VarDecl; class VarTemplateSpecializationDecl; class VisibilityAttr; class VisibleDeclConsumer; class IndirectFieldDecl; struct DeductionFailureInfo; class TemplateSpecCandidateSet; namespace sema { class AccessedEntity; class BlockScopeInfo; class Capture; class CapturedRegionScopeInfo; class CapturingScopeInfo; class CompoundScopeInfo; class DelayedDiagnostic; class DelayedDiagnosticPool; class FunctionScopeInfo; class LambdaScopeInfo; class PossiblyUnreachableDiag; class SemaPPCallbacks; class TemplateDeductionInfo; } namespace threadSafety { class BeforeSet; void 
threadSafetyCleanup(BeforeSet* Cache); } // FIXME: No way to easily map from TemplateTypeParmTypes to // TemplateTypeParmDecls, so we have this horrible PointerUnion. typedef std::pair<llvm::PointerUnion<const TemplateTypeParmType*, NamedDecl*>, SourceLocation> UnexpandedParameterPack; /// Describes whether we've seen any nullability information for the given /// file. struct FileNullability { /// The first pointer declarator (of any pointer kind) in the file that does /// not have a corresponding nullability annotation. SourceLocation PointerLoc; /// The end location for the first pointer declarator in the file. Used for /// placing fix-its. SourceLocation PointerEndLoc; /// Which kind of pointer declarator we saw. uint8_t PointerKind; /// Whether we saw any type nullability annotations in the given file. bool SawTypeNullability = false; }; /// A mapping from file IDs to a record of whether we've seen nullability /// information in that file. class FileNullabilityMap { /// A mapping from file IDs to the nullability information for each file ID. llvm::DenseMap<FileID, FileNullability> Map; /// A single-element cache based on the file ID. struct { FileID File; FileNullability Nullability; } Cache; public: FileNullability &operator[](FileID file) { // Check the single-element cache. if (file == Cache.File) return Cache.Nullability; // It's not in the single-element cache; flush the cache if we have one. if (!Cache.File.isInvalid()) { Map[Cache.File] = Cache.Nullability; } // Pull this entry into the cache. Cache.File = file; Cache.Nullability = Map[file]; return Cache.Nullability; } }; /// Keeps track of expected type during expression parsing. The type is tied to /// a particular token, all functions that update or consume the type take a /// start location of the token they are looking at as a parameter. This allows /// to avoid updating the type on hot paths in the parser. 
class PreferredTypeBuilder { public: PreferredTypeBuilder() = default; explicit PreferredTypeBuilder(QualType Type) : Type(Type) {} void enterCondition(Sema &S, SourceLocation Tok); void enterReturn(Sema &S, SourceLocation Tok); void enterVariableInit(SourceLocation Tok, Decl *D); /// Computing a type for the function argument may require running /// overloading, so we postpone its computation until it is actually needed. /// /// Clients should be very careful when using this funciton, as it stores a /// function_ref, clients should make sure all calls to get() with the same /// location happen while function_ref is alive. void enterFunctionArgument(SourceLocation Tok, llvm::function_ref<QualType()> ComputeType); void enterParenExpr(SourceLocation Tok, SourceLocation LParLoc); void enterUnary(Sema &S, SourceLocation Tok, tok::TokenKind OpKind, SourceLocation OpLoc); void enterBinary(Sema &S, SourceLocation Tok, Expr *LHS, tok::TokenKind Op); void enterMemAccess(Sema &S, SourceLocation Tok, Expr *Base); void enterSubscript(Sema &S, SourceLocation Tok, Expr *LHS); /// Handles all type casts, including C-style cast, C++ casts, etc. void enterTypeCast(SourceLocation Tok, QualType CastType); QualType get(SourceLocation Tok) const { if (Tok != ExpectedLoc) return QualType(); if (!Type.isNull()) return Type; if (ComputeType) return ComputeType(); return QualType(); } private: /// Start position of a token for which we store expected type. SourceLocation ExpectedLoc; /// Expected type for a token starting at ExpectedLoc. QualType Type; /// A function to compute expected type at ExpectedLoc. It is only considered /// if Type is null. llvm::function_ref<QualType()> ComputeType; }; /// Sema - This implements semantic analysis and AST building for C. class Sema { Sema(const Sema &) = delete; void operator=(const Sema &) = delete; ///Source of additional semantic information. ExternalSemaSource *ExternalSource; ///Whether Sema has generated a multiplexer and has to delete it. 
bool isMultiplexExternalSource;

  static bool mightHaveNonExternalLinkage(const DeclaratorDecl *FD);

  // NOTE(review): presumably the out-of-line slow path backing isVisible()
  // (called below); confirm against the definition in the .cpp file.
  bool isVisibleSlow(const NamedDecl *D);

  /// Determine whether two declarations should be linked together, given that
  /// the old declaration might not be visible and the new declaration might
  /// not have external linkage.
  bool shouldLinkPossiblyHiddenDecl(const NamedDecl *Old,
                                    const NamedDecl *New) {
    // A visible previous declaration is always linked against.
    if (isVisible(Old))
      return true;

    // See comment in below overload for why it's safe to compute the linkage
    // of the new declaration here.
    if (New->isExternallyDeclarable()) {
      assert(Old->isExternallyDeclarable() &&
             "should not have found a non-externally-declarable previous decl");
      return true;
    }

    return false;
  }

  /// Overload for the case where the previous declaration comes from lookup.
  bool shouldLinkPossiblyHiddenDecl(LookupResult &Old, const NamedDecl *New);

  void setupImplicitSpecialMemberType(CXXMethodDecl *SpecialMem,
                                      QualType ResultTy,
                                      ArrayRef<QualType> Args);

public:
  // Opaque wrappers around AST handles (see OpaquePtr in Sema/Ownership.h),
  // used for values passed across the parser boundary.
  typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy;
  typedef OpaquePtr<TemplateName> TemplateTy;
  typedef OpaquePtr<QualType> TypeTy;

  OpenCLOptions OpenCLFeatures;
  FPOptions FPFeatures;

  // References to the major pieces of compiler state Sema works with; they
  // outlive this object.
  const LangOptions &LangOpts;
  Preprocessor &PP;
  ASTContext &Context;
  ASTConsumer &Consumer;
  DiagnosticsEngine &Diags;
  SourceManager &SourceMgr;

  /// Flag indicating whether or not to collect detailed statistics.
  bool CollectStats;

  /// Code-completion consumer.
  CodeCompleteConsumer *CodeCompleter;

  /// CurContext - This is the current declaration context of parsing.
  DeclContext *CurContext;

  /// Generally null except when we temporarily switch decl contexts,
  /// like in \see ActOnObjCTemporaryExitContainerContext.
  DeclContext *OriginalLexicalContext;

  /// VAListTagName - The declaration name corresponding to __va_list_tag.
  /// This is used as part of a hack to omit that class from ADL results.
  DeclarationName VAListTagName;

  bool MSStructPragmaOn; // True when \#pragma ms_struct on

  /// Controls member pointer representation format under the MS ABI.
LangOptions::PragmaMSPointersToMembersKind MSPointerToMemberRepresentationMethod; /// Stack of active SEH __finally scopes. Can be empty. SmallVector<Scope*, 2> CurrentSEHFinally; /// Source location for newly created implicit MSInheritanceAttrs SourceLocation ImplicitMSInheritanceAttrLoc; /// pragma clang section kind enum PragmaClangSectionKind { PCSK_Invalid = 0, PCSK_BSS = 1, PCSK_Data = 2, PCSK_Rodata = 3, PCSK_Text = 4 }; enum PragmaClangSectionAction { PCSA_Set = 0, PCSA_Clear = 1 }; struct PragmaClangSection { std::string SectionName; bool Valid = false; SourceLocation PragmaLocation; void Act(SourceLocation PragmaLocation, PragmaClangSectionAction Action, StringLiteral* Name); }; PragmaClangSection PragmaClangBSSSection; PragmaClangSection PragmaClangDataSection; PragmaClangSection PragmaClangRodataSection; PragmaClangSection PragmaClangTextSection; enum PragmaMsStackAction { PSK_Reset = 0x0, // #pragma () PSK_Set = 0x1, // #pragma (value) PSK_Push = 0x2, // #pragma (push[, id]) PSK_Pop = 0x4, // #pragma (pop[, id]) PSK_Show = 0x8, // #pragma (show) -- only for "pack"! 
PSK_Push_Set = PSK_Push | PSK_Set, // #pragma (push[, id], value) PSK_Pop_Set = PSK_Pop | PSK_Set, // #pragma (pop[, id], value) }; template<typename ValueType> struct PragmaStack { struct Slot { llvm::StringRef StackSlotLabel; ValueType Value; SourceLocation PragmaLocation; SourceLocation PragmaPushLocation; Slot(llvm::StringRef StackSlotLabel, ValueType Value, SourceLocation PragmaLocation, SourceLocation PragmaPushLocation) : StackSlotLabel(StackSlotLabel), Value(Value), PragmaLocation(PragmaLocation), PragmaPushLocation(PragmaPushLocation) {} }; void Act(SourceLocation PragmaLocation, PragmaMsStackAction Action, llvm::StringRef StackSlotLabel, ValueType Value); // MSVC seems to add artificial slots to #pragma stacks on entering a C++ // method body to restore the stacks on exit, so it works like this: // // struct S { // #pragma <name>(push, InternalPragmaSlot, <current_pragma_value>) // void Method {} // #pragma <name>(pop, InternalPragmaSlot) // }; // // It works even with #pragma vtordisp, although MSVC doesn't support // #pragma vtordisp(push [, id], n) // syntax. // // Push / pop a named sentinel slot. void SentinelAction(PragmaMsStackAction Action, StringRef Label) { assert((Action == PSK_Push || Action == PSK_Pop) && "Can only push / pop #pragma stack sentinels!"); Act(CurrentPragmaLocation, Action, Label, CurrentValue); } // Constructors. explicit PragmaStack(const ValueType &Default) : DefaultValue(Default), CurrentValue(Default) {} bool hasValue() const { return CurrentValue != DefaultValue; } SmallVector<Slot, 2> Stack; ValueType DefaultValue; // Value used for PSK_Reset action. ValueType CurrentValue; SourceLocation CurrentPragmaLocation; }; // FIXME: We should serialize / deserialize these if they occur in a PCH (but // we shouldn't do so if they're in a module). /// Whether to insert vtordisps prior to virtual bases in the Microsoft /// C++ ABI. 
Possible values are 0, 1, and 2, which mean: /// /// 0: Suppress all vtordisps /// 1: Insert vtordisps in the presence of vbase overrides and non-trivial /// structors /// 2: Always insert vtordisps to support RTTI on partially constructed /// objects PragmaStack<MSVtorDispAttr::Mode> VtorDispStack; // #pragma pack. // Sentinel to represent when the stack is set to mac68k alignment. static const unsigned kMac68kAlignmentSentinel = ~0U; PragmaStack<unsigned> PackStack; // The current #pragma pack values and locations at each #include. struct PackIncludeState { unsigned CurrentValue; SourceLocation CurrentPragmaLocation; bool HasNonDefaultValue, ShouldWarnOnInclude; }; SmallVector<PackIncludeState, 8> PackIncludeStack; // Segment #pragmas. PragmaStack<StringLiteral *> DataSegStack; PragmaStack<StringLiteral *> BSSSegStack; PragmaStack<StringLiteral *> ConstSegStack; PragmaStack<StringLiteral *> CodeSegStack; // RAII object to push / pop sentinel slots for all MS #pragma stacks. // Actions should be performed only if we enter / exit a C++ method body. class PragmaStackSentinelRAII { public: PragmaStackSentinelRAII(Sema &S, StringRef SlotLabel, bool ShouldAct); ~PragmaStackSentinelRAII(); private: Sema &S; StringRef SlotLabel; bool ShouldAct; }; /// A mapping that describes the nullability we've seen in each header file. FileNullabilityMap NullabilityMap; /// Last section used with #pragma init_seg. StringLiteral *CurInitSeg; SourceLocation CurInitSegLoc; /// VisContext - Manages the stack for \#pragma GCC visibility. void *VisContext; // Really a "PragmaVisStack*" /// This an attribute introduced by \#pragma clang attribute. struct PragmaAttributeEntry { SourceLocation Loc; ParsedAttr *Attribute; SmallVector<attr::SubjectMatchRule, 4> MatchRules; bool IsUsed; }; /// A push'd group of PragmaAttributeEntries. struct PragmaAttributeGroup { /// The location of the push attribute. SourceLocation Loc; /// The namespace of this push group. 
const IdentifierInfo *Namespace; SmallVector<PragmaAttributeEntry, 2> Entries; }; SmallVector<PragmaAttributeGroup, 2> PragmaAttributeStack; /// The declaration that is currently receiving an attribute from the /// #pragma attribute stack. const Decl *PragmaAttributeCurrentTargetDecl; /// This represents the last location of a "#pragma clang optimize off" /// directive if such a directive has not been closed by an "on" yet. If /// optimizations are currently "on", this is set to an invalid location. SourceLocation OptimizeOffPragmaLocation; /// Flag indicating if Sema is building a recovery call expression. /// /// This flag is used to avoid building recovery call expressions /// if Sema is already doing so, which would cause infinite recursions. bool IsBuildingRecoveryCallExpr; /// Used to control the generation of ExprWithCleanups. CleanupInfo Cleanup; /// ExprCleanupObjects - This is the stack of objects requiring /// cleanup that are created by the current full expression. The /// element type here is ExprWithCleanups::Object. SmallVector<BlockDecl*, 8> ExprCleanupObjects; /// Store a set of either DeclRefExprs or MemberExprs that contain a reference /// to a variable (constant) that may or may not be odr-used in this Expr, and /// we won't know until all lvalue-to-rvalue and discarded value conversions /// have been applied to all subexpressions of the enclosing full expression. /// This is cleared at the end of each full expression. using MaybeODRUseExprSet = llvm::SmallPtrSet<Expr *, 2>; MaybeODRUseExprSet MaybeODRUseExprs; std::unique_ptr<sema::FunctionScopeInfo> CachedFunctionScope; /// Stack containing information about each of the nested /// function, block, and method scopes that are currently active. SmallVector<sema::FunctionScopeInfo *, 4> FunctionScopes; typedef LazyVector<TypedefNameDecl *, ExternalSemaSource, &ExternalSemaSource::ReadExtVectorDecls, 2, 2> ExtVectorDeclsType; /// ExtVectorDecls - This is a list all the extended vector types. 
This allows /// us to associate a raw vector type with one of the ext_vector type names. /// This is only necessary for issuing pretty diagnostics. ExtVectorDeclsType ExtVectorDecls; /// FieldCollector - Collects CXXFieldDecls during parsing of C++ classes. std::unique_ptr<CXXFieldCollector> FieldCollector; typedef llvm::SmallSetVector<NamedDecl *, 16> NamedDeclSetType; /// Set containing all declared private fields that are not used. NamedDeclSetType UnusedPrivateFields; /// Set containing all typedefs that are likely unused. llvm::SmallSetVector<const TypedefNameDecl *, 4> UnusedLocalTypedefNameCandidates; /// Delete-expressions to be analyzed at the end of translation unit /// /// This list contains class members, and locations of delete-expressions /// that could not be proven as to whether they mismatch with new-expression /// used in initializer of the field. typedef std::pair<SourceLocation, bool> DeleteExprLoc; typedef llvm::SmallVector<DeleteExprLoc, 4> DeleteLocs; llvm::MapVector<FieldDecl *, DeleteLocs> DeleteExprs; typedef llvm::SmallPtrSet<const CXXRecordDecl*, 8> RecordDeclSetTy; /// PureVirtualClassDiagSet - a set of class declarations which we have /// emitted a list of pure virtual functions. Used to prevent emitting the /// same list more than once. std::unique_ptr<RecordDeclSetTy> PureVirtualClassDiagSet; /// ParsingInitForAutoVars - a set of declarations with auto types for which /// we are currently parsing the initializer. llvm::SmallPtrSet<const Decl*, 4> ParsingInitForAutoVars; /// Look for a locally scoped extern "C" declaration by the given name. NamedDecl *findLocallyScopedExternCDecl(DeclarationName Name); typedef LazyVector<VarDecl *, ExternalSemaSource, &ExternalSemaSource::ReadTentativeDefinitions, 2, 2> TentativeDefinitionsType; /// All the tentative definitions encountered in the TU. 
TentativeDefinitionsType TentativeDefinitions; typedef LazyVector<const DeclaratorDecl *, ExternalSemaSource, &ExternalSemaSource::ReadUnusedFileScopedDecls, 2, 2> UnusedFileScopedDeclsType; /// The set of file scoped decls seen so far that have not been used /// and must warn if not used. Only contains the first declaration. UnusedFileScopedDeclsType UnusedFileScopedDecls; typedef LazyVector<CXXConstructorDecl *, ExternalSemaSource, &ExternalSemaSource::ReadDelegatingConstructors, 2, 2> DelegatingCtorDeclsType; /// All the delegating constructors seen so far in the file, used for /// cycle detection at the end of the TU. DelegatingCtorDeclsType DelegatingCtorDecls; /// All the overriding functions seen during a class definition /// that had their exception spec checks delayed, plus the overridden /// function. SmallVector<std::pair<const CXXMethodDecl*, const CXXMethodDecl*>, 2> DelayedOverridingExceptionSpecChecks; /// All the function redeclarations seen during a class definition that had /// their exception spec checks delayed, plus the prior declaration they /// should be checked against. Except during error recovery, the new decl /// should always be a friend declaration, as that's the only valid way to /// redeclare a special member before its class is complete. SmallVector<std::pair<FunctionDecl*, FunctionDecl*>, 2> DelayedEquivalentExceptionSpecChecks; typedef llvm::MapVector<const FunctionDecl *, std::unique_ptr<LateParsedTemplate>> LateParsedTemplateMapT; LateParsedTemplateMapT LateParsedTemplateMap; /// Callback to the parser to parse templated functions when needed. 
typedef void LateTemplateParserCB(void *P, LateParsedTemplate &LPT); typedef void LateTemplateParserCleanupCB(void *P); LateTemplateParserCB *LateTemplateParser; LateTemplateParserCleanupCB *LateTemplateParserCleanup; void *OpaqueParser; void SetLateTemplateParser(LateTemplateParserCB *LTP, LateTemplateParserCleanupCB *LTPCleanup, void *P) { LateTemplateParser = LTP; LateTemplateParserCleanup = LTPCleanup; OpaqueParser = P; } class DelayedDiagnostics; class DelayedDiagnosticsState { sema::DelayedDiagnosticPool *SavedPool; friend class Sema::DelayedDiagnostics; }; typedef DelayedDiagnosticsState ParsingDeclState; typedef DelayedDiagnosticsState ProcessingContextState; /// A class which encapsulates the logic for delaying diagnostics /// during parsing and other processing. class DelayedDiagnostics { /// The current pool of diagnostics into which delayed /// diagnostics should go. sema::DelayedDiagnosticPool *CurPool; public: DelayedDiagnostics() : CurPool(nullptr) {} /// Adds a delayed diagnostic. void add(const sema::DelayedDiagnostic &diag); // in DelayedDiagnostic.h /// Determines whether diagnostics should be delayed. bool shouldDelayDiagnostics() { return CurPool != nullptr; } /// Returns the current delayed-diagnostics pool. sema::DelayedDiagnosticPool *getCurrentPool() const { return CurPool; } /// Enter a new scope. Access and deprecation diagnostics will be /// collected in this pool. DelayedDiagnosticsState push(sema::DelayedDiagnosticPool &pool) { DelayedDiagnosticsState state; state.SavedPool = CurPool; CurPool = &pool; return state; } /// Leave a delayed-diagnostic state that was previously pushed. /// Do not emit any of the diagnostics. This is performed as part /// of the bookkeeping of popping a pool "properly". void popWithoutEmitting(DelayedDiagnosticsState state) { CurPool = state.SavedPool; } /// Enter a new scope where access and deprecation diagnostics are /// not delayed. 
DelayedDiagnosticsState pushUndelayed() { DelayedDiagnosticsState state; state.SavedPool = CurPool; CurPool = nullptr; return state; } /// Undo a previous pushUndelayed(). void popUndelayed(DelayedDiagnosticsState state) { assert(CurPool == nullptr); CurPool = state.SavedPool; } } DelayedDiagnostics; /// A RAII object to temporarily push a declaration context. class ContextRAII { private: Sema &S; DeclContext *SavedContext; ProcessingContextState SavedContextState; QualType SavedCXXThisTypeOverride; public: ContextRAII(Sema &S, DeclContext *ContextToPush, bool NewThisContext = true) : S(S), SavedContext(S.CurContext), SavedContextState(S.DelayedDiagnostics.pushUndelayed()), SavedCXXThisTypeOverride(S.CXXThisTypeOverride) { assert(ContextToPush && "pushing null context"); S.CurContext = ContextToPush; if (NewThisContext) S.CXXThisTypeOverride = QualType(); } void pop() { if (!SavedContext) return; S.CurContext = SavedContext; S.DelayedDiagnostics.popUndelayed(SavedContextState); S.CXXThisTypeOverride = SavedCXXThisTypeOverride; SavedContext = nullptr; } ~ContextRAII() { pop(); } }; /// Used to change context to isConstantEvaluated without pushing a heavy /// ExpressionEvaluationContextRecord object. bool isConstantEvaluatedOverride; bool isConstantEvaluated() { return ExprEvalContexts.back().isConstantEvaluated() || isConstantEvaluatedOverride; } /// RAII object to handle the state changes required to synthesize /// a function body. 
class SynthesizedFunctionScope { Sema &S; Sema::ContextRAII SavedContext; bool PushedCodeSynthesisContext = false; public: SynthesizedFunctionScope(Sema &S, DeclContext *DC) : S(S), SavedContext(S, DC) { S.PushFunctionScope(); S.PushExpressionEvaluationContext( Sema::ExpressionEvaluationContext::PotentiallyEvaluated); if (auto *FD = dyn_cast<FunctionDecl>(DC)) FD->setWillHaveBody(true); else assert(isa<ObjCMethodDecl>(DC)); } void addContextNote(SourceLocation UseLoc) { assert(!PushedCodeSynthesisContext); Sema::CodeSynthesisContext Ctx; Ctx.Kind = Sema::CodeSynthesisContext::DefiningSynthesizedFunction; Ctx.PointOfInstantiation = UseLoc; Ctx.Entity = cast<Decl>(S.CurContext); S.pushCodeSynthesisContext(Ctx); PushedCodeSynthesisContext = true; } ~SynthesizedFunctionScope() { if (PushedCodeSynthesisContext) S.popCodeSynthesisContext(); if (auto *FD = dyn_cast<FunctionDecl>(S.CurContext)) FD->setWillHaveBody(false); S.PopExpressionEvaluationContext(); S.PopFunctionScopeInfo(); } }; /// WeakUndeclaredIdentifiers - Identifiers contained in /// \#pragma weak before declared. rare. may alias another /// identifier, declared or undeclared llvm::MapVector<IdentifierInfo *, WeakInfo> WeakUndeclaredIdentifiers; /// ExtnameUndeclaredIdentifiers - Identifiers contained in /// \#pragma redefine_extname before declared. Used in Solaris system headers /// to define functions that occur in multiple standards to call the version /// in the currently selected standard. llvm::DenseMap<IdentifierInfo*,AsmLabelAttr*> ExtnameUndeclaredIdentifiers; /// Load weak undeclared identifiers from the external source. void LoadExternalWeakUndeclaredIdentifiers(); /// WeakTopLevelDecl - Translation-unit scoped declarations generated by /// \#pragma weak during processing of other Decls. /// I couldn't figure out a clean way to generate these in-line, so /// we store them here and handle separately -- which is a hack. /// It would be best to refactor this. 
SmallVector<Decl*,2> WeakTopLevelDecl; IdentifierResolver IdResolver; /// Translation Unit Scope - useful to Objective-C actions that need /// to lookup file scope declarations in the "ordinary" C decl namespace. /// For example, user-defined classes, built-in "id" type, etc. Scope *TUScope; /// The C++ "std" namespace, where the standard library resides. LazyDeclPtr StdNamespace; /// The C++ "std::bad_alloc" class, which is defined by the C++ /// standard library. LazyDeclPtr StdBadAlloc; /// The C++ "std::align_val_t" enum class, which is defined by the C++ /// standard library. LazyDeclPtr StdAlignValT; /// The C++ "std::experimental" namespace, where the experimental parts /// of the standard library resides. NamespaceDecl *StdExperimentalNamespaceCache; /// The C++ "std::initializer_list" template, which is defined in /// \<initializer_list>. ClassTemplateDecl *StdInitializerList; /// The C++ "std::coroutine_traits" template, which is defined in /// \<coroutine_traits> ClassTemplateDecl *StdCoroutineTraitsCache; /// The C++ "type_info" declaration, which is defined in \<typeinfo>. RecordDecl *CXXTypeInfoDecl; /// The MSVC "_GUID" struct, which is defined in MSVC header files. RecordDecl *MSVCGuidDecl; /// Caches identifiers/selectors for NSFoundation APIs. std::unique_ptr<NSAPI> NSAPIObj; /// The declaration of the Objective-C NSNumber class. ObjCInterfaceDecl *NSNumberDecl; /// The declaration of the Objective-C NSValue class. ObjCInterfaceDecl *NSValueDecl; /// Pointer to NSNumber type (NSNumber *). QualType NSNumberPointer; /// Pointer to NSValue type (NSValue *). QualType NSValuePointer; /// The Objective-C NSNumber methods used to create NSNumber literals. ObjCMethodDecl *NSNumberLiteralMethods[NSAPI::NumNSNumberLiteralMethods]; /// The declaration of the Objective-C NSString class. ObjCInterfaceDecl *NSStringDecl; /// Pointer to NSString type (NSString *). QualType NSStringPointer; /// The declaration of the stringWithUTF8String: method. 
ObjCMethodDecl *StringWithUTF8StringMethod; /// The declaration of the valueWithBytes:objCType: method. ObjCMethodDecl *ValueWithBytesObjCTypeMethod; /// The declaration of the Objective-C NSArray class. ObjCInterfaceDecl *NSArrayDecl; /// The declaration of the arrayWithObjects:count: method. ObjCMethodDecl *ArrayWithObjectsMethod; /// The declaration of the Objective-C NSDictionary class. ObjCInterfaceDecl *NSDictionaryDecl; /// The declaration of the dictionaryWithObjects:forKeys:count: method. ObjCMethodDecl *DictionaryWithObjectsMethod; /// id<NSCopying> type. QualType QIDNSCopying; /// will hold 'respondsToSelector:' Selector RespondsToSelectorSel; /// A flag to remember whether the implicit forms of operator new and delete /// have been declared. bool GlobalNewDeleteDeclared; /// A flag to indicate that we're in a context that permits abstract /// references to fields. This is really a bool AllowAbstractFieldReference; /// Describes how the expressions currently being parsed are /// evaluated at run-time, if at all. enum class ExpressionEvaluationContext { /// The current expression and its subexpressions occur within an /// unevaluated operand (C++11 [expr]p7), such as the subexpression of /// \c sizeof, where the type of the expression may be significant but /// no code will be generated to evaluate the value of the expression at /// run time. Unevaluated, /// The current expression occurs within a braced-init-list within /// an unevaluated operand. This is mostly like a regular unevaluated /// context, except that we still instantiate constexpr functions that are /// referenced here so that we can perform narrowing checks correctly. UnevaluatedList, /// The current expression occurs within a discarded statement. /// This behaves largely similarly to an unevaluated operand in preventing /// definitions from being required, but not in other ways. 
DiscardedStatement, /// The current expression occurs within an unevaluated /// operand that unconditionally permits abstract references to /// fields, such as a SIZE operator in MS-style inline assembly. UnevaluatedAbstract, /// The current context is "potentially evaluated" in C++11 terms, /// but the expression is evaluated at compile-time (like the values of /// cases in a switch statement). ConstantEvaluated, /// The current expression is potentially evaluated at run time, /// which means that code may be generated to evaluate the value of the /// expression at run time. PotentiallyEvaluated, /// The current expression is potentially evaluated, but any /// declarations referenced inside that expression are only used if /// in fact the current expression is used. /// /// This value is used when parsing default function arguments, for which /// we would like to provide diagnostics (e.g., passing non-POD arguments /// through varargs) but do not want to mark declarations as "referenced" /// until the default argument is used. PotentiallyEvaluatedIfUsed }; /// Data structure used to record current or nested /// expression evaluation contexts. struct ExpressionEvaluationContextRecord { /// The expression evaluation context. ExpressionEvaluationContext Context; /// Whether the enclosing context needed a cleanup. CleanupInfo ParentCleanup; /// Whether we are in a decltype expression. bool IsDecltype; /// The number of active cleanup objects when we entered /// this expression evaluation context. unsigned NumCleanupObjects; /// The number of typos encountered during this expression evaluation /// context (i.e. the number of TypoExprs created). unsigned NumTypos; MaybeODRUseExprSet SavedMaybeODRUseExprs; /// The lambdas that are present within this context, if it /// is indeed an unevaluated context. 
SmallVector<LambdaExpr *, 2> Lambdas;

    /// The declaration that provides context for lambda expressions
    /// and block literals if the normal declaration context does not
    /// suffice, e.g., in a default function argument.
    Decl *ManglingContextDecl;

    /// The context information used to mangle lambda expressions
    /// and block literals within this context.
    ///
    /// This mangling information is allocated lazily, since most contexts
    /// do not have lambda expressions or block literals.
    std::unique_ptr<MangleNumberingContext> MangleNumbering;

    /// If we are processing a decltype type, a set of call expressions
    /// for which we have deferred checking the completeness of the return type.
    SmallVector<CallExpr *, 8> DelayedDecltypeCalls;

    /// If we are processing a decltype type, a set of temporary binding
    /// expressions for which we have deferred checking the destructor.
    SmallVector<CXXBindTemporaryExpr *, 8> DelayedDecltypeBinds;

    /// Candidate expressions for "noderef" diagnostics recorded in this
    /// context (consumed by WarnOnPendingNoDerefs).
    llvm::SmallPtrSet<const Expr *, 8> PossibleDerefs;

    /// \brief Describes whether we are in an expression context which we have
    /// to handle differently.
    enum ExpressionKind {
      EK_Decltype, EK_TemplateArgument, EK_Other
    } ExprContext;

    ExpressionEvaluationContextRecord(ExpressionEvaluationContext Context,
                                      unsigned NumCleanupObjects,
                                      CleanupInfo ParentCleanup,
                                      Decl *ManglingContextDecl,
                                      ExpressionKind ExprContext)
        : Context(Context), ParentCleanup(ParentCleanup),
          NumCleanupObjects(NumCleanupObjects), NumTypos(0),
          ManglingContextDecl(ManglingContextDecl), MangleNumbering(),
          ExprContext(ExprContext) {}

    /// Retrieve the mangling numbering context, used to consistently
    /// number constructs like lambdas for mangling.
MangleNumberingContext &getMangleNumberingContext(ASTContext &Ctx);

// True for all three flavors of unevaluated operand context.
bool isUnevaluated() const {
  return Context == ExpressionEvaluationContext::Unevaluated ||
         Context == ExpressionEvaluationContext::UnevaluatedAbstract ||
         Context == ExpressionEvaluationContext::UnevaluatedList;
}

bool isConstantEvaluated() const {
  return Context == ExpressionEvaluationContext::ConstantEvaluated;
}
};

/// A stack of expression evaluation contexts.
SmallVector<ExpressionEvaluationContextRecord, 8> ExprEvalContexts;

/// Emit a warning for all pending noderef expressions that we recorded.
void WarnOnPendingNoDerefs(ExpressionEvaluationContextRecord &Rec);

/// Compute the mangling number context for a lambda expression or
/// block literal.
///
/// \param DC - The DeclContext containing the lambda expression or
/// block literal.
/// \param[out] ManglingContextDecl - Returns the ManglingContextDecl
/// associated with the context, if relevant.
MangleNumberingContext *getCurrentMangleNumberContext(
    const DeclContext *DC, Decl *&ManglingContextDecl);

/// SpecialMemberOverloadResult - The overloading result for a special member
/// function.
///
/// This is basically a wrapper around PointerIntPair. The lowest bits of the
/// integer are used to determine whether overload resolution succeeded.
class SpecialMemberOverloadResult {
public:
  enum Kind {
    NoMemberOrDeleted,
    Ambiguous,
    Success
  };

private:
  llvm::PointerIntPair<CXXMethodDecl*, 2> Pair;

public:
  SpecialMemberOverloadResult() : Pair() {}
  SpecialMemberOverloadResult(CXXMethodDecl *MD)
      : Pair(MD, MD->isDeleted() ?
NoMemberOrDeleted : Success) {}

  CXXMethodDecl *getMethod() const { return Pair.getPointer(); }
  void setMethod(CXXMethodDecl *MD) { Pair.setPointer(MD); }

  Kind getKind() const { return static_cast<Kind>(Pair.getInt()); }
  void setKind(Kind K) { Pair.setInt(K); }
};

/// A SpecialMemberOverloadResult that can live in a FoldingSet, keyed by the
/// (class, member kind, qualifiers) profile it was computed for.
class SpecialMemberOverloadResultEntry : public llvm::FastFoldingSetNode,
                                         public SpecialMemberOverloadResult {
public:
  SpecialMemberOverloadResultEntry(const llvm::FoldingSetNodeID &ID)
      : FastFoldingSetNode(ID) {}
};

/// A cache of special member function overload resolution results
/// for C++ records.
llvm::FoldingSet<SpecialMemberOverloadResultEntry> SpecialMemberCache;

/// A cache of the flags available in enumerations with the flag_bits
/// attribute.
mutable llvm::DenseMap<const EnumDecl*, llvm::APInt> FlagBitsCache;

/// The kind of translation unit we are processing.
///
/// When we're processing a complete translation unit, Sema will perform
/// end-of-translation-unit semantic tasks (such as creating
/// initializers for tentative definitions in C) once parsing has
/// completed. Modules and precompiled headers perform different kinds of
/// checks.
TranslationUnitKind TUKind;

/// Allocator for Sema-owned objects that live as long as this Sema.
llvm::BumpPtrAllocator BumpAlloc;

/// The number of SFINAE diagnostics that have been trapped.
unsigned NumSFINAEErrors;

typedef llvm::DenseMap<ParmVarDecl *, llvm::TinyPtrVector<ParmVarDecl *>>
    UnparsedDefaultArgInstantiationsMap;

/// A mapping from parameters with unparsed default arguments to the
/// set of instantiations of each parameter.
///
/// This mapping is a temporary data structure used when parsing
/// nested class templates or nested classes of class templates,
/// where we might end up instantiating an inner class before the
/// default arguments of its methods have been parsed.
UnparsedDefaultArgInstantiationsMap UnparsedDefaultArgInstantiations;

// Contains the locations of the beginning of unparsed default
// argument locations.
llvm::DenseMap<ParmVarDecl *, SourceLocation> UnparsedDefaultArgLocs;

/// UndefinedInternals - all the used, undefined objects which require a
/// definition in this translation unit.
llvm::MapVector<NamedDecl *, SourceLocation> UndefinedButUsed;

/// Determine if VD, which must be a variable or function, is an external
/// symbol that nonetheless can't be referenced from outside this translation
/// unit because its type has no linkage and it's not extern "C".
bool isExternalWithNoLinkageType(ValueDecl *VD);

/// Obtain a sorted list of functions that are undefined but ODR-used.
void getUndefinedButUsed(
    SmallVectorImpl<std::pair<NamedDecl *, SourceLocation> > &Undefined);

/// Retrieves list of suspicious delete-expressions that will be checked at
/// the end of translation unit.
const llvm::MapVector<FieldDecl *, DeleteLocs> &
getMismatchingDeleteExpressions() const;

typedef std::pair<ObjCMethodList, ObjCMethodList> GlobalMethods;
typedef llvm::DenseMap<Selector, GlobalMethods> GlobalMethodPool;

/// Method Pool - allows efficient lookup when typechecking messages to "id".
/// We need to maintain a list, since selectors can have differing signatures
/// across classes. In Cocoa, this happens to be extremely uncommon (only 1%
/// of selectors are "overloaded").
/// At the head of the list it is recorded whether there were 0, 1, or >= 2
/// methods inside categories with a particular selector.
GlobalMethodPool MethodPool;

/// Method selectors used in a \@selector expression. Used for implementation
/// of -Wselector.
llvm::MapVector<Selector, SourceLocation> ReferencedSelectors;

/// List of SourceLocations where 'self' is implicitly retained inside a
/// block.
llvm::SmallVector<std::pair<SourceLocation, const BlockDecl *>, 1>
    ImplicitlyRetainedSelfLocs;

/// Kinds of C++ special members.
enum CXXSpecialMember {
  CXXDefaultConstructor,
  CXXCopyConstructor,
  CXXMoveConstructor,
  CXXCopyAssignment,
  CXXMoveAssignment,
  CXXDestructor,
  CXXInvalid
};

/// A (record, special-member-kind) pair packed into a pointer-sized value.
typedef llvm::PointerIntPair<CXXRecordDecl *, 3, CXXSpecialMember>
    SpecialMemberDecl;

/// The C++ special members which we are currently in the process of
/// declaring. If this process recursively triggers the declaration of the
/// same special member, we should act as if it is not yet declared.
llvm::SmallPtrSet<SpecialMemberDecl, 4> SpecialMembersBeingDeclared;

/// The function definitions which were renamed as part of typo-correction
/// to match their respective declarations. We want to keep track of them
/// to ensure that we don't emit a "redefinition" error if we encounter a
/// correctly named definition after the renamed definition.
llvm::SmallPtrSet<const NamedDecl *, 4> TypoCorrectedFunctionDefinitions;

/// Stack of types that correspond to the parameter entities that are
/// currently being copy-initialized. Can be empty.
llvm::SmallVector<QualType, 4> CurrentParameterCopyTypes;

void ReadMethodPool(Selector Sel);
void updateOutOfDateSelector(Selector Sel);

/// Private Helper predicate to check for 'self'.
bool isSelfExpr(Expr *RExpr);
bool isSelfExpr(Expr *RExpr, const ObjCMethodDecl *Method);

/// Cause the active diagnostic on the DiagnosticsEngine to be
/// emitted. This is closely coupled to the SemaDiagnosticBuilder class and
/// should not be used elsewhere.
void EmitCurrentDiagnostic(unsigned DiagID);

/// Records and restores the FP_CONTRACT state on entry/exit of compound
/// statements.
class FPContractStateRAII {
public:
  FPContractStateRAII(Sema &S) : S(S), OldFPFeaturesState(S.FPFeatures) {}
  ~FPContractStateRAII() { S.FPFeatures = OldFPFeaturesState; }

private:
  Sema& S;
  FPOptions OldFPFeaturesState;
};

void addImplicitTypedef(StringRef Name, QualType T);

public:
Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer,
     TranslationUnitKind TUKind = TU_Complete,
     CodeCompleteConsumer *CompletionConsumer = nullptr);
~Sema();

/// Perform initialization that occurs after the parser has been
/// initialized but before it parses anything.
void Initialize();

// Simple accessors for state owned by or referenced from this Sema.
const LangOptions &getLangOpts() const { return LangOpts; }
OpenCLOptions &getOpenCLOptions() { return OpenCLFeatures; }
FPOptions &getFPOptions() { return FPFeatures; }

DiagnosticsEngine &getDiagnostics() const { return Diags; }
SourceManager &getSourceManager() const { return SourceMgr; }
Preprocessor &getPreprocessor() const { return PP; }
ASTContext &getASTContext() const { return Context; }
ASTConsumer &getASTConsumer() const { return Consumer; }
ASTMutationListener *getASTMutationListener() const;
ExternalSemaSource* getExternalSource() const { return ExternalSource; }

///Registers an external source. If an external source already exists,
/// creates a multiplex external source and appends to it.
///
///\param[in] E - A non-null external sema source.
///
void addExternalSource(ExternalSemaSource *E);

void PrintStats() const;

/// Helper class that creates diagnostics with optional
/// template instantiation stacks.
///
/// This class provides a wrapper around the basic DiagnosticBuilder
/// class that emits diagnostics. SemaDiagnosticBuilder is
/// responsible for emitting the diagnostic (as DiagnosticBuilder
/// does) and, if the diagnostic comes from inside a template
/// instantiation, printing the template instantiation stack as
/// well.
class SemaDiagnosticBuilder : public DiagnosticBuilder {
  Sema &SemaRef;
  unsigned DiagID;

public:
  SemaDiagnosticBuilder(DiagnosticBuilder &DB, Sema &SemaRef, unsigned DiagID)
      : DiagnosticBuilder(DB), SemaRef(SemaRef), DiagID(DiagID) { }

  // This is a cunning lie. DiagnosticBuilder actually performs move
  // construction in its copy constructor (but due to varied uses, it's not
  // possible to conveniently express this as actual move construction). So
  // the default copy ctor here is fine, because the base class disables the
  // source anyway, so the user-defined ~SemaDiagnosticBuilder is a safe no-op
  // in that case anyway.
  SemaDiagnosticBuilder(const SemaDiagnosticBuilder&) = default;

  ~SemaDiagnosticBuilder() {
    // If we aren't active, there is nothing to do.
    if (!isActive()) return;

    // Otherwise, we need to emit the diagnostic. First flush the underlying
    // DiagnosticBuilder data, and clear the diagnostic builder itself so it
    // won't emit the diagnostic in its own destructor.
    //
    // This seems wasteful, in that as written the DiagnosticBuilder dtor will
    // do its own needless checks to see if the diagnostic needs to be
    // emitted. However, because we take care to ensure that the builder
    // objects never escape, a sufficiently smart compiler will be able to
    // eliminate that code.
    FlushCounts();
    Clear();

    // Dispatch to Sema to emit the diagnostic.
    SemaRef.EmitCurrentDiagnostic(DiagID);
  }

  /// Teach operator<< to produce an object of the correct type.
  template<typename T>
  friend const SemaDiagnosticBuilder &operator<<(
      const SemaDiagnosticBuilder &Diag, const T &Value) {
    const DiagnosticBuilder &BaseDiag = Diag;
    BaseDiag << Value;
    return Diag;
  }
};

/// Emit a diagnostic.
SemaDiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID) {
  DiagnosticBuilder DB = Diags.Report(Loc, DiagID);
  return SemaDiagnosticBuilder(DB, *this, DiagID);
}

/// Emit a partial diagnostic.
SemaDiagnosticBuilder Diag(SourceLocation Loc, const PartialDiagnostic& PD);

/// Build a partial diagnostic.
PartialDiagnostic PDiag(unsigned DiagID = 0); // in SemaInternal.h

/// Try to recover the spelling of a macro at the given location; returns
/// true on success, updating \p loc.
bool findMacroSpelling(SourceLocation &loc, StringRef name);

/// Get a string to suggest for zero-initialization of a type.
std::string
getFixItZeroInitializerForType(QualType T, SourceLocation Loc) const;
std::string getFixItZeroLiteralForType(QualType T, SourceLocation Loc) const;

/// Calls \c Lexer::getLocForEndOfToken()
SourceLocation getLocForEndOfToken(SourceLocation Loc, unsigned Offset = 0);

/// Retrieve the module loader associated with the preprocessor.
ModuleLoader &getModuleLoader() const;

void emitAndClearUnusedLocalTypedefWarnings();

enum TUFragmentKind {
  /// The global module fragment, between 'module;' and a module-declaration.
  Global,

  /// A normal translation unit fragment. For a non-module unit, this is the
  /// entire translation unit. Otherwise, it runs from the module-declaration
  /// to the private-module-fragment (if any) or the end of the TU (if not).
  Normal,

  /// The private module fragment, between 'module :private;' and the end of
  /// the translation unit.
  Private
};

void ActOnStartOfTranslationUnit();
void ActOnEndOfTranslationUnit();
void ActOnEndOfTranslationUnitFragment(TUFragmentKind Kind);

void CheckDelegatingCtorCycles();

Scope *getScopeForContext(DeclContext *Ctx);

void PushFunctionScope();
void PushBlockScope(Scope *BlockScope, BlockDecl *Block);
sema::LambdaScopeInfo *PushLambdaScope();

/// This is used to inform Sema what the current TemplateParameterDepth
/// is during Parsing. Currently it is used to pass on the depth
/// when parsing generic lambda 'auto' parameters.
void RecordParsingTemplateParameterDepth(unsigned Depth);

void PushCapturedRegionScope(Scope *RegionScope, CapturedDecl *CD,
                             RecordDecl *RD, CapturedRegionKind K);

/// Custom deleter to allow FunctionScopeInfos to be kept alive for a short
/// time after they've been popped.
class PoppedFunctionScopeDeleter {
  Sema *Self;

public:
  explicit PoppedFunctionScopeDeleter(Sema *Self) : Self(Self) {}
  void operator()(sema::FunctionScopeInfo *Scope) const;
};

using PoppedFunctionScopePtr =
    std::unique_ptr<sema::FunctionScopeInfo, PoppedFunctionScopeDeleter>;

PoppedFunctionScopePtr
PopFunctionScopeInfo(const sema::AnalysisBasedWarnings::Policy *WP = nullptr,
                     const Decl *D = nullptr,
                     QualType BlockType = QualType());

/// The innermost function scope, or null if no function scope is active.
sema::FunctionScopeInfo *getCurFunction() const {
  return FunctionScopes.empty() ? nullptr : FunctionScopes.back();
}

sema::FunctionScopeInfo *getEnclosingFunction() const;

void setFunctionHasBranchIntoScope();
void setFunctionHasBranchProtectedScope();
void setFunctionHasIndirectGoto();

void PushCompoundScope(bool IsStmtExpr);
void PopCompoundScope();

sema::CompoundScopeInfo &getCurCompoundScope() const;

bool hasAnyUnrecoverableErrorsInThisFunction() const;

/// Retrieve the current block, if any.
sema::BlockScopeInfo *getCurBlock();

/// Retrieve the current lambda scope info, if any.
/// \param IgnoreNonLambdaCapturingScope true if should find the top-most
/// lambda scope info ignoring all inner capturing scopes that are not
/// lambda scopes.
sema::LambdaScopeInfo *
getCurLambda(bool IgnoreNonLambdaCapturingScope = false);

/// Retrieve the current generic lambda info, if any.
sema::LambdaScopeInfo *getCurGenericLambda();

/// Retrieve the current captured region, if any.
sema::CapturedRegionScopeInfo *getCurCapturedRegion();

/// WeakTopLevelDeclDecls - access to \#pragma weak-generated Decls
SmallVectorImpl<Decl *> &WeakTopLevelDecls() { return WeakTopLevelDecl; }

void ActOnComment(SourceRange Comment);

//===--------------------------------------------------------------------===//
// Type Analysis / Processing: SemaType.cpp.
//

QualType BuildQualifiedType(QualType T, SourceLocation Loc, Qualifiers Qs,
                            const DeclSpec *DS = nullptr);
QualType BuildQualifiedType(QualType T, SourceLocation Loc, unsigned CVRA,
                            const DeclSpec *DS = nullptr);
QualType BuildPointerType(QualType T,
                          SourceLocation Loc, DeclarationName Entity);
QualType BuildReferenceType(QualType T, bool LValueRef,
                            SourceLocation Loc, DeclarationName Entity);
QualType BuildArrayType(QualType T, ArrayType::ArraySizeModifier ASM,
                        Expr *ArraySize, unsigned Quals,
                        SourceRange Brackets, DeclarationName Entity);
QualType BuildVectorType(QualType T, Expr *VecSize, SourceLocation AttrLoc);
QualType BuildExtVectorType(QualType T, Expr *ArraySize,
                            SourceLocation AttrLoc);
QualType BuildAddressSpaceAttr(QualType &T, LangAS ASIdx, Expr *AddrSpace,
                               SourceLocation AttrLoc);

/// Same as above, but constructs the AddressSpace index if not provided.
QualType BuildAddressSpaceAttr(QualType &T, Expr *AddrSpace,
                               SourceLocation AttrLoc);

bool CheckFunctionReturnType(QualType T, SourceLocation Loc);

/// Build a function type.
///
/// This routine checks the function type according to C++ rules and
/// under the assumption that the result type and parameter types have
/// just been instantiated from a template. It therefore duplicates
/// some of the behavior of GetTypeForDeclarator, but in a much
/// simpler form that is only suitable for this narrow use case.
///
/// \param T The return type of the function.
///
/// \param ParamTypes The parameter types of the function. This array
/// will be modified to account for adjustments to the types of the
/// function parameters.
///
/// \param Loc The location of the entity whose type involves this
/// function type or, if there is no such entity, the location of the
/// type that will have function type.
///
/// \param Entity The name of the entity that involves the function
/// type, if known.
///
/// \param EPI Extra information about the function type. Usually this will
/// be taken from an existing function with the same prototype.
///
/// \returns A suitable function type, if there are no errors. The
/// unqualified type will always be a FunctionProtoType.
/// Otherwise, returns a NULL type.
QualType BuildFunctionType(QualType T,
                           MutableArrayRef<QualType> ParamTypes,
                           SourceLocation Loc, DeclarationName Entity,
                           const FunctionProtoType::ExtProtoInfo &EPI);

QualType BuildMemberPointerType(QualType T, QualType Class,
                                SourceLocation Loc,
                                DeclarationName Entity);
QualType BuildBlockPointerType(QualType T,
                               SourceLocation Loc, DeclarationName Entity);
QualType BuildParenType(QualType T);
QualType BuildAtomicType(QualType T, SourceLocation Loc);
QualType BuildReadPipeType(QualType T,
                           SourceLocation Loc);
QualType BuildWritePipeType(QualType T,
                            SourceLocation Loc);

TypeSourceInfo *GetTypeForDeclarator(Declarator &D, Scope *S);
TypeSourceInfo *GetTypeForDeclaratorCast(Declarator &D, QualType FromTy);

/// Package the given type and TSI into a ParsedType.
ParsedType CreateParsedType(QualType T, TypeSourceInfo *TInfo);
DeclarationNameInfo GetNameForDeclarator(Declarator &D);
DeclarationNameInfo GetNameFromUnqualifiedId(const UnqualifiedId &Name);
static QualType GetTypeFromParser(ParsedType Ty,
                                  TypeSourceInfo **TInfo = nullptr);

/// Determine whether evaluating \p E can throw.
CanThrowResult canThrow(const Expr *E);

const FunctionProtoType *ResolveExceptionSpec(SourceLocation Loc,
                                              const FunctionProtoType *FPT);
void UpdateExceptionSpec(FunctionDecl *FD,
                         const FunctionProtoType::ExceptionSpecInfo &ESI);
bool CheckSpecifiedExceptionType(QualType &T, SourceRange Range);
bool CheckDistantExceptionSpec(QualType T);
bool CheckEquivalentExceptionSpec(FunctionDecl *Old, FunctionDecl *New);
bool CheckEquivalentExceptionSpec(
    const FunctionProtoType *Old, SourceLocation OldLoc,
    const FunctionProtoType *New, SourceLocation NewLoc);
bool CheckEquivalentExceptionSpec(
    const PartialDiagnostic &DiagID, const PartialDiagnostic & NoteID,
    const FunctionProtoType *Old, SourceLocation OldLoc,
    const FunctionProtoType *New, SourceLocation NewLoc);

/// Whether a handler of type \p HandlerType can catch an exception of
/// type \p ExceptionType.
bool handlerCanCatch(QualType HandlerType, QualType ExceptionType);

bool CheckExceptionSpecSubset(const PartialDiagnostic &DiagID,
                              const PartialDiagnostic &NestedDiagID,
                              const PartialDiagnostic &NoteID,
                              const PartialDiagnostic &NoThrowDiagID,
                              const FunctionProtoType *Superset,
                              SourceLocation SuperLoc,
                              const FunctionProtoType *Subset,
                              SourceLocation SubLoc);
bool CheckParamExceptionSpec(const PartialDiagnostic &NestedDiagID,
                             const PartialDiagnostic &NoteID,
                             const FunctionProtoType *Target,
                             SourceLocation TargetLoc,
                             const FunctionProtoType *Source,
                             SourceLocation SourceLoc);

TypeResult ActOnTypeName(Scope *S, Declarator &D);

/// The parser has parsed the context-sensitive type 'instancetype'
/// in an Objective-C message declaration. Return the appropriate type.
ParsedType ActOnObjCInstanceType(SourceLocation Loc);

/// Abstract class used to diagnose incomplete types.
struct TypeDiagnoser {
  TypeDiagnoser() {}

  virtual void diagnose(Sema &S, SourceLocation Loc, QualType T) = 0;
  virtual ~TypeDiagnoser() {}
};

// Identity helpers that normalize a value into something that can be
// streamed into a SemaDiagnosticBuilder by BoundTypeDiagnoser below.
static int getPrintable(int I) { return I; }
static unsigned getPrintable(unsigned I) { return I; }
static bool getPrintable(bool B) { return B; }
static const char * getPrintable(const char *S) { return S; }
static StringRef getPrintable(StringRef S) { return S; }
static const std::string &getPrintable(const std::string &S) { return S; }
static const IdentifierInfo *getPrintable(const IdentifierInfo *II) {
  return II;
}
static DeclarationName getPrintable(DeclarationName N) { return N; }
static QualType getPrintable(QualType T) { return T; }
static SourceRange getPrintable(SourceRange R) { return R; }
static SourceRange getPrintable(SourceLocation L) { return L; }
static SourceRange getPrintable(const Expr *E) { return E->getSourceRange(); }
static SourceRange getPrintable(TypeLoc TL) { return TL.getSourceRange();}

/// A TypeDiagnoser that binds a diagnostic ID plus its extra arguments at
/// construction time and emits them when diagnose() is called.
template <typename... Ts> class BoundTypeDiagnoser : public TypeDiagnoser {
  unsigned DiagID;
  std::tuple<const Ts &...> Args;

  template <std::size_t... Is>
  void emit(const SemaDiagnosticBuilder &DB,
            llvm::index_sequence<Is...>) const {
    // Apply all tuple elements to the builder in order.
    bool Dummy[] = {false, (DB << getPrintable(std::get<Is>(Args)))...};
    (void)Dummy;
  }

public:
  BoundTypeDiagnoser(unsigned DiagID, const Ts &...Args)
      : TypeDiagnoser(), DiagID(DiagID), Args(Args...) {
    assert(DiagID != 0 && "no diagnostic for type diagnoser");
  }

  void diagnose(Sema &S, SourceLocation Loc, QualType T) override {
    const SemaDiagnosticBuilder &DB = S.Diag(Loc, DiagID);
    emit(DB, llvm::index_sequence_for<Ts...>());
    DB << T;
  }
};

private:
/// Methods for marking which expressions involve dereferencing a pointer
/// marked with the 'noderef' attribute. Expressions are checked bottom up as
/// they are parsed, meaning that a noderef pointer may not be accessed. For
/// example, in `&*p` where `p` is a noderef pointer, we will first parse the
/// `*p`, but need to check that `address of` is called on it. This requires
/// keeping a container of all pending expressions and checking if the address
/// of them are eventually taken.
void CheckSubscriptAccessOfNoDeref(const ArraySubscriptExpr *E);
void CheckAddressOfNoDeref(const Expr *E);
void CheckMemberAccessOfNoDeref(const MemberExpr *E);

bool RequireCompleteTypeImpl(SourceLocation Loc, QualType T,
                             TypeDiagnoser *Diagnoser);

struct ModuleScope {
  SourceLocation BeginLoc;
  clang::Module *Module = nullptr;
  bool ModuleInterface = false;
  bool ImplicitGlobalModuleFragment = false;
  VisibleModuleSet OuterVisibleModules;
};
/// The modules we're currently parsing.
llvm::SmallVector<ModuleScope, 16> ModuleScopes;

/// Namespace definitions that we will export when they finish.
llvm::SmallPtrSet<const NamespaceDecl*, 8> DeferredExportedNamespaces;

/// Get the module whose scope we are currently within.
Module *getCurrentModule() const {
  return ModuleScopes.empty() ? nullptr : ModuleScopes.back().Module;
}

VisibleModuleSet VisibleModules;

public:
/// Get the module owning an entity.
Module *getOwningModule(Decl *Entity) { return Entity->getOwningModule(); }

/// Make a merged definition of an existing hidden definition \p ND
/// visible at the specified location.
void makeMergedDefinitionVisible(NamedDecl *ND);

bool isModuleVisible(const Module *M, bool ModulePrivate = false);

/// Determine whether a declaration is visible to name lookup.
bool isVisible(const NamedDecl *D) {
  return !D->isHidden() || isVisibleSlow(D);
}

/// Determine whether any declaration of an entity is visible.
bool hasVisibleDeclaration(const NamedDecl *D,
                           llvm::SmallVectorImpl<Module *> *Modules = nullptr) {
  // Fast path: visibility of the canonical declaration; fall back to the
  // slow path that also collects the owning modules.
  return isVisible(D) || hasVisibleDeclarationSlow(D, Modules);
}
bool hasVisibleDeclarationSlow(const NamedDecl *D,
                               llvm::SmallVectorImpl<Module *> *Modules);

bool hasVisibleMergedDefinition(NamedDecl *Def);
bool hasMergedDefinitionInCurrentModule(NamedDecl *Def);

/// Determine if \p D and \p Suggested have a structurally compatible
/// layout as described in C11 6.2.7/1.
bool hasStructuralCompatLayout(Decl *D, Decl *Suggested);

/// Determine if \p D has a visible definition. If not, suggest a declaration
/// that should be made visible to expose the definition.
bool hasVisibleDefinition(NamedDecl *D, NamedDecl **Suggested,
                          bool OnlyNeedComplete = false);
bool hasVisibleDefinition(const NamedDecl *D) {
  NamedDecl *Hidden;
  return hasVisibleDefinition(const_cast<NamedDecl*>(D), &Hidden);
}

/// Determine if the template parameter \p D has a visible default argument.
bool
hasVisibleDefaultArgument(const NamedDecl *D,
                          llvm::SmallVectorImpl<Module *> *Modules = nullptr);

/// Determine if there is a visible declaration of \p D that is an explicit
/// specialization declaration for a specialization of a template. (For a
/// member specialization, use hasVisibleMemberSpecialization.)
bool hasVisibleExplicitSpecialization(
    const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr);

/// Determine if there is a visible declaration of \p D that is a member
/// specialization declaration (as opposed to an instantiated declaration).
bool hasVisibleMemberSpecialization(
    const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr);

/// Determine if \p A and \p B are equivalent internal linkage declarations
/// from different modules, and thus an ambiguity error can be downgraded to
/// an extension warning.
bool isEquivalentInternalLinkageDeclaration(const NamedDecl *A,
                                            const NamedDecl *B);
void diagnoseEquivalentInternalLinkageDeclarations(
    SourceLocation Loc, const NamedDecl *D,
    ArrayRef<const NamedDecl *> Equiv);

bool isUsualDeallocationFunction(const CXXMethodDecl *FD);

/// True when \p T is complete at \p Loc; never emits a diagnostic.
bool isCompleteType(SourceLocation Loc, QualType T) {
  return !RequireCompleteTypeImpl(Loc, T, nullptr);
}
bool RequireCompleteType(SourceLocation Loc, QualType T,
                         TypeDiagnoser &Diagnoser);
bool RequireCompleteType(SourceLocation Loc, QualType T,
                         unsigned DiagID);

// Convenience overload: bundle the diagnostic ID and its arguments into a
// BoundTypeDiagnoser and defer to the diagnoser-based overload.
template <typename... Ts>
bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID,
                         const Ts &...Args) {
  BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
  return RequireCompleteType(Loc, T, Diagnoser);
}

void completeExprArrayBound(Expr *E);
bool RequireCompleteExprType(Expr *E, TypeDiagnoser &Diagnoser);
bool RequireCompleteExprType(Expr *E, unsigned DiagID);

template <typename... Ts>
bool RequireCompleteExprType(Expr *E, unsigned DiagID, const Ts &...Args) {
  BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
  return RequireCompleteExprType(E, Diagnoser);
}

bool RequireLiteralType(SourceLocation Loc, QualType T,
                        TypeDiagnoser &Diagnoser);
bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID);

template <typename... Ts>
bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID,
                        const Ts &...Args) {
  BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
  return RequireLiteralType(Loc, T, Diagnoser);
}

QualType getElaboratedType(ElaboratedTypeKeyword Keyword,
                           const CXXScopeSpec &SS, QualType T,
                           TagDecl *OwnedTagDecl = nullptr);

QualType BuildTypeofExprType(Expr *E, SourceLocation Loc);
/// If AsUnevaluated is false, E is treated as though it were an evaluated
/// context, such as when building a type for decltype(auto).
QualType BuildDecltypeType(Expr *E, SourceLocation Loc,
                           bool AsUnevaluated = true);
QualType BuildUnaryTransformType(QualType BaseType,
                                 UnaryTransformType::UTTKind UKind,
                                 SourceLocation Loc);

//===--------------------------------------------------------------------===//
// Symbol table / Decl tracking callbacks: SemaDecl.cpp.
//

/// Records whether a redundant definition's body can be skipped, and the
/// previous/new declarations involved in that decision.
struct SkipBodyInfo {
  SkipBodyInfo()
      : ShouldSkip(false), CheckSameAsPrevious(false), Previous(nullptr),
        New(nullptr) {}
  bool ShouldSkip;
  bool CheckSameAsPrevious;
  NamedDecl *Previous;
  NamedDecl *New;
};

DeclGroupPtrTy ConvertDeclToDeclGroup(Decl *Ptr, Decl *OwnedType = nullptr);

void DiagnoseUseOfUnimplementedSelectors();

bool isSimpleTypeSpecifier(tok::TokenKind Kind) const;

ParsedType getTypeName(const IdentifierInfo &II, SourceLocation NameLoc,
                       Scope *S, CXXScopeSpec *SS = nullptr,
                       bool isClassName = false, bool HasTrailingDot = false,
                       ParsedType ObjectType = nullptr,
                       bool IsCtorOrDtorName = false,
                       bool WantNontrivialTypeSourceInfo = false,
                       bool IsClassTemplateDeductionContext = true,
                       IdentifierInfo **CorrectedII = nullptr);
TypeSpecifierType isTagName(IdentifierInfo &II, Scope *S);
bool isMicrosoftMissingTypename(const CXXScopeSpec *SS, Scope *S);
void DiagnoseUnknownTypeName(IdentifierInfo *&II,
                             SourceLocation IILoc,
                             Scope *S,
                             CXXScopeSpec *SS,
                             ParsedType &SuggestedType,
                             bool IsTemplateName = false);

/// Attempt to behave like MSVC in situations where lookup of an unqualified
/// type name has failed in a dependent context. In these situations, we
/// automatically form a DependentTypeName that will retry lookup in a related
/// scope during instantiation.
ParsedType ActOnMSVCUnknownTypeName(const IdentifierInfo &II,
                                    SourceLocation NameLoc,
                                    bool IsTemplateTypeArg);

/// Describes the result of the name lookup and resolution performed
/// by \c ClassifyName().
enum NameClassificationKind {
  NC_Unknown,
  NC_Error,
  NC_Keyword,
  NC_Type,
  NC_Expression,
  NC_NestedNameSpecifier,
  NC_TypeTemplate,
  NC_VarTemplate,
  NC_FunctionTemplate,
  NC_UndeclaredTemplate,
};

/// Tagged-union result of ClassifyName(): exactly one of Expr, Template or
/// Type is meaningful, selected by Kind.
class NameClassification {
  NameClassificationKind Kind;
  ExprResult Expr;
  TemplateName Template;
  ParsedType Type;

  explicit NameClassification(NameClassificationKind Kind) : Kind(Kind) {}

public:
  NameClassification(ExprResult Expr) : Kind(NC_Expression), Expr(Expr) {}

  NameClassification(ParsedType Type) : Kind(NC_Type), Type(Type) {}

  NameClassification(const IdentifierInfo *Keyword) : Kind(NC_Keyword) {}

  static NameClassification Error() {
    return NameClassification(NC_Error);
  }

  static NameClassification Unknown() {
    return NameClassification(NC_Unknown);
  }

  static NameClassification NestedNameSpecifier() {
    return NameClassification(NC_NestedNameSpecifier);
  }

  static NameClassification TypeTemplate(TemplateName Name) {
    NameClassification Result(NC_TypeTemplate);
    Result.Template = Name;
    return Result;
  }

  static NameClassification VarTemplate(TemplateName Name) {
    NameClassification Result(NC_VarTemplate);
    Result.Template = Name;
    return Result;
  }

  static NameClassification FunctionTemplate(TemplateName Name) {
    NameClassification Result(NC_FunctionTemplate);
    Result.Template = Name;
    return Result;
  }

  static NameClassification UndeclaredTemplate(TemplateName Name) {
    NameClassification Result(NC_UndeclaredTemplate);
    Result.Template = Name;
    return Result;
  }

  NameClassificationKind getKind() const { return Kind; }

  ParsedType getType() const {
    assert(Kind == NC_Type);
    return Type;
  }

  ExprResult getExpression() const {
    assert(Kind == NC_Expression);
    return Expr;
  }

  TemplateName getTemplateName() const {
    assert(Kind == NC_TypeTemplate || Kind == NC_FunctionTemplate ||
           Kind == NC_VarTemplate || Kind == NC_UndeclaredTemplate);
    return Template;
  }

  TemplateNameKind getTemplateNameKind() const {
    switch (Kind) {
    case NC_TypeTemplate:
      return TNK_Type_template;
    case NC_FunctionTemplate:
      return
TNK_Function_template;
    case NC_VarTemplate:
      return TNK_Var_template;
    case NC_UndeclaredTemplate:
      return TNK_Undeclared_template;
    default:
      llvm_unreachable("unsupported name classification.");
    }
  }
};

/// Perform name lookup on the given name, classifying it based on
/// the results of name lookup and the following token.
///
/// This routine is used by the parser to resolve identifiers and help direct
/// parsing. When the identifier cannot be found, this routine will attempt
/// to correct the typo and classify based on the resulting name.
///
/// \param S The scope in which we're performing name lookup.
///
/// \param SS The nested-name-specifier that precedes the name.
///
/// \param Name The identifier. If typo correction finds an alternative name,
/// this pointer parameter will be updated accordingly.
///
/// \param NameLoc The location of the identifier.
///
/// \param NextToken The token following the identifier. Used to help
/// disambiguate the name.
///
/// \param IsAddressOfOperand True if this name is the operand of a unary
/// address of ('&') expression, assuming it is classified as an
/// expression.
///
/// \param CCC The correction callback, if typo correction is desired.
NameClassification ClassifyName(Scope *S, CXXScopeSpec &SS,
                                IdentifierInfo *&Name, SourceLocation NameLoc,
                                const Token &NextToken,
                                bool IsAddressOfOperand,
                                CorrectionCandidateCallback *CCC = nullptr);

// C++AMP declarator diagnostic functions
bool DiagnoseCXXAMPDecl(Decl* Dcl, bool CheckContainer = false,
                        bool IsInfer = false);
bool IsCXXAMPTileStatic(Declarator &D);
void DiagnosticCXXAMPTileStatic(Declarator &D, Decl *Dcl);

/// Describes the detailed kind of a template name. Used in diagnostics.
enum class TemplateNameKindForDiagnostics { ClassTemplate, FunctionTemplate, VarTemplate, AliasTemplate, TemplateTemplateParam, Concept, DependentTemplate }; TemplateNameKindForDiagnostics getTemplateNameKindForDiagnostics(TemplateName Name); /// Determine whether it's plausible that E was intended to be a /// template-name. bool mightBeIntendedToBeTemplateName(ExprResult E, bool &Dependent) { if (!getLangOpts().CPlusPlus || E.isInvalid()) return false; Dependent = false; if (auto *DRE = dyn_cast<DeclRefExpr>(E.get())) return !DRE->hasExplicitTemplateArgs(); if (auto *ME = dyn_cast<MemberExpr>(E.get())) return !ME->hasExplicitTemplateArgs(); Dependent = true; if (auto *DSDRE = dyn_cast<DependentScopeDeclRefExpr>(E.get())) return !DSDRE->hasExplicitTemplateArgs(); if (auto *DSME = dyn_cast<CXXDependentScopeMemberExpr>(E.get())) return !DSME->hasExplicitTemplateArgs(); // Any additional cases recognized here should also be handled by // diagnoseExprIntendedAsTemplateName. return false; } void diagnoseExprIntendedAsTemplateName(Scope *S, ExprResult TemplateName, SourceLocation Less, SourceLocation Greater); Decl *ActOnDeclarator(Scope *S, Declarator &D); NamedDecl *HandleDeclarator(Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParameterLists); void RegisterLocallyScopedExternCDecl(NamedDecl *ND, Scope *S); bool DiagnoseClassNameShadow(DeclContext *DC, DeclarationNameInfo Info); bool diagnoseQualifiedDeclaration(CXXScopeSpec &SS, DeclContext *DC, DeclarationName Name, SourceLocation Loc, bool IsTemplateId); void diagnoseIgnoredQualifiers(unsigned DiagID, unsigned Quals, SourceLocation FallbackLoc, SourceLocation ConstQualLoc = SourceLocation(), SourceLocation VolatileQualLoc = SourceLocation(), SourceLocation RestrictQualLoc = SourceLocation(), SourceLocation AtomicQualLoc = SourceLocation(), SourceLocation UnalignedQualLoc = SourceLocation()); static bool adjustContextForLocalExternDecl(DeclContext *&DC); void DiagnoseFunctionSpecifiers(const DeclSpec &DS); 
// Shadowing diagnostics: find what a new declaration shadows and warn.
NamedDecl *getShadowedDeclaration(const TypedefNameDecl *D,
                                  const LookupResult &R);
NamedDecl *getShadowedDeclaration(const VarDecl *D, const LookupResult &R);
void CheckShadow(NamedDecl *D, NamedDecl *ShadowedDecl,
                 const LookupResult &R);
void CheckShadow(Scope *S, VarDecl *D);

/// Warn if 'E', which is an expression that is about to be modified, refers
/// to a shadowing declaration.
void CheckShadowingDeclModification(Expr *E, SourceLocation Loc);

void DiagnoseShadowingLambdaDecls(const sema::LambdaScopeInfo *LSI);

private:
/// Map of current shadowing declarations to shadowed declarations. Warn if
/// it looks like the user is trying to modify the shadowing declaration.
llvm::DenseMap<const NamedDecl *, const NamedDecl *> ShadowingDecls;

public:
void CheckCastAlign(Expr *Op, QualType T, SourceRange TRange);
void handleTagNumbering(const TagDecl *Tag, Scope *TagScope);
void setTagNameForLinkagePurposes(TagDecl *TagFromDeclSpec,
                                  TypedefNameDecl *NewTD);
void CheckTypedefForVariablyModifiedType(Scope *S, TypedefNameDecl *D);

// Declarator processing for typedefs, variables and decompositions.
NamedDecl* ActOnTypedefDeclarator(Scope* S, Declarator& D, DeclContext* DC,
                                  TypeSourceInfo *TInfo,
                                  LookupResult &Previous);
NamedDecl* ActOnTypedefNameDecl(Scope* S, DeclContext* DC,
                                TypedefNameDecl *D, LookupResult &Previous,
                                bool &Redeclaration);
NamedDecl *ActOnVariableDeclarator(Scope *S, Declarator &D, DeclContext *DC,
                                   TypeSourceInfo *TInfo,
                                   LookupResult &Previous,
                                   MultiTemplateParamsArg TemplateParamLists,
                                   bool &AddToScope,
                                   ArrayRef<BindingDecl *> Bindings = None);
NamedDecl *
ActOnDecompositionDeclarator(Scope *S, Declarator &D,
                             MultiTemplateParamsArg TemplateParamLists);
// Returns true if the variable declaration is a redeclaration
bool CheckVariableDeclaration(VarDecl *NewVD, LookupResult &Previous);
void CheckVariableDeclarationType(VarDecl *NewVD);
bool DeduceVariableDeclarationType(VarDecl *VDecl, bool DirectInit,
                                   Expr *Init);
void CheckCompleteVariableDeclaration(VarDecl *VD);
void CheckCompleteDecompositionDeclaration(DecompositionDecl *DD);
void MaybeSuggestAddingStaticToDecl(const FunctionDecl *D);

// Function declarator processing and redeclaration/override checking.
NamedDecl* ActOnFunctionDeclarator(Scope* S, Declarator& D, DeclContext* DC,
                                   TypeSourceInfo *TInfo,
                                   LookupResult &Previous,
                                   MultiTemplateParamsArg TemplateParamLists,
                                   bool &AddToScope);
bool AddOverriddenMethods(CXXRecordDecl *DC, CXXMethodDecl *MD);

bool CheckConstexprFunctionDecl(const FunctionDecl *FD);
bool CheckConstexprFunctionBody(const FunctionDecl *FD, Stmt *Body);

// Diagnostics for virtual methods hidden by other overloads.
void DiagnoseHiddenVirtualMethods(CXXMethodDecl *MD);
void FindHiddenVirtualMethods(CXXMethodDecl *MD,
                              SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods);
void NoteHiddenVirtualMethods(CXXMethodDecl *MD,
                              SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods);
// Returns true if the function declaration is a redeclaration
bool CheckFunctionDeclaration(Scope *S, FunctionDecl *NewFD,
                              LookupResult &Previous,
                              bool IsMemberSpecialization);
bool shouldLinkDependentDeclWithPrevious(Decl *D, Decl *OldDecl);
bool canFullyTypeCheckRedeclaration(ValueDecl *NewD, ValueDecl *OldD,
                                    QualType NewT, QualType OldT);
void CheckMain(FunctionDecl *FD, const DeclSpec &D);
void CheckMSVCRTEntryPoint(FunctionDecl *FD);
Attr *getImplicitCodeSegOrSectionAttrForFunction(const FunctionDecl *FD,
                                                 bool IsDefinition);
void CheckFunctionOrTemplateParamDeclarator(Scope *S, Declarator &D);

// Parameter declarations and their default arguments.
Decl *ActOnParamDeclarator(Scope *S, Declarator &D);
ParmVarDecl *BuildParmVarDeclForTypedef(DeclContext *DC,
                                        SourceLocation Loc,
                                        QualType T);
ParmVarDecl *CheckParameter(DeclContext *DC, SourceLocation StartLoc,
                            SourceLocation NameLoc, IdentifierInfo *Name,
                            QualType T, TypeSourceInfo *TSInfo,
                            StorageClass SC);
void ActOnParamDefaultArgument(Decl *param,
                               SourceLocation EqualLoc,
                               Expr *defarg);
void ActOnParamUnparsedDefaultArgument(Decl *param,
                                       SourceLocation EqualLoc,
                                       SourceLocation ArgLoc);
void ActOnParamDefaultArgumentError(Decl *param, SourceLocation EqualLoc);
bool SetParamDefaultArgument(ParmVarDecl *Param, Expr *DefaultArg,
                             SourceLocation EqualLoc);

// Initializers and special member-state markers on declarations.
void AddInitializerToDecl(Decl *dcl, Expr *init, bool DirectInit);
void ActOnUninitializedDecl(Decl *dcl);
void ActOnInitializerError(Decl *Dcl);

void ActOnPureSpecifier(Decl *D, SourceLocation PureSpecLoc);
void ActOnCXXForRangeDecl(Decl *D);
StmtResult ActOnCXXForRangeIdentifier(Scope *S, SourceLocation IdentLoc,
                                      IdentifierInfo *Ident,
                                      ParsedAttributes &Attrs,
                                      SourceLocation AttrEnd);
void SetDeclDeleted(Decl *dcl, SourceLocation DelLoc);
void SetDeclDefaulted(Decl *dcl, SourceLocation DefaultLoc);
void CheckStaticLocalForDllExport(VarDecl *VD);
void FinalizeDeclaration(Decl *D);
DeclGroupPtrTy FinalizeDeclaratorGroup(Scope *S, const DeclSpec &DS,
                                       ArrayRef<Decl *> Group);
DeclGroupPtrTy BuildDeclaratorGroup(MutableArrayRef<Decl *> Group);

/// Should be called on all declarations that might have attached
/// documentation comments.
void ActOnDocumentableDecl(Decl *D);
void ActOnDocumentableDecls(ArrayRef<Decl *> Group);

void ActOnFinishKNRParamDeclarations(Scope *S, Declarator &D,
                                     SourceLocation LocAfterDecls);
void CheckForFunctionRedefinition(
    FunctionDecl *FD, const FunctionDecl *EffectiveDefinition = nullptr,
    SkipBodyInfo *SkipBody = nullptr);
Decl *ActOnStartOfFunctionDef(Scope *S, Declarator &D,
                              MultiTemplateParamsArg TemplateParamLists,
                              SkipBodyInfo *SkipBody = nullptr);
Decl *ActOnStartOfFunctionDef(Scope *S, Decl *D,
                              SkipBodyInfo *SkipBody = nullptr);
void ActOnStartOfObjCMethodDef(Scope *S, Decl *D);
bool isObjCMethodDecl(Decl *D) {
  return D && isa<ObjCMethodDecl>(D);
}

/// Determine whether we can delay parsing the body of a function or
/// function template until it is used, assuming we don't care about emitting
/// code for that function.
///
/// This will be \c false if we may need the body of the function in the
/// middle of parsing an expression (where it's impractical to switch to
/// parsing a different function), for instance, if it's constexpr in C++11
/// or has an 'auto' return type in C++14.
/// These cases are essentially bugs.
bool canDelayFunctionBody(const Declarator &D);

/// Determine whether we can skip parsing the body of a function
/// definition, assuming we don't care about analyzing its body or emitting
/// code for that function.
///
/// This will be \c false only if we may need the body of the function in
/// order to parse the rest of the program (for instance, if it is
/// \c constexpr in C++11 or has an 'auto' return type in C++14).
bool canSkipFunctionBody(Decl *D);

// C++AMP restriction specifier inferring routine
void TryCXXAMPRestrictionInferring(Decl *D, Stmt *Body);

void computeNRVO(Stmt *Body, sema::FunctionScopeInfo *Scope);
Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body);
Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body, bool IsInstantiation);
Decl *ActOnSkippedFunctionBody(Decl *Decl);
void ActOnFinishInlineFunctionDef(FunctionDecl *D);

/// ActOnFinishDelayedAttribute - Invoked when we have finished parsing an
/// attribute for which parsing is delayed.
void ActOnFinishDelayedAttribute(Scope *S, Decl *D, ParsedAttributes &Attrs);

/// Diagnose any unused parameters in the given sequence of
/// ParmVarDecl pointers.
void DiagnoseUnusedParameters(ArrayRef<ParmVarDecl *> Parameters);

/// Diagnose whether the size of parameters or return value of a
/// function or obj-c method definition is pass-by-value and larger than a
/// specified threshold.
void
DiagnoseSizeOfParametersAndReturnValue(ArrayRef<ParmVarDecl *> Parameters,
                                       QualType ReturnTy, NamedDecl *D);

void DiagnoseInvalidJumps(Stmt *Body);
Decl *ActOnFileScopeAsmDecl(Expr *expr,
                            SourceLocation AsmLoc,
                            SourceLocation RParenLoc);

/// Handle a C++11 empty-declaration and attribute-declaration.
Decl *ActOnEmptyDeclaration(Scope *S, const ParsedAttributesView &AttrList,
                            SourceLocation SemiLoc);

enum class ModuleDeclKind {
  Interface,      ///< 'export module X;'
  Implementation, ///< 'module X;'
};

/// The parser has processed a module-declaration that begins the definition
/// of a module interface or implementation.
DeclGroupPtrTy ActOnModuleDecl(SourceLocation StartLoc,
                               SourceLocation ModuleLoc, ModuleDeclKind MDK,
                               ModuleIdPath Path, bool IsFirstDecl);

/// The parser has processed a global-module-fragment declaration that begins
/// the definition of the global module fragment of the current module unit.
/// \param ModuleLoc The location of the 'module' keyword.
DeclGroupPtrTy ActOnGlobalModuleFragmentDecl(SourceLocation ModuleLoc);

/// The parser has processed a private-module-fragment declaration that begins
/// the definition of the private module fragment of the current module unit.
/// \param ModuleLoc The location of the 'module' keyword.
/// \param PrivateLoc The location of the 'private' keyword.
DeclGroupPtrTy ActOnPrivateModuleFragmentDecl(SourceLocation ModuleLoc,
                                              SourceLocation PrivateLoc);

/// The parser has processed a module import declaration.
///
/// \param StartLoc The location of the first token in the declaration. This
///        could be the location of an '@', 'export', or 'import'.
/// \param ExportLoc The location of the 'export' keyword, if any.
/// \param ImportLoc The location of the 'import' keyword.
/// \param Path The module access path.
DeclResult ActOnModuleImport(SourceLocation StartLoc,
                             SourceLocation ExportLoc,
                             SourceLocation ImportLoc, ModuleIdPath Path);
DeclResult ActOnModuleImport(SourceLocation StartLoc,
                             SourceLocation ExportLoc,
                             SourceLocation ImportLoc, Module *M,
                             ModuleIdPath Path = {});

/// The parser has processed a module import translated from a
/// #include or similar preprocessing directive.
void ActOnModuleInclude(SourceLocation DirectiveLoc, Module *Mod);
void BuildModuleInclude(SourceLocation DirectiveLoc, Module *Mod);

/// The parser has entered a submodule.
void ActOnModuleBegin(SourceLocation DirectiveLoc, Module *Mod);
/// The parser has left a submodule.
void ActOnModuleEnd(SourceLocation DirectiveLoc, Module *Mod);

/// Create an implicit import of the given module at the given
/// source location, for error recovery, if possible.
///
/// This routine is typically used when an entity found by name lookup
/// is actually hidden within a module that we know about but the user
/// has forgotten to import.
void createImplicitModuleImportForErrorRecovery(SourceLocation Loc,
                                                Module *Mod);

/// Kinds of missing import. Note, the values of these enumerators correspond
/// to %select values in diagnostics.
enum class MissingImportKind {
  Declaration,
  Definition,
  DefaultArgument,
  ExplicitSpecialization,
  PartialSpecialization
};

/// Diagnose that the specified declaration needs to be visible but
/// isn't, and suggest a module import that would resolve the problem.
void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl,
                           MissingImportKind MIK, bool Recover = true);
void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl,
                           SourceLocation DeclLoc, ArrayRef<Module *> Modules,
                           MissingImportKind MIK, bool Recover);

Decl *ActOnStartExportDecl(Scope *S, SourceLocation ExportLoc,
                           SourceLocation LBraceLoc);
Decl *ActOnFinishExportDecl(Scope *S, Decl *ExportDecl,
                            SourceLocation RBraceLoc);

/// We've found a use of a templated declaration that would trigger an
/// implicit instantiation. Check that any relevant explicit specializations
/// and partial specializations are visible, and diagnose if not.
void checkSpecializationVisibility(SourceLocation Loc, NamedDecl *Spec);

/// We've found a use of a template specialization that would select a
/// partial specialization. Check that the partial specialization is visible,
/// and diagnose if not.
void checkPartialSpecializationVisibility(SourceLocation Loc,
                                          NamedDecl *Spec);

/// Retrieve a suitable printing policy for diagnostics.
PrintingPolicy getPrintingPolicy() const {
  return getPrintingPolicy(Context, PP);
}

/// Retrieve a suitable printing policy for diagnostics.
static PrintingPolicy getPrintingPolicy(const ASTContext &Ctx,
                                        const Preprocessor &PP);

/// Scope actions.
void ActOnPopScope(SourceLocation Loc, Scope *S);
void ActOnTranslationUnitScope(Scope *S);

Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS,
                                 DeclSpec &DS,
                                 RecordDecl *&AnonRecord);
Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS,
                                 DeclSpec &DS,
                                 MultiTemplateParamsArg TemplateParams,
                                 bool IsExplicitInstantiation,
                                 RecordDecl *&AnonRecord);

Decl *BuildAnonymousStructOrUnion(Scope *S, DeclSpec &DS,
                                  AccessSpecifier AS,
                                  RecordDecl *Record,
                                  const PrintingPolicy &Policy);

Decl *BuildMicrosoftCAnonymousStruct(Scope *S, DeclSpec &DS,
                                     RecordDecl *Record);

/// Common ways to introduce type names without a tag for use in diagnostics.
/// Keep in sync with err_tag_reference_non_tag.
enum NonTagKind {
  NTK_NonStruct,
  NTK_NonClass,
  NTK_NonUnion,
  NTK_NonEnum,
  NTK_Typedef,
  NTK_TypeAlias,
  NTK_Template,
  NTK_TypeAliasTemplate,
  NTK_TemplateTemplateArgument,
};

/// Given a non-tag type declaration, returns an enum useful for indicating
/// what kind of non-tag type this is.
NonTagKind getNonTagTypeDeclKind(const Decl *D, TagTypeKind TTK);

bool isAcceptableTagRedeclaration(const TagDecl *Previous,
                                  TagTypeKind NewTag, bool isDefinition,
                                  SourceLocation NewTagLoc,
                                  const IdentifierInfo *Name);

enum TagUseKind {
  TUK_Reference,   // Reference to a tag:  'struct foo *X;'
  TUK_Declaration, // Fwd decl of a tag:   'struct foo;'
  TUK_Definition,  // Definition of a tag: 'struct foo { int X; } Y;'
  TUK_Friend       // Friend declaration:  'friend struct foo;'
};

Decl *ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK,
               SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name,
               SourceLocation NameLoc, const ParsedAttributesView &Attr,
               AccessSpecifier AS, SourceLocation ModulePrivateLoc,
               MultiTemplateParamsArg TemplateParameterLists, bool &OwnedDecl,
               bool &IsDependent, SourceLocation ScopedEnumKWLoc,
               bool ScopedEnumUsesClassTag, TypeResult UnderlyingType,
               bool IsTypeSpecifier, bool IsTemplateParamOrArg,
               SkipBodyInfo *SkipBody = nullptr);

Decl *ActOnTemplatedFriendTag(Scope *S, SourceLocation FriendLoc,
                              unsigned TagSpec, SourceLocation TagLoc,
                              CXXScopeSpec &SS, IdentifierInfo *Name,
                              SourceLocation NameLoc,
                              const ParsedAttributesView &Attr,
                              MultiTemplateParamsArg TempParamLists);

TypeResult ActOnDependentTag(Scope *S,
                             unsigned TagSpec,
                             TagUseKind TUK,
                             const CXXScopeSpec &SS,
                             IdentifierInfo *Name,
                             SourceLocation TagLoc,
                             SourceLocation NameLoc);

void ActOnDefs(Scope *S, Decl *TagD, SourceLocation DeclStart,
               IdentifierInfo *ClassName,
               SmallVectorImpl<Decl *> &Decls);
Decl *ActOnField(Scope *S, Decl *TagD, SourceLocation DeclStart,
                 Declarator &D, Expr *BitfieldWidth);

FieldDecl *HandleField(Scope *S, RecordDecl *TagD, SourceLocation DeclStart,
                       Declarator &D, Expr *BitfieldWidth,
                       InClassInitStyle InitStyle,
                       AccessSpecifier AS);
MSPropertyDecl *HandleMSProperty(Scope *S, RecordDecl *TagD,
                                 SourceLocation DeclStart, Declarator &D,
                                 Expr *BitfieldWidth,
                                 InClassInitStyle InitStyle,
                                 AccessSpecifier AS,
                                 const ParsedAttr &MSPropertyAttr);

FieldDecl
*CheckFieldDecl(DeclarationName Name, QualType T,
                TypeSourceInfo *TInfo,
                RecordDecl *Record, SourceLocation Loc,
                bool Mutable, Expr *BitfieldWidth,
                InClassInitStyle InitStyle,
                SourceLocation TSSL,
                AccessSpecifier AS, NamedDecl *PrevDecl,
                Declarator *D = nullptr);

bool CheckNontrivialField(FieldDecl *FD);
void DiagnoseNontrivial(const CXXRecordDecl *Record, CXXSpecialMember CSM);

enum TrivialABIHandling {
  /// The triviality of a method unaffected by "trivial_abi".
  TAH_IgnoreTrivialABI,

  /// The triviality of a method affected by "trivial_abi".
  TAH_ConsiderTrivialABI
};

bool SpecialMemberIsTrivial(CXXMethodDecl *MD, CXXSpecialMember CSM,
                            TrivialABIHandling TAH = TAH_IgnoreTrivialABI,
                            bool Diagnose = false);
CXXSpecialMember getSpecialMember(const CXXMethodDecl *MD);

void ActOnLastBitfield(SourceLocation DeclStart,
                       SmallVectorImpl<Decl *> &AllIvarDecls);
Decl *ActOnIvar(Scope *S, SourceLocation DeclStart,
                Declarator &D, Expr *BitfieldWidth,
                tok::ObjCKeywordKind visibility);

// This is used for both record definitions and ObjC interface declarations.
void ActOnFields(Scope *S, SourceLocation RecLoc, Decl *TagDecl,
                 ArrayRef<Decl *> Fields, SourceLocation LBrac,
                 SourceLocation RBrac, const ParsedAttributesView &AttrList);

/// ActOnTagStartDefinition - Invoked when we have entered the
/// scope of a tag's definition (e.g., for an enumeration, class,
/// struct, or union).
void ActOnTagStartDefinition(Scope *S, Decl *TagDecl);

/// Perform ODR-like check for C/ObjC when merging tag types from modules.
/// Differently from C++, actually parse the body and reject / error out
/// in case of a structural mismatch.
bool ActOnDuplicateDefinition(DeclSpec &DS, Decl *Prev,
                              SkipBodyInfo &SkipBody);

typedef void *SkippedDefinitionContext;

/// Invoked when we enter a tag definition that we're skipping.
SkippedDefinitionContext ActOnTagStartSkippedDefinition(Scope *S, Decl *TD);

Decl *ActOnObjCContainerStartDefinition(Decl *IDecl);

/// ActOnStartCXXMemberDeclarations - Invoked when we have parsed a
/// C++ record definition's base-specifiers clause and are starting its
/// member declarations.
void ActOnStartCXXMemberDeclarations(Scope *S, Decl *TagDecl,
                                     SourceLocation FinalLoc,
                                     bool IsFinalSpelledSealed,
                                     SourceLocation LBraceLoc);

/// ActOnTagFinishDefinition - Invoked once we have finished parsing
/// the definition of a tag (enumeration, class, struct, or union).
void ActOnTagFinishDefinition(Scope *S, Decl *TagDecl,
                              SourceRange BraceRange);

void ActOnTagFinishSkippedDefinition(SkippedDefinitionContext Context);

void ActOnObjCContainerFinishDefinition();

/// Invoked when we must temporarily exit the objective-c container
/// scope for parsing/looking-up C constructs.
///
/// Must be followed by a call to \see ActOnObjCReenterContainerContext
void ActOnObjCTemporaryExitContainerContext(DeclContext *DC);
void ActOnObjCReenterContainerContext(DeclContext *DC);

/// ActOnTagDefinitionError - Invoked when there was an unrecoverable
/// error parsing the definition of a tag.
void ActOnTagDefinitionError(Scope *S, Decl *TagDecl);

EnumConstantDecl *CheckEnumConstant(EnumDecl *Enum,
                                    EnumConstantDecl *LastEnumConst,
                                    SourceLocation IdLoc,
                                    IdentifierInfo *Id,
                                    Expr *val);
bool CheckEnumUnderlyingType(TypeSourceInfo *TI);
bool CheckEnumRedeclaration(SourceLocation EnumLoc, bool IsScoped,
                            QualType EnumUnderlyingTy, bool IsFixed,
                            const EnumDecl *Prev);

/// Determine whether the body of an anonymous enumeration should be skipped.
/// \param II The name of the first enumerator.
SkipBodyInfo shouldSkipAnonEnumBody(Scope *S, IdentifierInfo *II,
                                    SourceLocation IILoc);

Decl *ActOnEnumConstant(Scope *S, Decl *EnumDecl, Decl *LastEnumConstant,
                        SourceLocation IdLoc, IdentifierInfo *Id,
                        const ParsedAttributesView &Attrs,
                        SourceLocation EqualLoc, Expr *Val);
void ActOnEnumBody(SourceLocation EnumLoc, SourceRange BraceRange,
                   Decl *EnumDecl, ArrayRef<Decl *> Elements, Scope *S,
                   const ParsedAttributesView &Attr);

// Declaration-context management used while parsing declarators and bodies.
DeclContext *getContainingDC(DeclContext *DC);

/// Set the current declaration context until it gets popped.
void PushDeclContext(Scope *S, DeclContext *DC);
void PopDeclContext();

/// EnterDeclaratorContext - Used when we must lookup names in the context
/// of a declarator's nested name specifier.
void EnterDeclaratorContext(Scope *S, DeclContext *DC);
void ExitDeclaratorContext(Scope *S);

/// Push the parameters of D, which must be a function, into scope.
void ActOnReenterFunctionContext(Scope* S, Decl* D);
void ActOnExitFunctionContext();

DeclContext *getFunctionLevelDeclContext();

/// getCurFunctionDecl - If inside of a function body, this returns a pointer
/// to the function decl for the function being parsed.  If we're currently
/// in a 'block', this returns the containing context.
FunctionDecl *getCurFunctionDecl();

/// getCurMethodDecl - If inside of a method body, this returns a pointer to
/// the method decl for the method being parsed.  If we're currently
/// in a 'block', this returns the containing context.
ObjCMethodDecl *getCurMethodDecl();

/// getCurFunctionOrMethodDecl - Return the Decl for the current ObjC method
/// or C function we're in, otherwise return null.  If we're currently
/// in a 'block', this returns the containing context.
NamedDecl *getCurFunctionOrMethodDecl();

/// Add this decl to the scope shadowed decl chains.
void PushOnScopeChains(NamedDecl *D, Scope *S, bool AddToContext = true);

/// isDeclInScope - If 'Ctx' is a function/method, isDeclInScope returns true
/// if 'D' is in Scope 'S', otherwise 'S' is ignored and isDeclInScope returns
/// true if 'D' belongs to the given declaration context.
///
/// \param AllowInlineNamespace If \c true, allow the declaration to be in the
///        enclosing namespace set of the context, rather than contained
///        directly within it.
bool isDeclInScope(NamedDecl *D, DeclContext *Ctx, Scope *S = nullptr,
                   bool AllowInlineNamespace = false);

/// Finds the scope corresponding to the given decl context, if it
/// happens to be an enclosing scope. Otherwise return NULL.
static Scope *getScopeForDeclContext(Scope *S, DeclContext *DC);

/// Subroutines of ActOnDeclarator().
TypedefDecl *ParseTypedefDecl(Scope *S, Declarator &D, QualType T,
                              TypeSourceInfo *TInfo);
bool isIncompatibleTypedef(TypeDecl *Old, TypedefNameDecl *New);

/// Describes the kind of merge to perform for availability
/// attributes (including "deprecated", "unavailable", and "availability").
enum AvailabilityMergeKind {
  /// Don't merge availability attributes at all.
  AMK_None,

  /// Merge availability attributes for a redeclaration, which requires
  /// an exact match.
  AMK_Redeclaration,

  /// Merge availability attributes for an override, which requires
  /// an exact match or a weakening of constraints.
  AMK_Override,

  /// Merge availability attributes for an implementation of
  /// a protocol requirement.
  AMK_ProtocolImplementation,
};

/// Describes the kind of priority given to an availability attribute.
///
/// The sum of priorities determines the final priority of the attribute.
/// The final priority determines how the attribute will be merged.
/// An attribute with a lower priority will always remove higher priority
/// attributes for the specified platform when it is being applied. An
/// attribute with a higher priority will not be applied if the declaration
/// already has an availability attribute with a lower priority for the
/// specified platform. The final priority values are not expected to match
/// the values in this enumeration, but instead should be treated as a plain
/// integer value. This enumeration just names the priority weights that are
/// used to calculate that final value.
enum AvailabilityPriority : int {
  /// The availability attribute was specified explicitly next to the
  /// declaration.
  AP_Explicit = 0,

  /// The availability attribute was applied using '#pragma clang attribute'.
  AP_PragmaClangAttribute = 1,

  /// The availability attribute for a specific platform was inferred from
  /// an availability attribute for another platform.
  AP_InferredFromOtherPlatform = 2
};

/// Attribute merging methods. Return true if a new attribute was added.
AvailabilityAttr *mergeAvailabilityAttr(
    NamedDecl *D, SourceRange Range, IdentifierInfo *Platform, bool Implicit,
    VersionTuple Introduced, VersionTuple Deprecated, VersionTuple Obsoleted,
    bool IsUnavailable, StringRef Message, bool IsStrict,
    StringRef Replacement, AvailabilityMergeKind AMK, int Priority,
    unsigned AttrSpellingListIndex);
TypeVisibilityAttr *mergeTypeVisibilityAttr(Decl *D, SourceRange Range,
                                            TypeVisibilityAttr::VisibilityType Vis,
                                            unsigned AttrSpellingListIndex);
VisibilityAttr *mergeVisibilityAttr(Decl *D, SourceRange Range,
                                    VisibilityAttr::VisibilityType Vis,
                                    unsigned AttrSpellingListIndex);
UuidAttr *mergeUuidAttr(Decl *D, SourceRange Range,
                        unsigned AttrSpellingListIndex, StringRef Uuid);
DLLImportAttr *mergeDLLImportAttr(Decl *D, SourceRange Range,
                                  unsigned AttrSpellingListIndex);
DLLExportAttr *mergeDLLExportAttr(Decl *D, SourceRange Range,
                                  unsigned AttrSpellingListIndex);
MSInheritanceAttr *
mergeMSInheritanceAttr(Decl *D, SourceRange Range, bool BestCase,
                       unsigned AttrSpellingListIndex,
                       MSInheritanceAttr::Spelling SemanticSpelling);
FormatAttr
*mergeFormatAttr(Decl *D, SourceRange Range, IdentifierInfo *Format,
                 int FormatIdx, int FirstArg,
                 unsigned AttrSpellingListIndex);
SectionAttr *mergeSectionAttr(Decl *D, SourceRange Range, StringRef Name,
                              unsigned AttrSpellingListIndex);
CodeSegAttr *mergeCodeSegAttr(Decl *D, SourceRange Range, StringRef Name,
                              unsigned AttrSpellingListIndex);
AlwaysInlineAttr *mergeAlwaysInlineAttr(Decl *D, SourceRange Range,
                                        IdentifierInfo *Ident,
                                        unsigned AttrSpellingListIndex);
MinSizeAttr *mergeMinSizeAttr(Decl *D, SourceRange Range,
                              unsigned AttrSpellingListIndex);
NoSpeculativeLoadHardeningAttr *
mergeNoSpeculativeLoadHardeningAttr(Decl *D,
                                    const NoSpeculativeLoadHardeningAttr &AL);
SpeculativeLoadHardeningAttr *
mergeSpeculativeLoadHardeningAttr(Decl *D,
                                  const SpeculativeLoadHardeningAttr &AL);
OptimizeNoneAttr *mergeOptimizeNoneAttr(Decl *D, SourceRange Range,
                                        unsigned AttrSpellingListIndex);
InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D, const ParsedAttr &AL);
InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D,
                                              const InternalLinkageAttr &AL);
CommonAttr *mergeCommonAttr(Decl *D, const ParsedAttr &AL);
CommonAttr *mergeCommonAttr(Decl *D, const CommonAttr &AL);

// Merging of declarations and their attributes across redeclarations.
void mergeDeclAttributes(NamedDecl *New, Decl *Old,
                         AvailabilityMergeKind AMK = AMK_Redeclaration);
void MergeTypedefNameDecl(Scope *S, TypedefNameDecl *New,
                          LookupResult &OldDecls);
bool MergeFunctionDecl(FunctionDecl *New, NamedDecl *&Old, Scope *S,
                       bool MergeTypeWithOld);
bool MergeCompatibleFunctionDecls(FunctionDecl *New, FunctionDecl *Old,
                                  Scope *S, bool MergeTypeWithOld);
void mergeObjCMethodDecls(ObjCMethodDecl *New, ObjCMethodDecl *Old);
void MergeVarDecl(VarDecl *New, LookupResult &Previous);
void MergeVarDeclTypes(VarDecl *New, VarDecl *Old, bool MergeTypeWithOld);
void MergeVarDeclExceptionSpecs(VarDecl *New, VarDecl *Old);
bool checkVarDeclRedefinition(VarDecl *OldDefn, VarDecl *NewDefn);
void notePreviousDefinition(const NamedDecl *Old, SourceLocation New);
bool MergeCXXFunctionDecl(FunctionDecl *New, FunctionDecl *Old, Scope *S);

// AssignmentAction - This is used by all the assignment diagnostic functions
// to represent what is actually causing the operation
enum AssignmentAction {
  AA_Assigning,
  AA_Passing,
  AA_Returning,
  AA_Converting,
  AA_Initializing,
  AA_Sending,
  AA_Casting,
  AA_Passing_CFAudited
};

/// C++ Overloading.
enum OverloadKind {
  /// This is a legitimate overload: the existing declarations are
  /// functions or function templates with different signatures.
  Ovl_Overload,

  /// This is not an overload because the signature exactly matches
  /// an existing declaration.
  Ovl_Match,

  /// This is not an overload because the lookup results contain a
  /// non-function.
  Ovl_NonFunction
};

// C++AMP diagnostic routine on destructor overload resolution
void DiagnoseCXXAMPDtorOverload(FunctionDecl *New,
                                const LookupResult &Old);

OverloadKind CheckOverload(Scope *S,
                           FunctionDecl *New,
                           const LookupResult &OldDecls,
                           NamedDecl *&OldDecl,
                           bool IsForUsingDecl);
bool IsOverload(FunctionDecl *New, FunctionDecl *Old, bool IsForUsingDecl,
                bool ConsiderCudaAttrs = true);

// Implicit-conversion classification used by overload resolution.
ImplicitConversionSequence
TryImplicitConversion(Expr *From, QualType ToType,
                      bool SuppressUserConversions,
                      bool AllowExplicit,
                      bool InOverloadResolution,
                      bool CStyle,
                      bool AllowObjCWritebackConversion);

bool IsIntegralPromotion(Expr *From, QualType FromType, QualType ToType);
bool IsFloatingPointPromotion(QualType FromType, QualType ToType);
bool IsComplexPromotion(QualType FromType, QualType ToType);
bool IsPointerConversion(Expr *From, QualType FromType, QualType ToType,
                         bool InOverloadResolution,
                         QualType& ConvertedType, bool &IncompatibleObjC);
bool isObjCPointerConversion(QualType FromType, QualType ToType,
                             QualType& ConvertedType, bool &IncompatibleObjC);
bool isObjCWritebackConversion(QualType FromType, QualType ToType,
                               QualType &ConvertedType);
bool IsBlockPointerConversion(QualType FromType, QualType ToType,
                              QualType& ConvertedType);
bool FunctionParamTypesAreEqual(const FunctionProtoType *OldType,
                                const FunctionProtoType *NewType,
                                unsigned *ArgPos = nullptr);
void HandleFunctionTypeMismatch(PartialDiagnostic &PDiag,
                                QualType FromType, QualType ToType);

void maybeExtendBlockObject(ExprResult &E);
CastKind PrepareCastToObjCObjectPointer(ExprResult &E);
bool CheckPointerConversion(Expr *From, QualType ToType,
                            CastKind &Kind,
                            CXXCastPath& BasePath,
                            bool IgnoreBaseAccess,
                            bool Diagnose = true);
bool IsMemberPointerConversion(Expr *From, QualType FromType,
                               QualType ToType,
                               bool InOverloadResolution,
                               QualType &ConvertedType);
bool CheckMemberPointerConversion(Expr *From, QualType ToType,
                                  CastKind &Kind,
                                  CXXCastPath &BasePath,
                                  bool IgnoreBaseAccess);
bool IsQualificationConversion(QualType FromType, QualType ToType,
                               bool CStyle, bool &ObjCLifetimeConversion);
bool IsFunctionConversion(QualType FromType, QualType ToType,
                          QualType &ResultTy);
bool DiagnoseMultipleUserDefinedConversion(Expr *From, QualType ToType);
bool isSameOrCompatibleFunctionType(CanQualType Param, CanQualType Arg);

// Initialization entry points used during overload resolution and
// copy/move initialization.
ExprResult PerformMoveOrCopyInitialization(const InitializedEntity &Entity,
                                           const VarDecl *NRVOCandidate,
                                           QualType ResultType,
                                           Expr *Value,
                                           bool AllowNRVO = true);

bool CanPerformCopyInitialization(const InitializedEntity &Entity,
                                  ExprResult Init);
ExprResult PerformCopyInitialization(const InitializedEntity &Entity,
                                     SourceLocation EqualLoc,
                                     ExprResult Init,
                                     bool TopLevelOfInitList = false,
                                     bool AllowExplicit = false);
ExprResult PerformObjectArgumentInitialization(Expr *From,
                                               NestedNameSpecifier *Qualifier,
                                               NamedDecl *FoundDecl,
                                               CXXMethodDecl *Method);

/// Check that the lifetime of the initializer (and its subobjects) is
/// sufficient for initializing the entity, and perform lifetime extension
/// (when permitted) if not.
void checkInitializerLifetime(const InitializedEntity &Entity, Expr *Init);

ExprResult PerformContextuallyConvertToBool(Expr *From);
ExprResult PerformContextuallyConvertToObjCPointer(Expr *From);

/// Contexts in which a converted constant expression is required.
enum CCEKind {
  CCEK_CaseValue,   ///< Expression in a case label.
  CCEK_Enumerator,  ///< Enumerator value with fixed underlying type.
  CCEK_TemplateArg, ///< Value of a non-type template parameter.
  CCEK_NewExpr,     ///< Constant expression in a noptr-new-declarator.
  CCEK_ConstexprIf, ///< Condition in a constexpr if statement.
  CCEK_ExplicitBool ///< Condition in an explicit(bool) specifier.
};
// Two result-value flavors: one fills in an integral llvm::APSInt, the other
// a general APValue.
ExprResult CheckConvertedConstantExpression(Expr *From, QualType T,
                                            llvm::APSInt &Value, CCEKind CCE);
ExprResult CheckConvertedConstantExpression(Expr *From, QualType T,
                                            APValue &Value, CCEKind CCE);

/// Abstract base class used to perform a contextual implicit
/// conversion from an expression to any type passing a filter.
class ContextualImplicitConverter {
public:
  // NOTE(review): presumably Suppress silences all diagnostics and
  // SuppressConversion silences only the "conversion picked" diagnostic —
  // confirm at the call sites of PerformContextualImplicitConversion.
  bool Suppress;
  bool SuppressConversion;

  ContextualImplicitConverter(bool Suppress = false,
                              bool SuppressConversion = false)
      : Suppress(Suppress), SuppressConversion(SuppressConversion) {}

  /// Determine whether the specified type is a valid destination type
  /// for this conversion.
  virtual bool match(QualType T) = 0;

  /// Emits a diagnostic complaining that the expression does not have
  /// integral or enumeration type.
  virtual SemaDiagnosticBuilder diagnoseNoMatch(Sema &S, SourceLocation Loc,
                                                QualType T) = 0;

  /// Emits a diagnostic when the expression has incomplete class type.
  virtual SemaDiagnosticBuilder diagnoseIncomplete(Sema &S, SourceLocation Loc,
                                                   QualType T) = 0;

  /// Emits a diagnostic when the only matching conversion function
  /// is explicit.
  virtual SemaDiagnosticBuilder diagnoseExplicitConv(
      Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0;

  /// Emits a note for the explicit conversion function.
  virtual SemaDiagnosticBuilder noteExplicitConv(Sema &S,
                                                 CXXConversionDecl *Conv,
                                                 QualType ConvTy) = 0;

  /// Emits a diagnostic when there are multiple possible conversion
  /// functions.
  virtual SemaDiagnosticBuilder diagnoseAmbiguous(Sema &S, SourceLocation Loc,
                                                  QualType T) = 0;

  /// Emits a note for one of the candidate conversions.
  virtual SemaDiagnosticBuilder noteAmbiguous(Sema &S, CXXConversionDecl *Conv,
                                              QualType ConvTy) = 0;

  /// Emits a diagnostic when we picked a conversion function
  /// (for cases when we are not allowed to pick a conversion function).
  virtual SemaDiagnosticBuilder diagnoseConversion(
      Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0;

  virtual ~ContextualImplicitConverter() {}
};

/// Converter for contextual conversions to an integral or (possibly scoped)
/// enumeration type, as used by integral constant expressions.
class ICEConvertDiagnoser : public ContextualImplicitConverter {
  bool AllowScopedEnumerations;

public:
  ICEConvertDiagnoser(bool AllowScopedEnumerations, bool Suppress,
                      bool SuppressConversion)
      : ContextualImplicitConverter(Suppress, SuppressConversion),
        AllowScopedEnumerations(AllowScopedEnumerations) {}

  /// Match an integral or (possibly scoped) enumeration type.
  bool match(QualType T) override;

  // Forwards the generic "no match" diagnostic to the ICE-specific hook.
  SemaDiagnosticBuilder diagnoseNoMatch(Sema &S, SourceLocation Loc,
                                        QualType T) override {
    return diagnoseNotInt(S, Loc, T);
  }

  /// Emits a diagnostic complaining that the expression does not have
  /// integral or enumeration type.
  virtual SemaDiagnosticBuilder diagnoseNotInt(Sema &S, SourceLocation Loc,
                                               QualType T) = 0;
};

/// Perform a contextual implicit conversion.
ExprResult PerformContextualImplicitConversion(
    SourceLocation Loc, Expr *FromE, ContextualImplicitConverter &Converter);

/// Classification of an Objective-C subscript expression's container.
enum ObjCSubscriptKind {
  OS_Array,
  OS_Dictionary,
  OS_Error
};
ObjCSubscriptKind CheckSubscriptingKind(Expr *FromE);

// Note that LK_String is intentionally after the other literals, as
// this is used for diagnostics logic.
enum ObjCLiteralKind { LK_Array, LK_Dictionary, LK_Numeric, LK_Boxed, LK_String, LK_Block, LK_None }; ObjCLiteralKind CheckLiteralKind(Expr *FromE); ExprResult PerformObjectMemberConversion(Expr *From, NestedNameSpecifier *Qualifier, NamedDecl *FoundDecl, NamedDecl *Member); // Members have to be NamespaceDecl* or TranslationUnitDecl*. // TODO: make this is a typesafe union. typedef llvm::SmallSetVector<DeclContext *, 16> AssociatedNamespaceSet; typedef llvm::SmallSetVector<CXXRecordDecl *, 16> AssociatedClassSet; using ADLCallKind = CallExpr::ADLCallKind; void AddOverloadCandidate(FunctionDecl *Function, DeclAccessPair FoundDecl, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, bool SuppressUserConversions = false, bool PartialOverloading = false, bool AllowExplicit = true, bool AllowExplicitConversion = false, ADLCallKind IsADLCandidate = ADLCallKind::NotADL, ConversionSequenceList EarlyConversions = None); void AddFunctionCandidates(const UnresolvedSetImpl &Functions, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr, bool SuppressUserConversions = false, bool PartialOverloading = false, bool FirstArgumentIsBase = false); void AddMethodCandidate(DeclAccessPair FoundDecl, QualType ObjectType, Expr::Classification ObjectClassification, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool SuppressUserConversion = false); void AddMethodCandidate(CXXMethodDecl *Method, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, QualType ObjectType, Expr::Classification ObjectClassification, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool SuppressUserConversions = false, bool PartialOverloading = false, ConversionSequenceList EarlyConversions = None); void AddMethodTemplateCandidate(FunctionTemplateDecl *MethodTmpl, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, TemplateArgumentListInfo *ExplicitTemplateArgs, QualType ObjectType, Expr::Classification 
ObjectClassification, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool SuppressUserConversions = false, bool PartialOverloading = false); void AddTemplateOverloadCandidate( FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl, TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, bool SuppressUserConversions = false, bool PartialOverloading = false, bool AllowExplicit = true, ADLCallKind IsADLCandidate = ADLCallKind::NotADL); bool CheckNonDependentConversions(FunctionTemplateDecl *FunctionTemplate, ArrayRef<QualType> ParamTypes, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, ConversionSequenceList &Conversions, bool SuppressUserConversions, CXXRecordDecl *ActingContext = nullptr, QualType ObjectType = QualType(), Expr::Classification ObjectClassification = {}); void AddConversionCandidate( CXXConversionDecl *Conversion, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, Expr *From, QualType ToType, OverloadCandidateSet &CandidateSet, bool AllowObjCConversionOnExplicit, bool AllowExplicit, bool AllowResultConversion = true); void AddTemplateConversionCandidate( FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, Expr *From, QualType ToType, OverloadCandidateSet &CandidateSet, bool AllowObjCConversionOnExplicit, bool AllowExplicit, bool AllowResultConversion = true); void AddSurrogateCandidate(CXXConversionDecl *Conversion, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, const FunctionProtoType *Proto, Expr *Object, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet); void AddMemberOperatorCandidates(OverloadedOperatorKind Op, SourceLocation OpLoc, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, SourceRange OpRange = SourceRange()); void AddBuiltinCandidate(QualType *ParamTys, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool IsAssignmentOperator = false, unsigned 
NumContextualBoolArguments = 0); void AddBuiltinOperatorCandidates(OverloadedOperatorKind Op, SourceLocation OpLoc, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet); void AddArgumentDependentLookupCandidates(DeclarationName Name, SourceLocation Loc, ArrayRef<Expr *> Args, TemplateArgumentListInfo *ExplicitTemplateArgs, OverloadCandidateSet& CandidateSet, bool PartialOverloading = false); // C++AMP restriction specifier scope checking routines bool IsInAMPRestricted(); // Determine if in CPU and/or AMP restricted codes bool IsInAnyExplicitRestricted(); void GetCXXAMPParentRestriction(Scope* SC, bool& ParentCPU, bool& ParentAMP, bool&ParentAUTO); // Emit as a 'note' the specific overload candidate void NoteOverloadCandidate(NamedDecl *Found, FunctionDecl *Fn, QualType DestType = QualType(), bool TakingAddress = false); // Emit as a series of 'note's all template and non-templates identified by // the expression Expr void NoteAllOverloadCandidates(Expr *E, QualType DestType = QualType(), bool TakingAddress = false); /// Check the enable_if expressions on the given function. Returns the first /// failing attribute, or NULL if they were all successful. EnableIfAttr *CheckEnableIf(FunctionDecl *Function, ArrayRef<Expr *> Args, bool MissingImplicitThis = false); /// Find the failed Boolean condition within a given Boolean /// constant expression, and describe it with a string. std::pair<Expr *, std::string> findFailedBooleanCondition(Expr *Cond); /// Emit diagnostics for the diagnose_if attributes on Function, ignoring any /// non-ArgDependent DiagnoseIfAttrs. /// /// Argument-dependent diagnose_if attributes should be checked each time a /// function is used as a direct callee of a function call. /// /// Returns true if any errors were emitted. 
bool diagnoseArgDependentDiagnoseIfAttrs(const FunctionDecl *Function, const Expr *ThisArg, ArrayRef<const Expr *> Args, SourceLocation Loc); /// Emit diagnostics for the diagnose_if attributes on Function, ignoring any /// ArgDependent DiagnoseIfAttrs. /// /// Argument-independent diagnose_if attributes should be checked on every use /// of a function. /// /// Returns true if any errors were emitted. bool diagnoseArgIndependentDiagnoseIfAttrs(const NamedDecl *ND, SourceLocation Loc); /// Returns whether the given function's address can be taken or not, /// optionally emitting a diagnostic if the address can't be taken. /// /// Returns false if taking the address of the function is illegal. bool checkAddressOfFunctionIsAvailable(const FunctionDecl *Function, bool Complain = false, SourceLocation Loc = SourceLocation()); // [PossiblyAFunctionType] --> [Return] // NonFunctionType --> NonFunctionType // R (A) --> R(A) // R (*)(A) --> R (A) // R (&)(A) --> R (A) // R (S::*)(A) --> R (A) QualType ExtractUnqualifiedFunctionType(QualType PossiblyAFunctionType); FunctionDecl * ResolveAddressOfOverloadedFunction(Expr *AddressOfExpr, QualType TargetType, bool Complain, DeclAccessPair &Found, bool *pHadMultipleCandidates = nullptr); FunctionDecl * resolveAddressOfOnlyViableOverloadCandidate(Expr *E, DeclAccessPair &FoundResult); bool resolveAndFixAddressOfOnlyViableOverloadCandidate( ExprResult &SrcExpr, bool DoFunctionPointerConversion = false); FunctionDecl * ResolveSingleFunctionTemplateSpecialization(OverloadExpr *ovl, bool Complain = false, DeclAccessPair *Found = nullptr); bool ResolveAndFixSingleFunctionTemplateSpecialization( ExprResult &SrcExpr, bool DoFunctionPointerConverion = false, bool Complain = false, SourceRange OpRangeForComplaining = SourceRange(), QualType DestTypeForComplaining = QualType(), unsigned DiagIDForComplaining = 0); Expr *FixOverloadedFunctionReference(Expr *E, DeclAccessPair FoundDecl, FunctionDecl *Fn); ExprResult 
FixOverloadedFunctionReference(ExprResult, DeclAccessPair FoundDecl, FunctionDecl *Fn); void AddOverloadedCallCandidates(UnresolvedLookupExpr *ULE, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, bool PartialOverloading = false); // An enum used to represent the different possible results of building a // range-based for loop. enum ForRangeStatus { FRS_Success, FRS_NoViableFunction, FRS_DiagnosticIssued }; ForRangeStatus BuildForRangeBeginEndCall(SourceLocation Loc, SourceLocation RangeLoc, const DeclarationNameInfo &NameInfo, LookupResult &MemberLookup, OverloadCandidateSet *CandidateSet, Expr *Range, ExprResult *CallExpr); // C++AMP diagnostic routine on overloaded call expressions void DiagnoseCXXAMPOverloadedCallExpr(SourceLocation LParenLoc, FunctionDecl* Callee); ExprResult BuildOverloadedCallExpr(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE, SourceLocation LParenLoc, MultiExprArg Args, SourceLocation RParenLoc, Expr *ExecConfig, bool AllowTypoCorrection=true, bool CalleesAddressIsTaken=false); bool buildOverloadedCallSet(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE, MultiExprArg Args, SourceLocation RParenLoc, OverloadCandidateSet *CandidateSet, ExprResult *Result); ExprResult CreateOverloadedUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc, const UnresolvedSetImpl &Fns, Expr *input, bool RequiresADL = true); ExprResult CreateOverloadedBinOp(SourceLocation OpLoc, BinaryOperatorKind Opc, const UnresolvedSetImpl &Fns, Expr *LHS, Expr *RHS, bool RequiresADL = true); ExprResult CreateOverloadedArraySubscriptExpr(SourceLocation LLoc, SourceLocation RLoc, Expr *Base,Expr *Idx); ExprResult BuildCallToMemberFunction(Scope *S, Expr *MemExpr, SourceLocation LParenLoc, MultiExprArg Args, SourceLocation RParenLoc); ExprResult BuildCallToObjectOfClassType(Scope *S, Expr *Object, SourceLocation LParenLoc, MultiExprArg Args, SourceLocation RParenLoc); ExprResult BuildOverloadedArrowExpr(Scope *S, Expr *Base, SourceLocation OpLoc, bool *NoArrowOperatorFound 
= nullptr); /// CheckCallReturnType - Checks that a call expression's return type is /// complete. Returns true on failure. The location passed in is the location /// that best represents the call. bool CheckCallReturnType(QualType ReturnType, SourceLocation Loc, CallExpr *CE, FunctionDecl *FD); /// Helpers for dealing with blocks and functions. bool CheckParmsForFunctionDef(ArrayRef<ParmVarDecl *> Parameters, bool CheckParameterNames); void CheckCXXDefaultArguments(FunctionDecl *FD); void CheckExtraCXXDefaultArguments(Declarator &D); Scope *getNonFieldDeclScope(Scope *S); /// \name Name lookup /// /// These routines provide name lookup that is used during semantic /// analysis to resolve the various kinds of names (identifiers, /// overloaded operator names, constructor names, etc.) into zero or /// more declarations within a particular scope. The major entry /// points are LookupName, which performs unqualified name lookup, /// and LookupQualifiedName, which performs qualified name lookup. /// /// All name lookup is performed based on some specific criteria, /// which specify what names will be visible to name lookup and how /// far name lookup should work. These criteria are important both /// for capturing language semantics (certain lookups will ignore /// certain names, for example) and for performance, since name /// lookup is often a bottleneck in the compilation of C++. Name /// lookup criteria is specified via the LookupCriteria enumeration. /// /// The results of name lookup can vary based on the kind of name /// lookup performed, the current language, and the translation /// unit. In C, for example, name lookup will either return nothing /// (no entity found) or a single declaration. In C++, name lookup /// can additionally refer to a set of overloaded functions or /// result in an ambiguity. All of the possible results of name /// lookup are captured by the LookupResult class, which provides /// the ability to distinguish among them. 
//@{

/// Describes the kind of name lookup to perform.
enum LookupNameKind {
  /// Ordinary name lookup, which finds ordinary names (functions,
  /// variables, typedefs, etc.) in C and most kinds of names
  /// (functions, variables, members, types, etc.) in C++.
  LookupOrdinaryName = 0,

  /// Tag name lookup, which finds the names of enums, classes,
  /// structs, and unions.
  LookupTagName,

  /// Label name lookup.
  LookupLabel,

  /// Member name lookup, which finds the names of
  /// class/struct/union members.
  LookupMemberName,

  /// Look up of an operator name (e.g., operator+) for use with
  /// operator overloading. This lookup is similar to ordinary name
  /// lookup, but will ignore any declarations that are class members.
  LookupOperatorName,

  /// Look up of a name that precedes the '::' scope resolution
  /// operator in C++. This lookup completely ignores operator, object,
  /// function, and enumerator names (C++ [basic.lookup.qual]p1).
  LookupNestedNameSpecifierName,

  /// Look up a namespace name within a C++ using directive or
  /// namespace alias definition, ignoring non-namespace names (C++
  /// [basic.lookup.udir]p1).
  LookupNamespaceName,

  /// Look up all declarations in a scope with the given name,
  /// including resolved using declarations. This is appropriate
  /// for checking redeclarations for a using declaration.
  LookupUsingDeclName,

  /// Look up an ordinary name that is going to be redeclared as a
  /// name with linkage. This lookup ignores any declarations that
  /// are outside of the current scope unless they have linkage. See
  /// C99 6.2.2p4-5 and C++ [basic.link]p6.
  LookupRedeclarationWithLinkage,

  /// Look up a friend of a local class. This lookup does not look
  /// outside the innermost non-class scope. See C++11 [class.friend]p11.
  LookupLocalFriendName,

  /// Look up the name of an Objective-C protocol.
  LookupObjCProtocolName,

  /// Look up implicit 'self' parameter of an objective-c method.
  LookupObjCImplicitSelfParam,

  /// Look up the name of an OpenMP user-defined reduction operation.
  LookupOMPReductionName,

  /// Look up the name of an OpenMP user-defined mapper.
  LookupOMPMapperName,

  /// Look up any declaration with any name.
  LookupAnyName
};

/// Specifies whether (or how) name lookup is being performed for a
/// redeclaration (vs. a reference).
enum RedeclarationKind {
  /// The lookup is a reference to this name that is not for the
  /// purpose of redeclaring the name.
  NotForRedeclaration = 0,

  /// The lookup results will be used for redeclaration of a name,
  /// if an entity by that name already exists and is visible.
  ForVisibleRedeclaration,

  /// The lookup results will be used for redeclaration of a name
  /// with external linkage; non-visible lookup results with external linkage
  /// may also be found.
  ForExternalRedeclaration
};

/// Select the redeclaration-lookup kind appropriate for a declaration
/// appearing in the current declaration context.
RedeclarationKind forRedeclarationInCurContext() {
  // A declaration with an owning module for linkage can never link against
  // anything that is not visible. We don't need to check linkage here; if
  // the context has internal linkage, redeclaration lookup won't find things
  // from other TUs, and we can't safely compute linkage yet in general.
  if (cast<Decl>(CurContext)
          ->getOwningModuleForLinkage(/*IgnoreLinkage*/true))
    return ForVisibleRedeclaration;
  return ForExternalRedeclaration;
}

/// The possible outcomes of name lookup for a literal operator.
enum LiteralOperatorLookupResult {
  /// The lookup resulted in an error.
  LOLR_Error,

  /// The lookup found no match but no diagnostic was issued.
  LOLR_ErrorNoDiagnostic,

  /// The lookup found a single 'cooked' literal operator, which
  /// expects a normal literal to be built and passed to it.
  LOLR_Cooked,

  /// The lookup found a single 'raw' literal operator, which expects
  /// a string literal containing the spelling of the literal token.
  LOLR_Raw,

  /// The lookup found an overload set of literal operator templates,
  /// which expect the characters of the spelling of the literal token to be
  /// passed as a non-type template argument pack.
  LOLR_Template,

  /// The lookup found an overload set of literal operator templates,
  /// which expect the character type and characters of the spelling of the
  /// string literal token to be passed as template arguments.
  LOLR_StringTemplate
};

SpecialMemberOverloadResult LookupSpecialMember(CXXRecordDecl *D,
                                                CXXSpecialMember SM,
                                                bool ConstArg,
                                                bool VolatileArg,
                                                bool RValueThis,
                                                bool ConstThis,
                                                bool VolatileThis);

// Callback types used by the delayed-typo-correction machinery below.
typedef std::function<void(const TypoCorrection &)> TypoDiagnosticGenerator;
typedef std::function<ExprResult(Sema &, TypoExpr *, TypoCorrection)>
    TypoRecoveryCallback;

// C++AMP type checking routine for kernel codes
// NOTE(review): the 'public:'/'private:' pair below deliberately exposes only
// IsIncompatibleType; the access specifier is switched back immediately after.
public:
bool IsIncompatibleType(const Type* Ty, bool CheckContainer = false,
                        bool IsInfer = false);

private:
// C++AMP type checking routine for kernel codes
bool IsCXXAMPUnsupportedPointerType(const Type* Ty, bool CheckContainer = false,
                                    bool IsInfer = false);
bool IsCXXAMPUnsupportedReferenceType(const Type* Ty,
                                      bool CheckContainer = false,
                                      bool IsInfer = false);

bool CppLookupName(LookupResult &R, Scope *S);

/// Per-TypoExpr bookkeeping: the candidate consumer plus the diagnostic and
/// recovery callbacks registered for it. Move-only (see declared special
/// members).
struct TypoExprState {
  std::unique_ptr<TypoCorrectionConsumer> Consumer;
  TypoDiagnosticGenerator DiagHandler;
  TypoRecoveryCallback RecoveryHandler;
  TypoExprState();
  TypoExprState(TypoExprState &&other) noexcept;
  TypoExprState &operator=(TypoExprState &&other) noexcept;
};

/// The set of unhandled TypoExprs and their associated state.
llvm::MapVector<TypoExpr *, TypoExprState> DelayedTypos;

/// Creates a new TypoExpr AST node.
TypoExpr *createDelayedTypo(std::unique_ptr<TypoCorrectionConsumer> TCC,
                            TypoDiagnosticGenerator TDG,
                            TypoRecoveryCallback TRC);

// The set of known/encountered (unique, canonicalized) NamespaceDecls.
// // The boolean value will be true to indicate that the namespace was loaded // from an AST/PCH file, or false otherwise. llvm::MapVector<NamespaceDecl*, bool> KnownNamespaces; /// Whether we have already loaded known namespaces from an extenal /// source. bool LoadedExternalKnownNamespaces; /// Helper for CorrectTypo and CorrectTypoDelayed used to create and /// populate a new TypoCorrectionConsumer. Returns nullptr if typo correction /// should be skipped entirely. std::unique_ptr<TypoCorrectionConsumer> makeTypoCorrectionConsumer(const DeclarationNameInfo &Typo, Sema::LookupNameKind LookupKind, Scope *S, CXXScopeSpec *SS, CorrectionCandidateCallback &CCC, DeclContext *MemberContext, bool EnteringContext, const ObjCObjectPointerType *OPT, bool ErrorRecovery); public: const TypoExprState &getTypoExprState(TypoExpr *TE) const; /// Clears the state of the given TypoExpr. void clearDelayedTypo(TypoExpr *TE); /// Look up a name, looking for a single declaration. Return /// null if the results were absent, ambiguous, or overloaded. /// /// It is preferable to use the elaborated form and explicitly handle /// ambiguity and overloaded. 
NamedDecl *LookupSingleName(Scope *S, DeclarationName Name, SourceLocation Loc, LookupNameKind NameKind, RedeclarationKind Redecl = NotForRedeclaration); bool LookupName(LookupResult &R, Scope *S, bool AllowBuiltinCreation = false); bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx, bool InUnqualifiedLookup = false); bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx, CXXScopeSpec &SS); bool LookupParsedName(LookupResult &R, Scope *S, CXXScopeSpec *SS, bool AllowBuiltinCreation = false, bool EnteringContext = false); ObjCProtocolDecl *LookupProtocol(IdentifierInfo *II, SourceLocation IdLoc, RedeclarationKind Redecl = NotForRedeclaration); bool LookupInSuper(LookupResult &R, CXXRecordDecl *Class); void LookupOverloadedOperatorName(OverloadedOperatorKind Op, Scope *S, QualType T1, QualType T2, UnresolvedSetImpl &Functions); LabelDecl *LookupOrCreateLabel(IdentifierInfo *II, SourceLocation IdentLoc, SourceLocation GnuLabelLoc = SourceLocation()); DeclContextLookupResult LookupConstructors(CXXRecordDecl *Class); CXXConstructorDecl *LookupDefaultConstructor(CXXRecordDecl *Class); CXXConstructorDecl *LookupCopyingConstructor(CXXRecordDecl *Class, unsigned Quals); CXXMethodDecl *LookupCopyingAssignment(CXXRecordDecl *Class, unsigned Quals, bool RValueThis, unsigned ThisQuals); CXXConstructorDecl *LookupMovingConstructor(CXXRecordDecl *Class, unsigned Quals); CXXMethodDecl *LookupMovingAssignment(CXXRecordDecl *Class, unsigned Quals, bool RValueThis, unsigned ThisQuals); CXXDestructorDecl *LookupDestructor(CXXRecordDecl *Class); bool checkLiteralOperatorId(const CXXScopeSpec &SS, const UnqualifiedId &Id); LiteralOperatorLookupResult LookupLiteralOperator(Scope *S, LookupResult &R, ArrayRef<QualType> ArgTys, bool AllowRaw, bool AllowTemplate, bool AllowStringTemplate, bool DiagnoseMissing); bool isKnownName(StringRef name); void ArgumentDependentLookup(DeclarationName Name, SourceLocation Loc, ArrayRef<Expr *> Args, ADLResult &Functions); void 
LookupVisibleDecls(Scope *S, LookupNameKind Kind, VisibleDeclConsumer &Consumer, bool IncludeGlobalScope = true, bool LoadExternal = true); void LookupVisibleDecls(DeclContext *Ctx, LookupNameKind Kind, VisibleDeclConsumer &Consumer, bool IncludeGlobalScope = true, bool IncludeDependentBases = false, bool LoadExternal = true); enum CorrectTypoKind { CTK_NonError, // CorrectTypo used in a non error recovery situation. CTK_ErrorRecovery // CorrectTypo used in normal error recovery. }; TypoCorrection CorrectTypo(const DeclarationNameInfo &Typo, Sema::LookupNameKind LookupKind, Scope *S, CXXScopeSpec *SS, CorrectionCandidateCallback &CCC, CorrectTypoKind Mode, DeclContext *MemberContext = nullptr, bool EnteringContext = false, const ObjCObjectPointerType *OPT = nullptr, bool RecordFailure = true); TypoExpr *CorrectTypoDelayed(const DeclarationNameInfo &Typo, Sema::LookupNameKind LookupKind, Scope *S, CXXScopeSpec *SS, CorrectionCandidateCallback &CCC, TypoDiagnosticGenerator TDG, TypoRecoveryCallback TRC, CorrectTypoKind Mode, DeclContext *MemberContext = nullptr, bool EnteringContext = false, const ObjCObjectPointerType *OPT = nullptr); /// Process any TypoExprs in the given Expr and its children, /// generating diagnostics as appropriate and returning a new Expr if there /// were typos that were all successfully corrected and ExprError if one or /// more typos could not be corrected. /// /// \param E The Expr to check for TypoExprs. /// /// \param InitDecl A VarDecl to avoid because the Expr being corrected is its /// initializer. /// /// \param Filter A function applied to a newly rebuilt Expr to determine if /// it is an acceptable/usable result from a single combination of typo /// corrections. As long as the filter returns ExprError, different /// combinations of corrections will be tried until all are exhausted. 
ExprResult CorrectDelayedTyposInExpr(Expr *E, VarDecl *InitDecl = nullptr, llvm::function_ref<ExprResult(Expr *)> Filter = [](Expr *E) -> ExprResult { return E; }); ExprResult CorrectDelayedTyposInExpr(Expr *E, llvm::function_ref<ExprResult(Expr *)> Filter) { return CorrectDelayedTyposInExpr(E, nullptr, Filter); } ExprResult CorrectDelayedTyposInExpr(ExprResult ER, VarDecl *InitDecl = nullptr, llvm::function_ref<ExprResult(Expr *)> Filter = [](Expr *E) -> ExprResult { return E; }) { return ER.isInvalid() ? ER : CorrectDelayedTyposInExpr(ER.get(), Filter); } ExprResult CorrectDelayedTyposInExpr(ExprResult ER, llvm::function_ref<ExprResult(Expr *)> Filter) { return CorrectDelayedTyposInExpr(ER, nullptr, Filter); } void diagnoseTypo(const TypoCorrection &Correction, const PartialDiagnostic &TypoDiag, bool ErrorRecovery = true); void diagnoseTypo(const TypoCorrection &Correction, const PartialDiagnostic &TypoDiag, const PartialDiagnostic &PrevNote, bool ErrorRecovery = true); void MarkTypoCorrectedFunctionDefinition(const NamedDecl *F); void FindAssociatedClassesAndNamespaces(SourceLocation InstantiationLoc, ArrayRef<Expr *> Args, AssociatedNamespaceSet &AssociatedNamespaces, AssociatedClassSet &AssociatedClasses); void FilterLookupForScope(LookupResult &R, DeclContext *Ctx, Scope *S, bool ConsiderLinkage, bool AllowInlineNamespace); bool CheckRedeclarationModuleOwnership(NamedDecl *New, NamedDecl *Old); void DiagnoseAmbiguousLookup(LookupResult &Result); //@} ObjCInterfaceDecl *getObjCInterfaceDecl(IdentifierInfo *&Id, SourceLocation IdLoc, bool TypoCorrection = false); NamedDecl *LazilyCreateBuiltin(IdentifierInfo *II, unsigned ID, Scope *S, bool ForRedeclaration, SourceLocation Loc); NamedDecl *ImplicitlyDefineFunction(SourceLocation Loc, IdentifierInfo &II, Scope *S); void AddKnownFunctionAttributes(FunctionDecl *FD); // More parsing and symbol table subroutines. 
void ProcessPragmaWeak(Scope *S, Decl *D); // Decl attributes - this routine is the top level dispatcher. void ProcessDeclAttributes(Scope *S, Decl *D, const Declarator &PD); // Helper for delayed processing of attributes. void ProcessDeclAttributeDelayed(Decl *D, const ParsedAttributesView &AttrList); void ProcessDeclAttributeList(Scope *S, Decl *D, const ParsedAttributesView &AL, bool IncludeCXX11Attributes = true); bool ProcessAccessDeclAttributeList(AccessSpecDecl *ASDecl, const ParsedAttributesView &AttrList); void checkUnusedDeclAttributes(Declarator &D); /// Determine if type T is a valid subject for a nonnull and similar /// attributes. By default, we look through references (the behavior used by /// nonnull), but if the second parameter is true, then we treat a reference /// type as valid. bool isValidPointerAttrType(QualType T, bool RefOkay = false); bool CheckRegparmAttr(const ParsedAttr &attr, unsigned &value); bool CheckCallingConvAttr(const ParsedAttr &attr, CallingConv &CC, const FunctionDecl *FD = nullptr); bool CheckAttrTarget(const ParsedAttr &CurrAttr); bool CheckAttrNoArgs(const ParsedAttr &CurrAttr); bool checkStringLiteralArgumentAttr(const ParsedAttr &Attr, unsigned ArgNum, StringRef &Str, SourceLocation *ArgLocation = nullptr); bool checkSectionName(SourceLocation LiteralLoc, StringRef Str); bool checkTargetAttr(SourceLocation LiteralLoc, StringRef Str); bool checkMSInheritanceAttrOnDefinition( CXXRecordDecl *RD, SourceRange Range, bool BestCase, MSInheritanceAttr::Spelling SemanticSpelling); void CheckAlignasUnderalignment(Decl *D); /// Adjust the calling convention of a method to be the ABI default if it /// wasn't specified explicitly. This handles method types formed from /// function type typedefs and typename template arguments. void adjustMemberFunctionCC(QualType &T, bool IsStatic, bool IsCtorOrDtor, SourceLocation Loc); // Check if there is an explicit attribute, but only look through parens. 
// The intent is to look for an attribute on the current declarator, but not // one that came from a typedef. bool hasExplicitCallingConv(QualType T); /// Get the outermost AttributedType node that sets a calling convention. /// Valid types should not have multiple attributes with different CCs. const AttributedType *getCallingConvAttributedType(QualType T) const; /// Stmt attributes - this routine is the top level dispatcher. StmtResult ProcessStmtAttributes(Stmt *Stmt, const ParsedAttributesView &Attrs, SourceRange Range); void WarnConflictingTypedMethods(ObjCMethodDecl *Method, ObjCMethodDecl *MethodDecl, bool IsProtocolMethodDecl); void CheckConflictingOverridingMethod(ObjCMethodDecl *Method, ObjCMethodDecl *Overridden, bool IsProtocolMethodDecl); /// WarnExactTypedMethods - This routine issues a warning if method /// implementation declaration matches exactly that of its declaration. void WarnExactTypedMethods(ObjCMethodDecl *Method, ObjCMethodDecl *MethodDecl, bool IsProtocolMethodDecl); typedef llvm::SmallPtrSet<Selector, 8> SelectorSet; /// CheckImplementationIvars - This routine checks if the instance variables /// listed in the implelementation match those listed in the interface. void CheckImplementationIvars(ObjCImplementationDecl *ImpDecl, ObjCIvarDecl **Fields, unsigned nIvars, SourceLocation Loc); /// ImplMethodsVsClassMethods - This is main routine to warn if any method /// remains unimplemented in the class or category \@implementation. void ImplMethodsVsClassMethods(Scope *S, ObjCImplDecl* IMPDecl, ObjCContainerDecl* IDecl, bool IncompleteImpl = false); /// DiagnoseUnimplementedProperties - This routine warns on those properties /// which must be implemented by this implementation. void DiagnoseUnimplementedProperties(Scope *S, ObjCImplDecl* IMPDecl, ObjCContainerDecl *CDecl, bool SynthesizeProperties); /// Diagnose any null-resettable synthesized setters. 
void diagnoseNullResettableSynthesizedSetters(const ObjCImplDecl *impDecl); /// DefaultSynthesizeProperties - This routine default synthesizes all /// properties which must be synthesized in the class's \@implementation. void DefaultSynthesizeProperties(Scope *S, ObjCImplDecl *IMPDecl, ObjCInterfaceDecl *IDecl, SourceLocation AtEnd); void DefaultSynthesizeProperties(Scope *S, Decl *D, SourceLocation AtEnd); /// IvarBacksCurrentMethodAccessor - This routine returns 'true' if 'IV' is /// an ivar synthesized for 'Method' and 'Method' is a property accessor /// declared in class 'IFace'. bool IvarBacksCurrentMethodAccessor(ObjCInterfaceDecl *IFace, ObjCMethodDecl *Method, ObjCIvarDecl *IV); /// DiagnoseUnusedBackingIvarInAccessor - Issue an 'unused' warning if ivar which /// backs the property is not used in the property's accessor. void DiagnoseUnusedBackingIvarInAccessor(Scope *S, const ObjCImplementationDecl *ImplD); /// GetIvarBackingPropertyAccessor - If method is a property setter/getter and /// it property has a backing ivar, returns this ivar; otherwise, returns NULL. /// It also returns ivar's property on success. ObjCIvarDecl *GetIvarBackingPropertyAccessor(const ObjCMethodDecl *Method, const ObjCPropertyDecl *&PDecl) const; /// Called by ActOnProperty to handle \@property declarations in /// class extensions. ObjCPropertyDecl *HandlePropertyInClassExtension(Scope *S, SourceLocation AtLoc, SourceLocation LParenLoc, FieldDeclarator &FD, Selector GetterSel, SourceLocation GetterNameLoc, Selector SetterSel, SourceLocation SetterNameLoc, const bool isReadWrite, unsigned &Attributes, const unsigned AttributesAsWritten, QualType T, TypeSourceInfo *TSI, tok::ObjCKeywordKind MethodImplKind); /// Called by ActOnProperty and HandlePropertyInClassExtension to /// handle creating the ObjcPropertyDecl for a category or \@interface. 
ObjCPropertyDecl *CreatePropertyDecl(Scope *S, ObjCContainerDecl *CDecl, SourceLocation AtLoc, SourceLocation LParenLoc, FieldDeclarator &FD, Selector GetterSel, SourceLocation GetterNameLoc, Selector SetterSel, SourceLocation SetterNameLoc, const bool isReadWrite, const unsigned Attributes, const unsigned AttributesAsWritten, QualType T, TypeSourceInfo *TSI, tok::ObjCKeywordKind MethodImplKind, DeclContext *lexicalDC = nullptr); /// AtomicPropertySetterGetterRules - This routine enforces the rule (via /// warning) when atomic property has one but not the other user-declared /// setter or getter. void AtomicPropertySetterGetterRules(ObjCImplDecl* IMPDecl, ObjCInterfaceDecl* IDecl); void DiagnoseOwningPropertyGetterSynthesis(const ObjCImplementationDecl *D); void DiagnoseMissingDesignatedInitOverrides( const ObjCImplementationDecl *ImplD, const ObjCInterfaceDecl *IFD); void DiagnoseDuplicateIvars(ObjCInterfaceDecl *ID, ObjCInterfaceDecl *SID); enum MethodMatchStrategy { MMS_loose, MMS_strict }; /// MatchTwoMethodDeclarations - Checks if two methods' type match and returns /// true, or false, accordingly. bool MatchTwoMethodDeclarations(const ObjCMethodDecl *Method, const ObjCMethodDecl *PrevMethod, MethodMatchStrategy strategy = MMS_strict); /// MatchAllMethodDeclarations - Check methods declaraed in interface or /// or protocol against those declared in their implementations. void MatchAllMethodDeclarations(const SelectorSet &InsMap, const SelectorSet &ClsMap, SelectorSet &InsMapSeen, SelectorSet &ClsMapSeen, ObjCImplDecl* IMPDecl, ObjCContainerDecl* IDecl, bool &IncompleteImpl, bool ImmediateClass, bool WarnCategoryMethodImpl=false); /// CheckCategoryVsClassMethodMatches - Checks that methods implemented in /// category matches with those implemented in its primary class and /// warns each time an exact match is found. void CheckCategoryVsClassMethodMatches(ObjCCategoryImplDecl *CatIMP); /// Add the given method to the list of globally-known methods. 
void addMethodToGlobalList(ObjCMethodList *List, ObjCMethodDecl *Method);

private:
  /// AddMethodToGlobalPool - Add an instance or factory method to the global
  /// pool. See description of AddInstanceMethodToGlobalPool.
  void AddMethodToGlobalPool(ObjCMethodDecl *Method, bool impl, bool instance);

  /// LookupMethodInGlobalPool - Returns the instance or factory method and
  /// optionally warns if there are multiple signatures.
  ObjCMethodDecl *LookupMethodInGlobalPool(Selector Sel, SourceRange R,
                                           bool receiverIdOrClass,
                                           bool instance);

public:
  /// - Returns instance or factory methods in global method pool for
  /// given selector. It checks the desired kind first, if none is found, and
  /// parameter checkTheOther is set, it then checks the other kind. If no such
  /// method or only one method is found, function returns false; otherwise, it
  /// returns true.
  bool
  CollectMultipleMethodsInGlobalPool(Selector Sel,
                                     SmallVectorImpl<ObjCMethodDecl*>& Methods,
                                     bool InstanceFirst, bool CheckTheOther,
                                     const ObjCObjectType *TypeBound = nullptr);

  // NOTE(review): no doc in original; presumably reports whether the global
  // pool holds multiple candidate methods for \p Sel — confirm in the
  // implementation before relying on exact semantics.
  bool
  AreMultipleMethodsInGlobalPool(Selector Sel, ObjCMethodDecl *BestMethod,
                                 SourceRange R, bool receiverIdOrClass,
                                 SmallVectorImpl<ObjCMethodDecl*>& Methods);

  void
  DiagnoseMultipleMethodInGlobalPool(SmallVectorImpl<ObjCMethodDecl*> &Methods,
                                     Selector Sel, SourceRange R,
                                     bool receiverIdOrClass);

private:
  /// - Returns a selector which best matches given argument list or
  /// nullptr if none could be found
  ObjCMethodDecl *SelectBestMethod(Selector Sel, MultiExprArg Args,
                                   bool IsInstance,
                                   SmallVectorImpl<ObjCMethodDecl*>& Methods);

  /// Record the typo correction failure and return an empty correction.
  TypoCorrection FailedCorrection(IdentifierInfo *Typo, SourceLocation TypoLoc,
                                  bool RecordFailure = true) {
    // Remember this identifier/location pair in TypoCorrectionFailures unless
    // the caller explicitly opted out of recording the failure.
    if (RecordFailure)
      TypoCorrectionFailures[Typo].insert(TypoLoc);
    // An empty TypoCorrection signals "no correction found" to the caller.
    return TypoCorrection();
  }

public:
  /// AddInstanceMethodToGlobalPool - All instance methods in a translation
  /// unit are added to a global pool.
This allows us to efficiently associate
  /// a selector with a method declaration for purposes of typechecking
  /// messages sent to "id" (where the class of the object is unknown).
  void AddInstanceMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) {
    AddMethodToGlobalPool(Method, impl, /*instance*/true);
  }

  /// AddFactoryMethodToGlobalPool - Same as above, but for factory methods.
  void AddFactoryMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) {
    AddMethodToGlobalPool(Method, impl, /*instance*/false);
  }

  /// AddAnyMethodToGlobalPool - Add any method, instance or factory to global
  /// pool.
  void AddAnyMethodToGlobalPool(Decl *D);

  /// LookupInstanceMethodInGlobalPool - Returns the method and warns if
  /// there are multiple signatures.
  ObjCMethodDecl *LookupInstanceMethodInGlobalPool(Selector Sel, SourceRange R,
                                                   bool receiverIdOrClass=false) {
    return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass,
                                    /*instance*/true);
  }

  /// LookupFactoryMethodInGlobalPool - Returns the method and warns if
  /// there are multiple signatures.
  ObjCMethodDecl *LookupFactoryMethodInGlobalPool(Selector Sel, SourceRange R,
                                                  bool receiverIdOrClass=false) {
    return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass,
                                    /*instance*/false);
  }

  // NOTE(review): undocumented in original; the name suggests it returns a
  // method whose selector is a plausible typo-correction candidate for
  // \p Sel — verify against the implementation before relying on this.
  const ObjCMethodDecl *SelectorsForTypoCorrection(Selector Sel,
                            QualType ObjectType=QualType());
  /// LookupImplementedMethodInGlobalPool - Returns the method which has an
  /// implementation.
  ObjCMethodDecl *LookupImplementedMethodInGlobalPool(Selector Sel);

  /// CollectIvarsToConstructOrDestruct - Collect those ivars which require
  /// initialization.
  void CollectIvarsToConstructOrDestruct(ObjCInterfaceDecl *OI,
                                         SmallVectorImpl<ObjCIvarDecl*> &Ivars);

  //===--------------------------------------------------------------------===//
  // Statement Parsing Callbacks: SemaStmt.cpp.
public:
  /// Non-owning wrapper for an Expr* that has been completed as a "full
  /// expression". Outside code can only create it empty; a populated instance
  /// comes from Sema::MakeFullExpr / MakeFullDiscardedValueExpr below.
  class FullExprArg {
  public:
    FullExprArg() : E(nullptr) { }
    FullExprArg(Sema &actions) : E(nullptr) { }

    ExprResult release() {
      return E;
    }

    Expr *get() const { return E; }

    Expr *operator->() {
      return E;
    }

  private:
    // FIXME: No need to make the entire Sema class a friend when it's just
    // Sema::MakeFullExpr that needs access to the constructor below.
    friend class Sema;

    explicit FullExprArg(Expr *expr) : E(expr) {}

    Expr *E;
  };

  /// Wrap \p Arg as a full expression, using the expression's own location
  /// for diagnostics (or an invalid location when \p Arg is null).
  FullExprArg MakeFullExpr(Expr *Arg) {
    return MakeFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation());
  }
  FullExprArg MakeFullExpr(Expr *Arg, SourceLocation CC) {
    return FullExprArg(
        ActOnFinishFullExpr(Arg, CC, /*DiscardedValue*/ false).get());
  }
  /// Same as MakeFullExpr, but finishes the expression as a discarded value.
  FullExprArg MakeFullDiscardedValueExpr(Expr *Arg) {
    ExprResult FE =
        ActOnFinishFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation(),
                            /*DiscardedValue*/ true);
    return FullExprArg(FE.get());
  }

  StmtResult ActOnExprStmt(ExprResult Arg, bool DiscardedValue = true);
  StmtResult ActOnExprStmtError();

  StmtResult ActOnNullStmt(SourceLocation SemiLoc,
                           bool HasLeadingEmptyMacro = false);

  void ActOnStartOfCompoundStmt(bool IsStmtExpr);
  void ActOnFinishOfCompoundStmt();
  StmtResult ActOnCompoundStmt(SourceLocation L, SourceLocation R,
                               ArrayRef<Stmt *> Elts, bool isStmtExpr);

  /// A RAII object to enter scope of a compound statement.
  class CompoundScopeRAII {
  public:
    CompoundScopeRAII(Sema &S, bool IsStmtExpr = false) : S(S) {
      S.ActOnStartOfCompoundStmt(IsStmtExpr);
    }

    ~CompoundScopeRAII() {
      S.ActOnFinishOfCompoundStmt();
    }

  private:
    Sema &S;
  };

  /// An RAII helper that pops a function scope on exit.
struct FunctionScopeRAII {
    Sema &S;
    bool Active;
    FunctionScopeRAII(Sema &S) : S(S), Active(true) {}
    ~FunctionScopeRAII() {
      // Pop the function scope unless the caller already disabled this guard.
      if (Active)
        S.PopFunctionScopeInfo();
    }
    /// Call when the scope has been handled by other means, making the
    /// destructor a no-op.
    void disable() { Active = false; }
  };

  StmtResult ActOnDeclStmt(DeclGroupPtrTy Decl, SourceLocation StartLoc,
                           SourceLocation EndLoc);
  void ActOnForEachDeclStmt(DeclGroupPtrTy Decl);
  StmtResult ActOnForEachLValueExpr(Expr *E);
  ExprResult ActOnCaseExpr(SourceLocation CaseLoc, ExprResult Val);
  StmtResult ActOnCaseStmt(SourceLocation CaseLoc, ExprResult LHS,
                           SourceLocation DotDotDotLoc, ExprResult RHS,
                           SourceLocation ColonLoc);
  void ActOnCaseStmtBody(Stmt *CaseStmt, Stmt *SubStmt);

  StmtResult ActOnDefaultStmt(SourceLocation DefaultLoc,
                              SourceLocation ColonLoc,
                              Stmt *SubStmt, Scope *CurScope);
  StmtResult ActOnLabelStmt(SourceLocation IdentLoc, LabelDecl *TheDecl,
                            SourceLocation ColonLoc, Stmt *SubStmt);

  StmtResult ActOnAttributedStmt(SourceLocation AttrLoc,
                                 ArrayRef<const Attr*> Attrs,
                                 Stmt *SubStmt);

  class ConditionResult;
  StmtResult ActOnIfStmt(SourceLocation IfLoc, bool IsConstexpr,
                         Stmt *InitStmt,
                         ConditionResult Cond, Stmt *ThenVal,
                         SourceLocation ElseLoc, Stmt *ElseVal);
  StmtResult BuildIfStmt(SourceLocation IfLoc, bool IsConstexpr,
                         Stmt *InitStmt,
                         ConditionResult Cond, Stmt *ThenVal,
                         SourceLocation ElseLoc, Stmt *ElseVal);
  StmtResult ActOnStartOfSwitchStmt(SourceLocation SwitchLoc,
                                    Stmt *InitStmt,
                                    ConditionResult Cond);
  StmtResult ActOnFinishSwitchStmt(SourceLocation SwitchLoc,
                                   Stmt *Switch, Stmt *Body);
  StmtResult ActOnWhileStmt(SourceLocation WhileLoc, ConditionResult Cond,
                            Stmt *Body);
  StmtResult ActOnDoStmt(SourceLocation DoLoc, Stmt *Body,
                         SourceLocation WhileLoc, SourceLocation CondLParen,
                         Expr *Cond, SourceLocation CondRParen);

  StmtResult ActOnForStmt(SourceLocation ForLoc,
                          SourceLocation LParenLoc,
                          Stmt *First,
                          ConditionResult Second,
                          FullExprArg Third,
                          SourceLocation RParenLoc,
                          Stmt *Body);
  ExprResult CheckObjCForCollectionOperand(SourceLocation forLoc,
                                           Expr *collection);
  StmtResult
ActOnObjCForCollectionStmt(SourceLocation ForColLoc, Stmt *First, Expr *collection, SourceLocation RParenLoc); StmtResult FinishObjCForCollectionStmt(Stmt *ForCollection, Stmt *Body); enum BuildForRangeKind { /// Initial building of a for-range statement. BFRK_Build, /// Instantiation or recovery rebuild of a for-range statement. Don't /// attempt any typo-correction. BFRK_Rebuild, /// Determining whether a for-range statement could be built. Avoid any /// unnecessary or irreversible actions. BFRK_Check }; StmtResult ActOnCXXForRangeStmt(Scope *S, SourceLocation ForLoc, SourceLocation CoawaitLoc, Stmt *InitStmt, Stmt *LoopVar, SourceLocation ColonLoc, Expr *Collection, SourceLocation RParenLoc, BuildForRangeKind Kind); StmtResult BuildCXXForRangeStmt(SourceLocation ForLoc, SourceLocation CoawaitLoc, Stmt *InitStmt, SourceLocation ColonLoc, Stmt *RangeDecl, Stmt *Begin, Stmt *End, Expr *Cond, Expr *Inc, Stmt *LoopVarDecl, SourceLocation RParenLoc, BuildForRangeKind Kind); StmtResult FinishCXXForRangeStmt(Stmt *ForRange, Stmt *Body); StmtResult ActOnGotoStmt(SourceLocation GotoLoc, SourceLocation LabelLoc, LabelDecl *TheDecl); StmtResult ActOnIndirectGotoStmt(SourceLocation GotoLoc, SourceLocation StarLoc, Expr *DestExp); StmtResult ActOnContinueStmt(SourceLocation ContinueLoc, Scope *CurScope); StmtResult ActOnBreakStmt(SourceLocation BreakLoc, Scope *CurScope); void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope, CapturedRegionKind Kind, unsigned NumParams); typedef std::pair<StringRef, QualType> CapturedParamNameType; void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope, CapturedRegionKind Kind, ArrayRef<CapturedParamNameType> Params); StmtResult ActOnCapturedRegionEnd(Stmt *S); void ActOnCapturedRegionError(); RecordDecl *CreateCapturedStmtRecordDecl(CapturedDecl *&CD, SourceLocation Loc, unsigned NumParams); enum CopyElisionSemanticsKind { CES_Strict = 0, CES_AllowParameters = 1, CES_AllowDifferentTypes = 2, CES_AllowExceptionVariables 
= 4, CES_FormerDefault = (CES_AllowParameters), CES_Default = (CES_AllowParameters | CES_AllowDifferentTypes), CES_AsIfByStdMove = (CES_AllowParameters | CES_AllowDifferentTypes | CES_AllowExceptionVariables), }; VarDecl *getCopyElisionCandidate(QualType ReturnType, Expr *E, CopyElisionSemanticsKind CESK); bool isCopyElisionCandidate(QualType ReturnType, const VarDecl *VD, CopyElisionSemanticsKind CESK); StmtResult ActOnReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp, Scope *CurScope); StmtResult BuildReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp); StmtResult ActOnCapScopeReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp); StmtResult ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple, bool IsVolatile, unsigned NumOutputs, unsigned NumInputs, IdentifierInfo **Names, MultiExprArg Constraints, MultiExprArg Exprs, Expr *AsmString, MultiExprArg Clobbers, unsigned NumLabels, SourceLocation RParenLoc); void FillInlineAsmIdentifierInfo(Expr *Res, llvm::InlineAsmIdentifierInfo &Info); ExprResult LookupInlineAsmIdentifier(CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Id, bool IsUnevaluatedContext); bool LookupInlineAsmField(StringRef Base, StringRef Member, unsigned &Offset, SourceLocation AsmLoc); ExprResult LookupInlineAsmVarDeclField(Expr *RefExpr, StringRef Member, SourceLocation AsmLoc); StmtResult ActOnMSAsmStmt(SourceLocation AsmLoc, SourceLocation LBraceLoc, ArrayRef<Token> AsmToks, StringRef AsmString, unsigned NumOutputs, unsigned NumInputs, ArrayRef<StringRef> Constraints, ArrayRef<StringRef> Clobbers, ArrayRef<Expr*> Exprs, SourceLocation EndLoc); LabelDecl *GetOrCreateMSAsmLabel(StringRef ExternalLabelName, SourceLocation Location, bool AlwaysCreate); VarDecl *BuildObjCExceptionDecl(TypeSourceInfo *TInfo, QualType ExceptionType, SourceLocation StartLoc, SourceLocation IdLoc, IdentifierInfo *Id, bool Invalid = false); Decl *ActOnObjCExceptionDecl(Scope *S, Declarator &D); StmtResult ActOnObjCAtCatchStmt(SourceLocation AtLoc, 
SourceLocation RParen, Decl *Parm, Stmt *Body); StmtResult ActOnObjCAtFinallyStmt(SourceLocation AtLoc, Stmt *Body); StmtResult ActOnObjCAtTryStmt(SourceLocation AtLoc, Stmt *Try, MultiStmtArg Catch, Stmt *Finally); StmtResult BuildObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw); StmtResult ActOnObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw, Scope *CurScope); ExprResult ActOnObjCAtSynchronizedOperand(SourceLocation atLoc, Expr *operand); StmtResult ActOnObjCAtSynchronizedStmt(SourceLocation AtLoc, Expr *SynchExpr, Stmt *SynchBody); StmtResult ActOnObjCAutoreleasePoolStmt(SourceLocation AtLoc, Stmt *Body); VarDecl *BuildExceptionDeclaration(Scope *S, TypeSourceInfo *TInfo, SourceLocation StartLoc, SourceLocation IdLoc, IdentifierInfo *Id); Decl *ActOnExceptionDeclarator(Scope *S, Declarator &D); StmtResult ActOnCXXCatchBlock(SourceLocation CatchLoc, Decl *ExDecl, Stmt *HandlerBlock); StmtResult ActOnCXXTryBlock(SourceLocation TryLoc, Stmt *TryBlock, ArrayRef<Stmt *> Handlers); StmtResult ActOnSEHTryBlock(bool IsCXXTry, // try (true) or __try (false) ? SourceLocation TryLoc, Stmt *TryBlock, Stmt *Handler); StmtResult ActOnSEHExceptBlock(SourceLocation Loc, Expr *FilterExpr, Stmt *Block); void ActOnStartSEHFinallyBlock(); void ActOnAbortSEHFinallyBlock(); StmtResult ActOnFinishSEHFinallyBlock(SourceLocation Loc, Stmt *Block); StmtResult ActOnSEHLeaveStmt(SourceLocation Loc, Scope *CurScope); void DiagnoseReturnInConstructorExceptionHandler(CXXTryStmt *TryBlock); bool ShouldWarnIfUnusedFileScopedDecl(const DeclaratorDecl *D) const; /// If it's a file scoped decl that must warn if not used, keep track /// of it. void MarkUnusedFileScopedDecl(const DeclaratorDecl *D); /// DiagnoseUnusedExprResult - If the statement passed in is an expression /// whose result is unused, warn. 
void DiagnoseUnusedExprResult(const Stmt *S); void DiagnoseUnusedNestedTypedefs(const RecordDecl *D); void DiagnoseUnusedDecl(const NamedDecl *ND); /// Emit \p DiagID if statement located on \p StmtLoc has a suspicious null /// statement as a \p Body, and it is located on the same line. /// /// This helps prevent bugs due to typos, such as: /// if (condition); /// do_stuff(); void DiagnoseEmptyStmtBody(SourceLocation StmtLoc, const Stmt *Body, unsigned DiagID); /// Warn if a for/while loop statement \p S, which is followed by /// \p PossibleBody, has a suspicious null statement as a body. void DiagnoseEmptyLoopBody(const Stmt *S, const Stmt *PossibleBody); /// Warn if a value is moved to itself. void DiagnoseSelfMove(const Expr *LHSExpr, const Expr *RHSExpr, SourceLocation OpLoc); /// Warn if we're implicitly casting from a _Nullable pointer type to a /// _Nonnull one. void diagnoseNullableToNonnullConversion(QualType DstType, QualType SrcType, SourceLocation Loc); /// Warn when implicitly casting 0 to nullptr. 
void diagnoseZeroToNullptrConversion(CastKind Kind, const Expr *E);

  /// Begin delaying diagnostics into \p pool while a declaration is parsed;
  /// the returned state must be handed back to PopParsingDeclaration.
  ParsingDeclState PushParsingDeclaration(sema::DelayedDiagnosticPool &pool) {
    return DelayedDiagnostics.push(pool);
  }
  void PopParsingDeclaration(ParsingDeclState state, Decl *decl);

  typedef ProcessingContextState ParsingClassState;
  /// Suspend delayed diagnostics for the duration of parsing a class; paired
  /// with PopParsingClass below.
  ParsingClassState PushParsingClass() {
    return DelayedDiagnostics.pushUndelayed();
  }
  void PopParsingClass(ParsingClassState state) {
    DelayedDiagnostics.popUndelayed(state);
  }

  void redelayDiagnostics(sema::DelayedDiagnosticPool &pool);

  void DiagnoseAvailabilityOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs,
                                  const ObjCInterfaceDecl *UnknownObjCClass,
                                  bool ObjCPropertyAccess,
                                  bool AvoidPartialAvailabilityChecks = false,
                                  ObjCInterfaceDecl *ClassReceiver = nullptr);

  bool makeUnavailableInSystemHeader(SourceLocation loc,
                                     UnavailableAttr::ImplicitReason reason);

  /// Issue any -Wunguarded-availability warnings in \c FD
  void DiagnoseUnguardedAvailabilityViolations(Decl *FD);

  //===--------------------------------------------------------------------===//
  // Expression Parsing Callbacks: SemaExpr.cpp.
bool CanUseDecl(NamedDecl *D, bool TreatUnavailableAsInvalid); bool DiagnoseUseOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs, const ObjCInterfaceDecl *UnknownObjCClass = nullptr, bool ObjCPropertyAccess = false, bool AvoidPartialAvailabilityChecks = false, ObjCInterfaceDecl *ClassReciever = nullptr); void NoteDeletedFunction(FunctionDecl *FD); void NoteDeletedInheritingConstructor(CXXConstructorDecl *CD); bool DiagnosePropertyAccessorMismatch(ObjCPropertyDecl *PD, ObjCMethodDecl *Getter, SourceLocation Loc); void DiagnoseSentinelCalls(NamedDecl *D, SourceLocation Loc, ArrayRef<Expr *> Args); void PushExpressionEvaluationContext( ExpressionEvaluationContext NewContext, Decl *LambdaContextDecl = nullptr, ExpressionEvaluationContextRecord::ExpressionKind Type = ExpressionEvaluationContextRecord::EK_Other); enum ReuseLambdaContextDecl_t { ReuseLambdaContextDecl }; void PushExpressionEvaluationContext( ExpressionEvaluationContext NewContext, ReuseLambdaContextDecl_t, ExpressionEvaluationContextRecord::ExpressionKind Type = ExpressionEvaluationContextRecord::EK_Other); void PopExpressionEvaluationContext(); void DiscardCleanupsInEvaluationContext(); ExprResult TransformToPotentiallyEvaluated(Expr *E); ExprResult HandleExprEvaluationContextForTypeof(Expr *E); ExprResult ActOnConstantExpression(ExprResult Res); // Functions for marking a declaration referenced. These functions also // contain the relevant logic for marking if a reference to a function or // variable is an odr-use (in the C++11 sense). There are separate variants // for expressions referring to a decl; these exist because odr-use marking // needs to be delayed for some constant variables when we build one of the // named expressions. // // MightBeOdrUse indicates whether the use could possibly be an odr-use, and // should usually be true. 
This only needs to be set to false if the lack of // odr-use cannot be determined from the current context (for instance, // because the name denotes a virtual function and was written without an // explicit nested-name-specifier). void MarkAnyDeclReferenced(SourceLocation Loc, Decl *D, bool MightBeOdrUse); void MarkFunctionReferenced(SourceLocation Loc, FunctionDecl *Func, bool MightBeOdrUse = true); void MarkVariableReferenced(SourceLocation Loc, VarDecl *Var); void MarkDeclRefReferenced(DeclRefExpr *E, const Expr *Base = nullptr); void MarkMemberReferenced(MemberExpr *E); void MarkFunctionParmPackReferenced(FunctionParmPackExpr *E); void MarkCaptureUsedInEnclosingContext(VarDecl *Capture, SourceLocation Loc, unsigned CapturingScopeIndex); ExprResult CheckLValueToRValueConversionOperand(Expr *E); void CleanupVarDeclMarking(); enum TryCaptureKind { TryCapture_Implicit, TryCapture_ExplicitByVal, TryCapture_ExplicitByRef }; /// Try to capture the given variable. /// /// \param Var The variable to capture. /// /// \param Loc The location at which the capture occurs. /// /// \param Kind The kind of capture, which may be implicit (for either a /// block or a lambda), or explicit by-value or by-reference (for a lambda). /// /// \param EllipsisLoc The location of the ellipsis, if one is provided in /// an explicit lambda capture. /// /// \param BuildAndDiagnose Whether we are actually supposed to add the /// captures or diagnose errors. If false, this routine merely check whether /// the capture can occur without performing the capture itself or complaining /// if the variable cannot be captured. /// /// \param CaptureType Will be set to the type of the field used to capture /// this variable in the innermost block or lambda. Only valid when the /// variable can be captured. /// /// \param DeclRefType Will be set to the type of a reference to the capture /// from within the current scope. Only valid when the variable can be /// captured. 
/// /// \param FunctionScopeIndexToStopAt If non-null, it points to the index /// of the FunctionScopeInfo stack beyond which we do not attempt to capture. /// This is useful when enclosing lambdas must speculatively capture /// variables that may or may not be used in certain specializations of /// a nested generic lambda. /// /// \returns true if an error occurred (i.e., the variable cannot be /// captured) and false if the capture succeeded. bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc, TryCaptureKind Kind, SourceLocation EllipsisLoc, bool BuildAndDiagnose, QualType &CaptureType, QualType &DeclRefType, const unsigned *const FunctionScopeIndexToStopAt); /// Try to capture the given variable. bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc, TryCaptureKind Kind = TryCapture_Implicit, SourceLocation EllipsisLoc = SourceLocation()); /// Checks if the variable must be captured. bool NeedToCaptureVariable(VarDecl *Var, SourceLocation Loc); /// Given a variable, determine the type that a reference to that /// variable will have in the given scope. QualType getCapturedDeclRefType(VarDecl *Var, SourceLocation Loc); /// Mark all of the declarations referenced within a particular AST node as /// referenced. Used when template instantiation instantiates a non-dependent /// type -- entities referenced by the type are now referenced. void MarkDeclarationsReferencedInType(SourceLocation Loc, QualType T); void MarkDeclarationsReferencedInExpr(Expr *E, bool SkipLocalVariables = false); /// Try to recover by turning the given expression into a /// call. Returns true if recovery was attempted or an error was /// emitted; this may also leave the ExprResult invalid. bool tryToRecoverWithCall(ExprResult &E, const PartialDiagnostic &PD, bool ForceComplain = false, bool (*IsPlausibleResult)(QualType) = nullptr); /// Figure out if an expression could be turned into a call. 
bool tryExprAsCall(Expr &E, QualType &ZeroArgCallReturnTy, UnresolvedSetImpl &NonTemplateOverloads); /// Conditionally issue a diagnostic based on the current /// evaluation context. /// /// \param Statement If Statement is non-null, delay reporting the /// diagnostic until the function body is parsed, and then do a basic /// reachability analysis to determine if the statement is reachable. /// If it is unreachable, the diagnostic will not be emitted. bool DiagRuntimeBehavior(SourceLocation Loc, const Stmt *Statement, const PartialDiagnostic &PD); /// Similar, but diagnostic is only produced if all the specified statements /// are reachable. bool DiagRuntimeBehavior(SourceLocation Loc, ArrayRef<const Stmt*> Stmts, const PartialDiagnostic &PD); // Primary Expressions. SourceRange getExprRange(Expr *E) const; ExprResult ActOnIdExpression( Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Id, bool HasTrailingLParen, bool IsAddressOfOperand, CorrectionCandidateCallback *CCC = nullptr, bool IsInlineAsmIdentifier = false, Token *KeywordReplacement = nullptr); void DecomposeUnqualifiedId(const UnqualifiedId &Id, TemplateArgumentListInfo &Buffer, DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *&TemplateArgs); bool DiagnoseEmptyLookup(Scope *S, CXXScopeSpec &SS, LookupResult &R, CorrectionCandidateCallback &CCC, TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr, ArrayRef<Expr *> Args = None, TypoExpr **Out = nullptr); ExprResult LookupInObjCMethod(LookupResult &LookUp, Scope *S, IdentifierInfo *II, bool AllowBuiltinCreation=false); ExprResult ActOnDependentIdExpression(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo, bool isAddressOfOperand, const TemplateArgumentListInfo *TemplateArgs); /// If \p D cannot be odr-used in the current expression evaluation context, /// return a reason explaining why. Otherwise, return NOUR_None. 
NonOdrUseReason getNonOdrUseReasonInCurrentContext(ValueDecl *D); DeclRefExpr *BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK, SourceLocation Loc, const CXXScopeSpec *SS = nullptr); DeclRefExpr * BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK, const DeclarationNameInfo &NameInfo, const CXXScopeSpec *SS = nullptr, NamedDecl *FoundD = nullptr, SourceLocation TemplateKWLoc = SourceLocation(), const TemplateArgumentListInfo *TemplateArgs = nullptr); DeclRefExpr * BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK, const DeclarationNameInfo &NameInfo, NestedNameSpecifierLoc NNS, NamedDecl *FoundD = nullptr, SourceLocation TemplateKWLoc = SourceLocation(), const TemplateArgumentListInfo *TemplateArgs = nullptr); ExprResult BuildAnonymousStructUnionMemberReference( const CXXScopeSpec &SS, SourceLocation nameLoc, IndirectFieldDecl *indirectField, DeclAccessPair FoundDecl = DeclAccessPair::make(nullptr, AS_none), Expr *baseObjectExpr = nullptr, SourceLocation opLoc = SourceLocation()); ExprResult BuildPossibleImplicitMemberExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R, const TemplateArgumentListInfo *TemplateArgs, const Scope *S); ExprResult BuildImplicitMemberExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R, const TemplateArgumentListInfo *TemplateArgs, bool IsDefiniteInstance, const Scope *S); bool UseArgumentDependentLookup(const CXXScopeSpec &SS, const LookupResult &R, bool HasTrailingLParen); ExprResult BuildQualifiedDeclarationNameExpr(CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, bool IsAddressOfOperand, const Scope *S, TypeSourceInfo **RecoveryTSI = nullptr); ExprResult BuildDependentDeclRefExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs); ExprResult BuildDeclarationNameExpr(const CXXScopeSpec &SS, LookupResult &R, bool NeedsADL, bool AcceptInvalidDecl = false); ExprResult 
BuildDeclarationNameExpr( const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, NamedDecl *D, NamedDecl *FoundD = nullptr, const TemplateArgumentListInfo *TemplateArgs = nullptr, bool AcceptInvalidDecl = false); ExprResult BuildLiteralOperatorCall(LookupResult &R, DeclarationNameInfo &SuffixInfo, ArrayRef<Expr *> Args, SourceLocation LitEndLoc, TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr); ExprResult BuildPredefinedExpr(SourceLocation Loc, PredefinedExpr::IdentKind IK); ExprResult ActOnPredefinedExpr(SourceLocation Loc, tok::TokenKind Kind); ExprResult ActOnIntegerConstant(SourceLocation Loc, uint64_t Val); bool CheckLoopHintExpr(Expr *E, SourceLocation Loc); ExprResult ActOnNumericConstant(const Token &Tok, Scope *UDLScope = nullptr); ExprResult ActOnCharacterConstant(const Token &Tok, Scope *UDLScope = nullptr); ExprResult ActOnParenExpr(SourceLocation L, SourceLocation R, Expr *E); ExprResult ActOnParenListExpr(SourceLocation L, SourceLocation R, MultiExprArg Val); /// ActOnStringLiteral - The specified tokens were lexed as pasted string /// fragments (e.g. "foo" "bar" L"baz"). ExprResult ActOnStringLiteral(ArrayRef<Token> StringToks, Scope *UDLScope = nullptr); ExprResult ActOnGenericSelectionExpr(SourceLocation KeyLoc, SourceLocation DefaultLoc, SourceLocation RParenLoc, Expr *ControllingExpr, ArrayRef<ParsedType> ArgTypes, ArrayRef<Expr *> ArgExprs); ExprResult CreateGenericSelectionExpr(SourceLocation KeyLoc, SourceLocation DefaultLoc, SourceLocation RParenLoc, Expr *ControllingExpr, ArrayRef<TypeSourceInfo *> Types, ArrayRef<Expr *> Exprs); // Binary/Unary Operators. 'Tok' is the token for the operator. 
ExprResult CreateBuiltinUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc, Expr *InputExpr); ExprResult BuildUnaryOp(Scope *S, SourceLocation OpLoc, UnaryOperatorKind Opc, Expr *Input); ExprResult ActOnUnaryOp(Scope *S, SourceLocation OpLoc, tok::TokenKind Op, Expr *Input); bool isQualifiedMemberAccess(Expr *E); QualType CheckAddressOfOperand(ExprResult &Operand, SourceLocation OpLoc); ExprResult CreateUnaryExprOrTypeTraitExpr(TypeSourceInfo *TInfo, SourceLocation OpLoc, UnaryExprOrTypeTrait ExprKind, SourceRange R); ExprResult CreateUnaryExprOrTypeTraitExpr(Expr *E, SourceLocation OpLoc, UnaryExprOrTypeTrait ExprKind); ExprResult ActOnUnaryExprOrTypeTraitExpr(SourceLocation OpLoc, UnaryExprOrTypeTrait ExprKind, bool IsType, void *TyOrEx, SourceRange ArgRange); ExprResult CheckPlaceholderExpr(Expr *E); bool CheckVecStepExpr(Expr *E); bool CheckUnaryExprOrTypeTraitOperand(Expr *E, UnaryExprOrTypeTrait ExprKind); bool CheckUnaryExprOrTypeTraitOperand(QualType ExprType, SourceLocation OpLoc, SourceRange ExprRange, UnaryExprOrTypeTrait ExprKind); ExprResult ActOnSizeofParameterPackExpr(Scope *S, SourceLocation OpLoc, IdentifierInfo &Name, SourceLocation NameLoc, SourceLocation RParenLoc); ExprResult ActOnPostfixUnaryOp(Scope *S, SourceLocation OpLoc, tok::TokenKind Kind, Expr *Input); ExprResult ActOnArraySubscriptExpr(Scope *S, Expr *Base, SourceLocation LLoc, Expr *Idx, SourceLocation RLoc); ExprResult CreateBuiltinArraySubscriptExpr(Expr *Base, SourceLocation LLoc, Expr *Idx, SourceLocation RLoc); ExprResult ActOnOMPArraySectionExpr(Expr *Base, SourceLocation LBLoc, Expr *LowerBound, SourceLocation ColonLoc, Expr *Length, SourceLocation RBLoc); // This struct is for use by ActOnMemberAccess to allow // BuildMemberReferenceExpr to be able to reinvoke ActOnMemberAccess after // changing the access operator from a '.' to a '->' (to see if that is the // change needed to fix an error about an unknown member, e.g. when the class // defines a custom operator->). 
struct ActOnMemberAccessExtraArgs {
  Scope *S;
  UnqualifiedId &Id;
  Decl *ObjCImpDecl;
};

ExprResult BuildMemberReferenceExpr(
    Expr *Base, QualType BaseType, SourceLocation OpLoc, bool IsArrow,
    CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
    NamedDecl *FirstQualifierInScope, const DeclarationNameInfo &NameInfo,
    const TemplateArgumentListInfo *TemplateArgs, const Scope *S,
    ActOnMemberAccessExtraArgs *ExtraArgs = nullptr);
ExprResult
BuildMemberReferenceExpr(Expr *Base, QualType BaseType, SourceLocation OpLoc,
                         bool IsArrow, const CXXScopeSpec &SS,
                         SourceLocation TemplateKWLoc,
                         NamedDecl *FirstQualifierInScope, LookupResult &R,
                         const TemplateArgumentListInfo *TemplateArgs,
                         const Scope *S,
                         bool SuppressQualifierCheck = false,
                         ActOnMemberAccessExtraArgs *ExtraArgs = nullptr);

ExprResult BuildFieldReferenceExpr(Expr *BaseExpr, bool IsArrow,
                                   SourceLocation OpLoc,
                                   const CXXScopeSpec &SS, FieldDecl *Field,
                                   DeclAccessPair FoundDecl,
                                   const DeclarationNameInfo &MemberNameInfo);

ExprResult PerformMemberExprBaseConversion(Expr *Base, bool IsArrow);

bool CheckQualifiedMemberReference(Expr *BaseExpr, QualType BaseType,
                                   const CXXScopeSpec &SS,
                                   const LookupResult &R);

ExprResult ActOnDependentMemberExpr(
    Expr *Base, QualType BaseType, bool IsArrow, SourceLocation OpLoc,
    const CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
    NamedDecl *FirstQualifierInScope, const DeclarationNameInfo &NameInfo,
    const TemplateArgumentListInfo *TemplateArgs);

ExprResult ActOnMemberAccessExpr(Scope *S, Expr *Base, SourceLocation OpLoc,
                                 tok::TokenKind OpKind, CXXScopeSpec &SS,
                                 SourceLocation TemplateKWLoc,
                                 UnqualifiedId &Member, Decl *ObjCImpDecl);

MemberExpr *
BuildMemberExpr(Expr *Base, bool IsArrow, SourceLocation OpLoc,
                const CXXScopeSpec *SS, SourceLocation TemplateKWLoc,
                ValueDecl *Member, DeclAccessPair FoundDecl,
                bool HadMultipleCandidates,
                const DeclarationNameInfo &MemberNameInfo, QualType Ty,
                ExprValueKind VK, ExprObjectKind OK,
                const TemplateArgumentListInfo *TemplateArgs = nullptr);
MemberExpr *
BuildMemberExpr(Expr *Base, bool IsArrow, SourceLocation OpLoc,
                NestedNameSpecifierLoc NNS, SourceLocation TemplateKWLoc,
                ValueDecl *Member, DeclAccessPair FoundDecl,
                bool HadMultipleCandidates,
                const DeclarationNameInfo &MemberNameInfo, QualType Ty,
                ExprValueKind VK, ExprObjectKind OK,
                const TemplateArgumentListInfo *TemplateArgs = nullptr);

void ActOnDefaultCtorInitializers(Decl *CDtorDecl);

bool ConvertArgumentsForCall(CallExpr *Call, Expr *Fn, FunctionDecl *FDecl,
                             const FunctionProtoType *Proto,
                             ArrayRef<Expr *> Args, SourceLocation RParenLoc,
                             bool ExecConfig = false);
void CheckStaticArrayArgument(SourceLocation CallLoc, ParmVarDecl *Param,
                              const Expr *ArgExpr);

// C++AMP diagnostic routine on C++ method call expressions
void DiagnoseCXXAMPMethodCallExpr(SourceLocation LParenLoc,
                                  CXXMethodDecl *Callee);

/// ActOnCallExpr - Handle a call to Fn with the specified array of arguments.
/// This provides the location of the left/right parens and a list of comma
/// locations.
ExprResult ActOnCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc,
                         MultiExprArg ArgExprs, SourceLocation RParenLoc,
                         Expr *ExecConfig = nullptr);
ExprResult BuildCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc,
                         MultiExprArg ArgExprs, SourceLocation RParenLoc,
                         Expr *ExecConfig = nullptr,
                         bool IsExecConfig = false);
ExprResult BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl,
                                 SourceLocation LParenLoc,
                                 ArrayRef<Expr *> Arg,
                                 SourceLocation RParenLoc,
                                 Expr *Config = nullptr,
                                 bool IsExecConfig = false,
                                 ADLCallKind UsesADL = ADLCallKind::NotADL);

ExprResult ActOnCUDAExecConfigExpr(Scope *S, SourceLocation LLLLoc,
                                   MultiExprArg ExecConfig,
                                   SourceLocation GGGLoc);

ExprResult ActOnCastExpr(Scope *S, SourceLocation LParenLoc, Declarator &D,
                         ParsedType &Ty, SourceLocation RParenLoc,
                         Expr *CastExpr);
ExprResult BuildCStyleCastExpr(SourceLocation LParenLoc, TypeSourceInfo *Ty,
                               SourceLocation RParenLoc, Expr *Op);
CastKind PrepareScalarCast(ExprResult &src, QualType destType);

/// Build an altivec or OpenCL literal.
ExprResult BuildVectorLiteral(SourceLocation LParenLoc,
                              SourceLocation RParenLoc, Expr *E,
                              TypeSourceInfo *TInfo);

ExprResult MaybeConvertParenListExprToParenExpr(Scope *S, Expr *ME);

ExprResult ActOnCompoundLiteral(SourceLocation LParenLoc, ParsedType Ty,
                                SourceLocation RParenLoc, Expr *InitExpr);
ExprResult BuildCompoundLiteralExpr(SourceLocation LParenLoc,
                                    TypeSourceInfo *TInfo,
                                    SourceLocation RParenLoc,
                                    Expr *LiteralExpr);

ExprResult ActOnInitList(SourceLocation LBraceLoc, MultiExprArg InitArgList,
                         SourceLocation RBraceLoc);

ExprResult ActOnDesignatedInitializer(Designation &Desig, SourceLocation Loc,
                                      bool GNUSyntax, ExprResult Init);

// C++AMP restriction specifier calculation routines for special member function
void InheritSMFDtorIntersections(CXXRecordDecl* RDecl, bool& CPUAttr,
                                 bool& AMPAttr, bool& ParentCPUAttr,
                                 bool& ParentAMPAttr);
void InheritSMFCtorIntersections(CXXRecordDecl* RDecl, bool& CPUAttr,
                                 bool& AMPAttr, bool& ParentCPUAttr,
                                 bool& ParentAMPAttr, int flag,
                                 bool ConstParam = true);
void InheritSMFMethodIntersections(CXXRecordDecl* RDecl, bool& CPUAttr,
                                   bool& AMPAttr, bool& ParentCPUAttr,
                                   bool& ParentAMPAttr, int flag,
                                   bool ConstParam = true);

private:
  static BinaryOperatorKind ConvertTokenKindToBinaryOpcode(tok::TokenKind Kind);

public:
ExprResult ActOnBinOp(Scope *S, SourceLocation TokLoc, tok::TokenKind Kind,
                      Expr *LHSExpr, Expr *RHSExpr);
ExprResult BuildBinOp(Scope *S, SourceLocation OpLoc,
                      BinaryOperatorKind Opc, Expr *LHSExpr, Expr *RHSExpr);
ExprResult CreateBuiltinBinOp(SourceLocation OpLoc, BinaryOperatorKind Opc,
                              Expr *LHSExpr, Expr *RHSExpr);
void DiagnoseCommaOperator(const Expr *LHS, SourceLocation Loc);

/// ActOnConditionalOp - Parse a ?: operation. Note that 'LHS' may be null
/// in the case of the GNU conditional expr extension.
ExprResult ActOnConditionalOp(SourceLocation QuestionLoc,
                              SourceLocation ColonLoc, Expr *CondExpr,
                              Expr *LHSExpr, Expr *RHSExpr);

/// ActOnAddrLabel - Parse the GNU address of label extension: "&&foo".
ExprResult ActOnAddrLabel(SourceLocation OpLoc, SourceLocation LabLoc,
                          LabelDecl *TheDecl);

void ActOnStartStmtExpr();
ExprResult ActOnStmtExpr(SourceLocation LPLoc, Stmt *SubStmt,
                         SourceLocation RPLoc); // "({..})"
/// Handle the final expression in a statement expression.
ExprResult ActOnStmtExprResult(ExprResult E);
void ActOnStmtExprError();

// __builtin_offsetof(type, identifier(.identifier|[expr])*)
struct OffsetOfComponent {
  SourceLocation LocStart, LocEnd;
  bool isBrackets; // true if [expr], false if .ident
  union {
    IdentifierInfo *IdentInfo;
    Expr *E;
  } U;
};

/// __builtin_offsetof(type, a.b[123][456].c)
ExprResult BuildBuiltinOffsetOf(SourceLocation BuiltinLoc,
                                TypeSourceInfo *TInfo,
                                ArrayRef<OffsetOfComponent> Components,
                                SourceLocation RParenLoc);
ExprResult ActOnBuiltinOffsetOf(Scope *S, SourceLocation BuiltinLoc,
                                SourceLocation TypeLoc,
                                ParsedType ParsedArgTy,
                                ArrayRef<OffsetOfComponent> Components,
                                SourceLocation RParenLoc);

// __builtin_choose_expr(constExpr, expr1, expr2)
ExprResult ActOnChooseExpr(SourceLocation BuiltinLoc, Expr *CondExpr,
                           Expr *LHSExpr, Expr *RHSExpr,
                           SourceLocation RPLoc);

// __builtin_va_arg(expr, type)
ExprResult ActOnVAArg(SourceLocation BuiltinLoc, Expr *E, ParsedType Ty,
                      SourceLocation RPLoc);
ExprResult BuildVAArgExpr(SourceLocation BuiltinLoc, Expr *E,
                          TypeSourceInfo *TInfo, SourceLocation RPLoc);

// __builtin_LINE(), __builtin_FUNCTION(), __builtin_FILE(),
// __builtin_COLUMN()
ExprResult ActOnSourceLocExpr(SourceLocExpr::IdentKind Kind,
                              SourceLocation BuiltinLoc,
                              SourceLocation RPLoc);

// Build a potentially resolved SourceLocExpr.
ExprResult BuildSourceLocExpr(SourceLocExpr::IdentKind Kind,
                              SourceLocation BuiltinLoc,
                              SourceLocation RPLoc,
                              DeclContext *ParentContext);

// __null
ExprResult ActOnGNUNullExpr(SourceLocation TokenLoc);

bool CheckCaseExpression(Expr *E);

/// Describes the result of an "if-exists" condition check.
enum IfExistsResult {
  /// The symbol exists.
  IER_Exists,

  /// The symbol does not exist.
  IER_DoesNotExist,

  /// The name is a dependent name, so the results will differ
  /// from one instantiation to the next.
  IER_Dependent,

  /// An error occurred.
  IER_Error
};

IfExistsResult
CheckMicrosoftIfExistsSymbol(Scope *S, CXXScopeSpec &SS,
                             const DeclarationNameInfo &TargetNameInfo);

IfExistsResult
CheckMicrosoftIfExistsSymbol(Scope *S, SourceLocation KeywordLoc,
                             bool IsIfExists, CXXScopeSpec &SS,
                             UnqualifiedId &Name);

StmtResult BuildMSDependentExistsStmt(SourceLocation KeywordLoc,
                                      bool IsIfExists,
                                      NestedNameSpecifierLoc QualifierLoc,
                                      DeclarationNameInfo NameInfo,
                                      Stmt *Nested);
StmtResult ActOnMSDependentExistsStmt(SourceLocation KeywordLoc,
                                      bool IsIfExists, CXXScopeSpec &SS,
                                      UnqualifiedId &Name, Stmt *Nested);

//===------------------------- "Block" Extension ------------------------===//

/// ActOnBlockStart - This callback is invoked when a block literal is
/// started.
void ActOnBlockStart(SourceLocation CaretLoc, Scope *CurScope);

/// ActOnBlockArguments - This callback allows processing of block arguments.
/// If there are no arguments, this is still invoked.
void ActOnBlockArguments(SourceLocation CaretLoc, Declarator &ParamInfo,
                         Scope *CurScope);

/// ActOnBlockError - If there is an error parsing a block, this callback
/// is invoked to pop the information about the block from the action impl.
void ActOnBlockError(SourceLocation CaretLoc, Scope *CurScope);

/// ActOnBlockStmtExpr - This is called when the body of a block statement
/// literal was successfully completed.  ^(int x){...}
ExprResult ActOnBlockStmtExpr(SourceLocation CaretLoc, Stmt *Body,
                              Scope *CurScope);

//===---------------------------- Clang Extensions ----------------------===//

/// __builtin_convertvector(...)
ExprResult ActOnConvertVectorExpr(Expr *E, ParsedType ParsedDestTy,
                                  SourceLocation BuiltinLoc,
                                  SourceLocation RParenLoc);

//===---------------------------- OpenCL Features -----------------------===//

/// __builtin_astype(...)
ExprResult ActOnAsTypeExpr(Expr *E, ParsedType ParsedDestTy,
                           SourceLocation BuiltinLoc,
                           SourceLocation RParenLoc);

//===---------------------------- C++ Features --------------------------===//

// Act on C++ namespaces
Decl *ActOnStartNamespaceDef(Scope *S, SourceLocation InlineLoc,
                             SourceLocation NamespaceLoc,
                             SourceLocation IdentLoc, IdentifierInfo *Ident,
                             SourceLocation LBrace,
                             const ParsedAttributesView &AttrList,
                             UsingDirectiveDecl *&UsingDecl);
void ActOnFinishNamespaceDef(Decl *Dcl, SourceLocation RBrace);

NamespaceDecl *getStdNamespace() const;
NamespaceDecl *getOrCreateStdNamespace();
NamespaceDecl *lookupStdExperimentalNamespace();

CXXRecordDecl *getStdBadAlloc() const;
EnumDecl *getStdAlignValT() const;

private:
  // A cache representing if we've fully checked the various comparison category
  // types stored in ASTContext. The bit-index corresponds to the integer value
  // of a ComparisonCategoryType enumerator.
  llvm::SmallBitVector FullyCheckedComparisonCategories;

  ValueDecl *tryLookupCtorInitMemberDecl(CXXRecordDecl *ClassDecl,
                                         CXXScopeSpec &SS,
                                         ParsedType TemplateTypeTy,
                                         IdentifierInfo *MemberOrBase);

public:
/// Lookup the specified comparison category types in the standard
/// library, and check the VarDecls possibly returned by the operator<=>
/// builtins for that type.
///
/// \return The type of the comparison category type corresponding to the
/// specified Kind, or a null type if an error occurs
QualType CheckComparisonCategoryType(ComparisonCategoryType Kind,
                                     SourceLocation Loc);

/// Tests whether Ty is an instance of std::initializer_list and, if
/// it is and Element is not NULL, assigns the element type to Element.
bool isStdInitializerList(QualType Ty, QualType *Element);

/// Looks for the std::initializer_list template and instantiates it
/// with Element, or emits an error if it's not found.
///
/// \returns The instantiated template, or null on error.
QualType BuildStdInitializerList(QualType Element, SourceLocation Loc);

/// Determine whether Ctor is an initializer-list constructor, as
/// defined in [dcl.init.list]p2.
bool isInitListConstructor(const FunctionDecl *Ctor);

Decl *ActOnUsingDirective(Scope *CurScope, SourceLocation UsingLoc,
                          SourceLocation NamespcLoc, CXXScopeSpec &SS,
                          SourceLocation IdentLoc,
                          IdentifierInfo *NamespcName,
                          const ParsedAttributesView &AttrList);
void PushUsingDirective(Scope *S, UsingDirectiveDecl *UDir);

Decl *ActOnNamespaceAliasDef(Scope *CurScope, SourceLocation NamespaceLoc,
                             SourceLocation AliasLoc, IdentifierInfo *Alias,
                             CXXScopeSpec &SS, SourceLocation IdentLoc,
                             IdentifierInfo *Ident);

void HideUsingShadowDecl(Scope *S, UsingShadowDecl *Shadow);
bool CheckUsingShadowDecl(UsingDecl *UD, NamedDecl *Target,
                          const LookupResult &PreviousDecls,
                          UsingShadowDecl *&PrevShadow);
UsingShadowDecl *BuildUsingShadowDecl(Scope *S, UsingDecl *UD,
                                      NamedDecl *Target,
                                      UsingShadowDecl *PrevDecl);

bool CheckUsingDeclRedeclaration(SourceLocation UsingLoc,
                                 bool HasTypenameKeyword,
                                 const CXXScopeSpec &SS,
                                 SourceLocation NameLoc,
                                 const LookupResult &Previous);
bool CheckUsingDeclQualifier(SourceLocation UsingLoc, bool HasTypename,
                             const CXXScopeSpec &SS,
                             const DeclarationNameInfo &NameInfo,
                             SourceLocation NameLoc);

NamedDecl *BuildUsingDeclaration(
    Scope *S, AccessSpecifier AS, SourceLocation UsingLoc,
    bool HasTypenameKeyword, SourceLocation TypenameLoc, CXXScopeSpec &SS,
    DeclarationNameInfo NameInfo, SourceLocation EllipsisLoc,
    const ParsedAttributesView &AttrList, bool IsInstantiation);
NamedDecl *BuildUsingPackDecl(NamedDecl *InstantiatedFrom,
                              ArrayRef<NamedDecl *> Expansions);

bool CheckInheritingConstructorUsingDecl(UsingDecl *UD);

/// Given a derived-class using shadow declaration for a constructor and the
/// corresponding base class constructor, find or create the implicit
/// synthesized derived class constructor to use for this initialization.
CXXConstructorDecl *
findInheritingConstructor(SourceLocation Loc, CXXConstructorDecl *BaseCtor,
                          ConstructorUsingShadowDecl *DerivedShadow);

Decl *ActOnUsingDeclaration(Scope *CurScope, AccessSpecifier AS,
                            SourceLocation UsingLoc,
                            SourceLocation TypenameLoc, CXXScopeSpec &SS,
                            UnqualifiedId &Name, SourceLocation EllipsisLoc,
                            const ParsedAttributesView &AttrList);
Decl *ActOnAliasDeclaration(Scope *CurScope, AccessSpecifier AS,
                            MultiTemplateParamsArg TemplateParams,
                            SourceLocation UsingLoc, UnqualifiedId &Name,
                            const ParsedAttributesView &AttrList,
                            TypeResult Type, Decl *DeclFromDeclSpec);

/// BuildCXXConstructExpr - Creates a complete call to a constructor,
/// including handling of its default argument expressions.
///
/// \param ConstructKind - a CXXConstructExpr::ConstructionKind
ExprResult BuildCXXConstructExpr(
    SourceLocation ConstructLoc, QualType DeclInitType, NamedDecl *FoundDecl,
    CXXConstructorDecl *Constructor, MultiExprArg Exprs,
    bool HadMultipleCandidates, bool IsListInitialization,
    bool IsStdInitListInitialization, bool RequiresZeroInit,
    unsigned ConstructKind, SourceRange ParenRange);

/// Build a CXXConstructExpr whose constructor has already been resolved if
/// it denotes an inherited constructor.
ExprResult BuildCXXConstructExpr(
    SourceLocation ConstructLoc, QualType DeclInitType,
    CXXConstructorDecl *Constructor, bool Elidable, MultiExprArg Exprs,
    bool HadMultipleCandidates, bool IsListInitialization,
    bool IsStdInitListInitialization, bool RequiresZeroInit,
    unsigned ConstructKind, SourceRange ParenRange);

// FIXME: Can we remove this and have the above BuildCXXConstructExpr check if
// the constructor can be elidable?
ExprResult BuildCXXConstructExpr(
    SourceLocation ConstructLoc, QualType DeclInitType, NamedDecl *FoundDecl,
    CXXConstructorDecl *Constructor, bool Elidable, MultiExprArg Exprs,
    bool HadMultipleCandidates, bool IsListInitialization,
    bool IsStdInitListInitialization, bool RequiresZeroInit,
    unsigned ConstructKind, SourceRange ParenRange);

ExprResult BuildCXXDefaultInitExpr(SourceLocation Loc, FieldDecl *Field);

/// Instantiate or parse a C++ default argument expression as necessary.
/// Return true on error.
bool CheckCXXDefaultArgExpr(SourceLocation CallLoc, FunctionDecl *FD,
                            ParmVarDecl *Param);

/// BuildCXXDefaultArgExpr - Creates a CXXDefaultArgExpr, instantiating
/// the default expr if needed.
ExprResult BuildCXXDefaultArgExpr(SourceLocation CallLoc, FunctionDecl *FD,
                                  ParmVarDecl *Param);

/// FinalizeVarWithDestructor - Prepare for calling destructor on the
/// constructed variable.
void FinalizeVarWithDestructor(VarDecl *VD, const RecordType *DeclInitType);

/// Helper class that collects exception specifications for
/// implicitly-declared special member functions.
class ImplicitExceptionSpecification {
  // Pointer to allow copying
  Sema *Self;
  // We order exception specifications thus:
  // noexcept is the most restrictive, but is only used in C++11.
  // throw() comes next.
  // Then a throw(collected exceptions)
  // Finally no specification, which is expressed as noexcept(false).
  // throw(...) is used instead if any called function uses it.
  ExceptionSpecificationType ComputedEST;
  llvm::SmallPtrSet<CanQualType, 4> ExceptionsSeen;
  SmallVector<QualType, 4> Exceptions;

  void ClearExceptions() {
    ExceptionsSeen.clear();
    Exceptions.clear();
  }

public:
  explicit ImplicitExceptionSpecification(Sema &Self)
      : Self(&Self), ComputedEST(EST_BasicNoexcept) {
    // Pre-C++11 there is no noexcept, so start from throw().
    if (!Self.getLangOpts().CPlusPlus11)
      ComputedEST = EST_DynamicNone;
  }

  /// Get the computed exception specification type.
  ExceptionSpecificationType getExceptionSpecType() const {
    assert(!isComputedNoexcept(ComputedEST) &&
           "noexcept(expr) should not be a possible result");
    return ComputedEST;
  }

  /// The number of exceptions in the exception specification.
  unsigned size() const { return Exceptions.size(); }

  /// The set of exceptions in the exception specification.
  const QualType *data() const { return Exceptions.data(); }

  /// Integrate another called method into the collected data.
  void CalledDecl(SourceLocation CallLoc, const CXXMethodDecl *Method);

  /// Integrate an invoked expression into the collected data.
  void CalledExpr(Expr *E);

  /// Overwrite an EPI's exception specification with this
  /// computed exception specification.
  FunctionProtoType::ExceptionSpecInfo getExceptionSpec() const {
    FunctionProtoType::ExceptionSpecInfo ESI;
    ESI.Type = getExceptionSpecType();
    if (ESI.Type == EST_Dynamic) {
      ESI.Exceptions = Exceptions;
    } else if (ESI.Type == EST_None) {
      /// C++11 [except.spec]p14:
      ///   The exception-specification is noexcept(false) if the set of
      ///   potential exceptions of the special member function contains "any"
      ESI.Type = EST_NoexceptFalse;
      ESI.NoexceptExpr =
          Self->ActOnCXXBoolLiteral(SourceLocation(), tok::kw_false).get();
    }
    return ESI;
  }
};

/// Determine what sort of exception specification a defaulted
/// default constructor of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedDefaultCtorExceptionSpec(SourceLocation Loc,
                                         CXXMethodDecl *MD);

/// Determine what sort of exception specification a defaulted
/// copy constructor of a class will have, and whether the parameter
/// will be const.
ImplicitExceptionSpecification
ComputeDefaultedCopyCtorExceptionSpec(CXXMethodDecl *MD);

/// Determine what sort of exception specification a defaulted
/// copy assignment operator of a class will have, and whether the
/// parameter will be const.
ImplicitExceptionSpecification
ComputeDefaultedCopyAssignmentExceptionSpec(CXXMethodDecl *MD);

/// Determine what sort of exception specification a defaulted move
/// constructor of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedMoveCtorExceptionSpec(CXXMethodDecl *MD);

/// Determine what sort of exception specification a defaulted move
/// assignment operator of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedMoveAssignmentExceptionSpec(CXXMethodDecl *MD);

/// Determine what sort of exception specification a defaulted
/// destructor of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedDtorExceptionSpec(CXXMethodDecl *MD);

/// Determine what sort of exception specification an inheriting
/// constructor of a class will have.
ImplicitExceptionSpecification
ComputeInheritingCtorExceptionSpec(SourceLocation Loc,
                                   CXXConstructorDecl *CD);

/// Evaluate the implicit exception specification for a defaulted
/// special member function.
void EvaluateImplicitExceptionSpec(SourceLocation Loc, CXXMethodDecl *MD);

/// Check the given noexcept-specifier, convert its expression, and compute
/// the appropriate ExceptionSpecificationType.
ExprResult ActOnNoexceptSpec(SourceLocation NoexceptLoc, Expr *NoexceptExpr,
                             ExceptionSpecificationType &EST);

/// Check the given exception-specification and update the
/// exception specification information with the results.
void checkExceptionSpecification(bool IsTopLevel,
                                 ExceptionSpecificationType EST,
                                 ArrayRef<ParsedType> DynamicExceptions,
                                 ArrayRef<SourceRange> DynamicExceptionRanges,
                                 Expr *NoexceptExpr,
                                 SmallVectorImpl<QualType> &Exceptions,
                                 FunctionProtoType::ExceptionSpecInfo &ESI);

/// Determine if we're in a case where we need to (incorrectly) eagerly
/// parse an exception specification to work around a libstdc++ bug.
bool isLibstdcxxEagerExceptionSpecHack(const Declarator &D);

/// Add an exception-specification to the given member function
/// (or member function template). The exception-specification was parsed
/// after the method itself was declared.
void actOnDelayedExceptionSpecification(
    Decl *Method, ExceptionSpecificationType EST,
    SourceRange SpecificationRange, ArrayRef<ParsedType> DynamicExceptions,
    ArrayRef<SourceRange> DynamicExceptionRanges, Expr *NoexceptExpr);

class InheritedConstructorInfo;

/// Determine if a special member function should have a deleted
/// definition when it is defaulted.
bool ShouldDeleteSpecialMember(CXXMethodDecl *MD, CXXSpecialMember CSM,
                               InheritedConstructorInfo *ICI = nullptr,
                               bool Diagnose = false);

/// Declare the implicit default constructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// default constructor will be added.
///
/// \returns The implicitly-declared default constructor.
CXXConstructorDecl *
DeclareImplicitDefaultConstructor(CXXRecordDecl *ClassDecl);

/// DefineImplicitDefaultConstructor - Checks for feasibility of
/// defining this constructor as the default constructor.
void DefineImplicitDefaultConstructor(SourceLocation CurrentLocation,
                                      CXXConstructorDecl *Constructor);

/// Declare the implicit destructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// destructor will be added.
///
/// \returns The implicitly-declared destructor.
CXXDestructorDecl *DeclareImplicitDestructor(CXXRecordDecl *ClassDecl);

/// DefineImplicitDestructor - Checks for feasibility of
/// defining this destructor as the default destructor.
void DefineImplicitDestructor(SourceLocation CurrentLocation,
                              CXXDestructorDecl *Destructor);

/// Build an exception spec for destructors that don't have one.
///
/// C++11 says that user-defined destructors with no exception spec get one
/// that looks as if the destructor was implicitly declared.
void AdjustDestructorExceptionSpec(CXXDestructorDecl *Destructor);

/// Define the specified inheriting constructor.
void DefineInheritingConstructor(SourceLocation UseLoc,
                                 CXXConstructorDecl *Constructor);

/// Declare the implicit copy constructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// copy constructor will be added.
///
/// \returns The implicitly-declared copy constructor.
CXXConstructorDecl *DeclareImplicitCopyConstructor(CXXRecordDecl *ClassDecl);

/// DefineImplicitCopyConstructor - Checks for feasibility of
/// defining this constructor as the copy constructor.
void DefineImplicitCopyConstructor(SourceLocation CurrentLocation,
                                   CXXConstructorDecl *Constructor);

/// Declare the implicit move constructor for the given class.
///
/// \param ClassDecl The Class declaration into which the implicit
/// move constructor will be added.
///
/// \returns The implicitly-declared move constructor, or NULL if it wasn't
/// declared.
CXXConstructorDecl *DeclareImplicitMoveConstructor(CXXRecordDecl *ClassDecl);

/// DefineImplicitMoveConstructor - Checks for feasibility of
/// defining this constructor as the move constructor.
void DefineImplicitMoveConstructor(SourceLocation CurrentLocation,
                                   CXXConstructorDecl *Constructor);

/// Declare the implicit copy assignment operator for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// copy assignment operator will be added.
/// /// \returns The implicitly-declared copy assignment operator. CXXMethodDecl *DeclareImplicitCopyAssignment(CXXRecordDecl *ClassDecl); /// Defines an implicitly-declared copy assignment operator. void DefineImplicitCopyAssignment(SourceLocation CurrentLocation, CXXMethodDecl *MethodDecl); /// Defines an AMP CUP-side serialize function. void DefineAmpCpuSerializeFunction(SourceLocation CurrentLocation, CXXMethodDecl *MethodDecl); /// Defines an AMP GPU-side deserialize function. void DefineAmpGpuDeSerializeFunction(SourceLocation CurrentLocation, CXXMethodDecl *MethodDecl); /// Declare trampoline name lookup code for AMP CPU-side void DeclareAMPTrampolineName(CXXRecordDecl *ClassDecl, DeclarationName Name); /// Declare trampoline code for AMP GPU-side entry void DeclareAMPTrampoline(CXXRecordDecl *ClassDecl, DeclarationName Name); /// Define trampoline code for AMP GPU-side entry void DefineAMPTrampoline(SourceLocation CurrentLocation, CXXMethodDecl *OperatorCall); /// Declare the implicit move assignment operator for the given class. /// /// \param ClassDecl The Class declaration into which the implicit /// move assignment operator will be added. /// /// \returns The implicitly-declared move assignment operator, or NULL if it /// wasn't declared. CXXMethodDecl *DeclareImplicitMoveAssignment(CXXRecordDecl *ClassDecl); /// Defines an implicitly-declared move assignment operator. void DefineImplicitMoveAssignment(SourceLocation CurrentLocation, CXXMethodDecl *MethodDecl); /// Force the declaration of any implicitly-declared members of this /// class. void ForceDeclarationOfImplicitMembers(CXXRecordDecl *Class); /// Check a completed declaration of an implicit special member. void CheckImplicitSpecialMemberDeclaration(Scope *S, FunctionDecl *FD); /// Determine whether the given function is an implicitly-deleted /// special member function. 
bool isImplicitlyDeleted(FunctionDecl *FD);

/// Check whether 'this' shows up in the type of a static member
/// function after the (naturally empty) cv-qualifier-seq would be.
///
/// \returns true if an error occurred.
bool checkThisInStaticMemberFunctionType(CXXMethodDecl *Method);

/// Whether 'this' shows up in the exception specification of a static
/// member function.
bool checkThisInStaticMemberFunctionExceptionSpec(CXXMethodDecl *Method);

/// Check whether 'this' shows up in the attributes of the given
/// static member function.
///
/// \returns true if an error occurred.
bool checkThisInStaticMemberFunctionAttributes(CXXMethodDecl *Method);

/// MaybeBindToTemporary - If the passed in expression has a record type with
/// a non-trivial destructor, this will return CXXBindTemporaryExpr. Otherwise
/// it simply returns the passed in expression.
ExprResult MaybeBindToTemporary(Expr *E);

bool CompleteConstructorCall(CXXConstructorDecl *Constructor,
                             MultiExprArg ArgsPtr, SourceLocation Loc,
                             SmallVectorImpl<Expr*> &ConvertedArgs,
                             bool AllowExplicit = false,
                             bool IsListInitialization = false);

ParsedType getInheritingConstructorName(CXXScopeSpec &SS,
                                        SourceLocation NameLoc,
                                        IdentifierInfo &Name);
ParsedType getConstructorName(IdentifierInfo &II, SourceLocation NameLoc,
                              Scope *S, CXXScopeSpec &SS,
                              bool EnteringContext);
ParsedType getDestructorName(SourceLocation TildeLoc, IdentifierInfo &II,
                             SourceLocation NameLoc, Scope *S,
                             CXXScopeSpec &SS, ParsedType ObjectType,
                             bool EnteringContext);
ParsedType getDestructorTypeForDecltype(const DeclSpec &DS,
                                        ParsedType ObjectType);

// Checks that reinterpret casts don't have undefined behavior.
void CheckCompatibleReinterpretCast(QualType SrcType, QualType DestType,
                                    bool IsDereference, SourceRange Range);

/// ActOnCXXNamedCast - Parse {dynamic,static,reinterpret,const}_cast's.
ExprResult ActOnCXXNamedCast(SourceLocation OpLoc, tok::TokenKind Kind,
                             SourceLocation LAngleBracketLoc, Declarator &D,
                             SourceLocation RAngleBracketLoc,
                             SourceLocation LParenLoc, Expr *E,
                             SourceLocation RParenLoc);

ExprResult BuildCXXNamedCast(SourceLocation OpLoc, tok::TokenKind Kind,
                             TypeSourceInfo *Ty, Expr *E,
                             SourceRange AngleBrackets, SourceRange Parens);

ExprResult ActOnBuiltinBitCastExpr(SourceLocation KWLoc, Declarator &Dcl,
                                   ExprResult Operand,
                                   SourceLocation RParenLoc);
ExprResult BuildBuiltinBitCastExpr(SourceLocation KWLoc, TypeSourceInfo *TSI,
                                   Expr *Operand, SourceLocation RParenLoc);

ExprResult BuildCXXTypeId(QualType TypeInfoType, SourceLocation TypeidLoc,
                          TypeSourceInfo *Operand,
                          SourceLocation RParenLoc);
ExprResult BuildCXXTypeId(QualType TypeInfoType, SourceLocation TypeidLoc,
                          Expr *Operand, SourceLocation RParenLoc);

/// ActOnCXXTypeid - Parse typeid( something ).
ExprResult ActOnCXXTypeid(SourceLocation OpLoc, SourceLocation LParenLoc,
                          bool isType, void *TyOrExpr,
                          SourceLocation RParenLoc);

ExprResult BuildCXXUuidof(QualType TypeInfoType, SourceLocation TypeidLoc,
                          TypeSourceInfo *Operand,
                          SourceLocation RParenLoc);
ExprResult BuildCXXUuidof(QualType TypeInfoType, SourceLocation TypeidLoc,
                          Expr *Operand, SourceLocation RParenLoc);

/// ActOnCXXUuidof - Parse __uuidof( something ).
ExprResult ActOnCXXUuidof(SourceLocation OpLoc, SourceLocation LParenLoc,
                          bool isType, void *TyOrExpr,
                          SourceLocation RParenLoc);

/// Handle a C++1z fold-expression: ( expr op ... op expr ).
ExprResult ActOnCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS,
                            tok::TokenKind Operator,
                            SourceLocation EllipsisLoc, Expr *RHS,
                            SourceLocation RParenLoc);
ExprResult BuildCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS,
                            BinaryOperatorKind Operator,
                            SourceLocation EllipsisLoc, Expr *RHS,
                            SourceLocation RParenLoc,
                            Optional<unsigned> NumExpansions);
ExprResult BuildEmptyCXXFoldExpr(SourceLocation EllipsisLoc,
                                 BinaryOperatorKind Operator);

//// ActOnCXXThis - Parse 'this' pointer.
ExprResult ActOnCXXThis(SourceLocation loc);

/// Build a CXXThisExpr and mark it referenced in the current context.
Expr *BuildCXXThisExpr(SourceLocation Loc, QualType Type, bool IsImplicit);
void MarkThisReferenced(CXXThisExpr *This);

/// Try to retrieve the type of the 'this' pointer.
///
/// \returns The type of 'this', if possible. Otherwise, returns a NULL type.
QualType getCurrentThisType();

/// When non-NULL, the C++ 'this' expression is allowed despite the
/// current context not being a non-static member function. In such cases,
/// this provides the type used for 'this'.
QualType CXXThisTypeOverride;

/// RAII object used to temporarily allow the C++ 'this' expression
/// to be used, with the given qualifiers on the current class type.
class CXXThisScopeRAII {
  Sema &S;
  QualType OldCXXThisTypeOverride;
  bool Enabled;

public:
  /// Introduce a new scope where 'this' may be allowed (when enabled),
  /// using the given declaration (which is either a class template or a
  /// class) along with the qualifiers placed on '*this'.
  CXXThisScopeRAII(Sema &S, Decl *ContextDecl, Qualifiers CXXThisTypeQuals,
                   bool Enabled = true);

  ~CXXThisScopeRAII();
};

/// Make sure the value of 'this' is actually available in the current
/// context, if it is a potentially evaluated context.
///
/// \param Loc The location at which the capture of 'this' occurs.
///
/// \param Explicit Whether 'this' is explicitly captured in a lambda
/// capture list.
///
/// \param FunctionScopeIndexToStopAt If non-null, it points to the index
/// of the FunctionScopeInfo stack beyond which we do not attempt to capture.
/// This is useful when enclosing lambdas must speculatively capture
/// 'this' that may or may not be used in certain specializations of
/// a nested generic lambda (depending on whether the name resolves to
/// a non-static member function or a static function).
/// \return returns 'true' if failed, 'false' if success.
bool CheckCXXThisCapture(SourceLocation Loc, bool Explicit = false, bool BuildAndDiagnose = true, const unsigned *const FunctionScopeIndexToStopAt = nullptr, bool ByCopy = false); /// Determine whether the given type is the type of *this that is used /// outside of the body of a member function for a type that is currently /// being defined. bool isThisOutsideMemberFunctionBody(QualType BaseType); /// ActOnCXXBoolLiteral - Parse {true,false} literals. ExprResult ActOnCXXBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind); /// ActOnObjCBoolLiteral - Parse {__objc_yes,__objc_no} literals. ExprResult ActOnObjCBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind); ExprResult ActOnObjCAvailabilityCheckExpr(llvm::ArrayRef<AvailabilitySpec> AvailSpecs, SourceLocation AtLoc, SourceLocation RParen); /// ActOnCXXNullPtrLiteral - Parse 'nullptr'. ExprResult ActOnCXXNullPtrLiteral(SourceLocation Loc); //// ActOnCXXThrow - Parse throw expressions. ExprResult ActOnCXXThrow(Scope *S, SourceLocation OpLoc, Expr *expr); ExprResult BuildCXXThrow(SourceLocation OpLoc, Expr *Ex, bool IsThrownVarInScope); bool CheckCXXThrowOperand(SourceLocation ThrowLoc, QualType ThrowTy, Expr *E); /// ActOnCXXTypeConstructExpr - Parse construction of a specified type. /// Can be interpreted either as function-style casting ("int(x)") /// or class type construction ("ClassType(x,y,z)") /// or creation of a value-initialized type ("int()"). ExprResult ActOnCXXTypeConstructExpr(ParsedType TypeRep, SourceLocation LParenOrBraceLoc, MultiExprArg Exprs, SourceLocation RParenOrBraceLoc, bool ListInitialization); ExprResult BuildCXXTypeConstructExpr(TypeSourceInfo *Type, SourceLocation LParenLoc, MultiExprArg Exprs, SourceLocation RParenLoc, bool ListInitialization); /// ActOnCXXNew - Parsed a C++ 'new' expression. 
ExprResult ActOnCXXNew(SourceLocation StartLoc, bool UseGlobal,
                       SourceLocation PlacementLParen,
                       MultiExprArg PlacementArgs,
                       SourceLocation PlacementRParen,
                       SourceRange TypeIdParens, Declarator &D,
                       Expr *Initializer);

/// Build a C++ 'new' expression from already-analyzed components
/// (allocated type, placement arguments, optional array size and
/// initializer).
ExprResult BuildCXXNew(SourceRange Range, bool UseGlobal,
                       SourceLocation PlacementLParen,
                       MultiExprArg PlacementArgs,
                       SourceLocation PlacementRParen,
                       SourceRange TypeIdParens,
                       QualType AllocType,
                       TypeSourceInfo *AllocTypeInfo,
                       Optional<Expr *> ArraySize,
                       SourceRange DirectInitRange,
                       Expr *Initializer);

/// Determine whether \p FD is an aligned allocation or deallocation
/// function that is unavailable.
bool isUnavailableAlignedAllocationFunction(const FunctionDecl &FD) const;

/// Produce diagnostics if \p FD is an aligned allocation or deallocation
/// function that is unavailable.
void diagnoseUnavailableAlignedAllocation(const FunctionDecl &FD,
                                          SourceLocation Loc);

// NOTE(review): presumably validates \p AllocType for use in a
// new-expression at \p Loc (\p R is the type's source range) — confirm
// against the out-of-line definition.
bool CheckAllocatedType(QualType AllocType, SourceLocation Loc,
                        SourceRange R);

/// The scope in which to find allocation functions.
enum AllocationFunctionScope {
  /// Only look for allocation functions in the global scope.
  AFS_Global,
  /// Only look for allocation functions in the scope of the
  /// allocated class.
  AFS_Class,
  /// Look for allocation functions in both the global scope
  /// and in the scope of the allocated class.
  AFS_Both
};

/// Finds the overloads of operator new and delete that are appropriate
/// for the allocation.
bool FindAllocationFunctions(SourceLocation StartLoc, SourceRange Range, AllocationFunctionScope NewScope, AllocationFunctionScope DeleteScope, QualType AllocType, bool IsArray, bool &PassAlignment, MultiExprArg PlaceArgs, FunctionDecl *&OperatorNew, FunctionDecl *&OperatorDelete, bool Diagnose = true); void DeclareGlobalNewDelete(); void DeclareGlobalAllocationFunction(DeclarationName Name, QualType Return, ArrayRef<QualType> Params); bool FindDeallocationFunction(SourceLocation StartLoc, CXXRecordDecl *RD, DeclarationName Name, FunctionDecl* &Operator, bool Diagnose = true); FunctionDecl *FindUsualDeallocationFunction(SourceLocation StartLoc, bool CanProvideSize, bool Overaligned, DeclarationName Name); FunctionDecl *FindDeallocationFunctionForDestructor(SourceLocation StartLoc, CXXRecordDecl *RD); /// ActOnCXXDelete - Parsed a C++ 'delete' expression ExprResult ActOnCXXDelete(SourceLocation StartLoc, bool UseGlobal, bool ArrayForm, Expr *Operand); void CheckVirtualDtorCall(CXXDestructorDecl *dtor, SourceLocation Loc, bool IsDelete, bool CallCanBeVirtual, bool WarnOnNonAbstractTypes, SourceLocation DtorLoc); ExprResult ActOnNoexceptExpr(SourceLocation KeyLoc, SourceLocation LParen, Expr *Operand, SourceLocation RParen); ExprResult BuildCXXNoexceptExpr(SourceLocation KeyLoc, Expr *Operand, SourceLocation RParen); /// Parsed one of the type trait support pseudo-functions. ExprResult ActOnTypeTrait(TypeTrait Kind, SourceLocation KWLoc, ArrayRef<ParsedType> Args, SourceLocation RParenLoc); ExprResult BuildTypeTrait(TypeTrait Kind, SourceLocation KWLoc, ArrayRef<TypeSourceInfo *> Args, SourceLocation RParenLoc); /// ActOnArrayTypeTrait - Parsed one of the binary type trait support /// pseudo-functions. 
ExprResult ActOnArrayTypeTrait(ArrayTypeTrait ATT, SourceLocation KWLoc,
                               ParsedType LhsTy, Expr *DimExpr,
                               SourceLocation RParen);

ExprResult BuildArrayTypeTrait(ArrayTypeTrait ATT, SourceLocation KWLoc,
                               TypeSourceInfo *TSInfo, Expr *DimExpr,
                               SourceLocation RParen);

/// ActOnExpressionTrait - Parsed one of the unary type trait support
/// pseudo-functions.
ExprResult ActOnExpressionTrait(ExpressionTrait OET, SourceLocation KWLoc,
                                Expr *Queried, SourceLocation RParen);

ExprResult BuildExpressionTrait(ExpressionTrait OET, SourceLocation KWLoc,
                                Expr *Queried, SourceLocation RParen);

ExprResult ActOnStartCXXMemberReference(Scope *S, Expr *Base,
                                        SourceLocation OpLoc,
                                        tok::TokenKind OpKind,
                                        ParsedType &ObjectType,
                                        bool &MayBePseudoDestructor);

ExprResult BuildPseudoDestructorExpr(Expr *Base, SourceLocation OpLoc,
                                     tok::TokenKind OpKind,
                                     const CXXScopeSpec &SS,
                                     TypeSourceInfo *ScopeType,
                                     SourceLocation CCLoc,
                                     SourceLocation TildeLoc,
                                     PseudoDestructorTypeStorage DestroyedType);

ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
                                     SourceLocation OpLoc,
                                     tok::TokenKind OpKind,
                                     CXXScopeSpec &SS,
                                     UnqualifiedId &FirstTypeName,
                                     SourceLocation CCLoc,
                                     SourceLocation TildeLoc,
                                     UnqualifiedId &SecondTypeName);

ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
                                     SourceLocation OpLoc,
                                     tok::TokenKind OpKind,
                                     SourceLocation TildeLoc,
                                     const DeclSpec& DS);

/// MaybeCreateExprWithCleanups - If the current full-expression
/// requires any cleanups, surround it with a ExprWithCleanups node.
/// Otherwise, just returns the passed-in expression.
Expr *MaybeCreateExprWithCleanups(Expr *SubExpr);
Stmt *MaybeCreateStmtWithCleanups(Stmt *SubStmt);
ExprResult MaybeCreateExprWithCleanups(ExprResult SubExpr);

MaterializeTemporaryExpr *
CreateMaterializeTemporaryExpr(QualType T, Expr *Temporary,
                               bool BoundToLvalueReference);

/// Convenience overload: uses the expression's own source location (when
/// the expression is non-null) as the full-expression location.
ExprResult ActOnFinishFullExpr(Expr *Expr, bool DiscardedValue) {
  return ActOnFinishFullExpr(
      Expr, Expr ? Expr->getExprLoc() : SourceLocation(), DiscardedValue);
}
ExprResult ActOnFinishFullExpr(Expr *Expr, SourceLocation CC,
                               bool DiscardedValue, bool IsConstexpr = false);
StmtResult ActOnFinishFullStmt(Stmt *Stmt);

// Marks SS invalid if it represents an incomplete type.
bool RequireCompleteDeclContext(CXXScopeSpec &SS, DeclContext *DC);

DeclContext *computeDeclContext(QualType T);
DeclContext *computeDeclContext(const CXXScopeSpec &SS,
                                bool EnteringContext = false);
bool isDependentScopeSpecifier(const CXXScopeSpec &SS);
CXXRecordDecl *getCurrentInstantiationOf(NestedNameSpecifier *NNS);

/// The parser has parsed a global nested-name-specifier '::'.
///
/// \param CCLoc The location of the '::'.
///
/// \param SS The nested-name-specifier, which will be updated in-place
/// to reflect the parsed nested-name-specifier.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnCXXGlobalScopeSpecifier(SourceLocation CCLoc, CXXScopeSpec &SS);

/// The parser has parsed a '__super' nested-name-specifier.
///
/// \param SuperLoc The location of the '__super' keyword.
///
/// \param ColonColonLoc The location of the '::'.
///
/// \param SS The nested-name-specifier, which will be updated in-place
/// to reflect the parsed nested-name-specifier.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnSuperScopeSpecifier(SourceLocation SuperLoc,
                              SourceLocation ColonColonLoc,
                              CXXScopeSpec &SS);

bool isAcceptableNestedNameSpecifier(const NamedDecl *SD,
                                     bool *CanCorrect = nullptr);
NamedDecl *FindFirstQualifierInScope(Scope *S, NestedNameSpecifier *NNS);

/// Keeps information about an identifier in a nested-name-spec.
struct NestedNameSpecInfo {
  /// The type of the object, if we're parsing nested-name-specifier in
  /// a member access expression.
  ParsedType ObjectType;

  /// The identifier preceding the '::'.
  IdentifierInfo *Identifier;

  /// The location of the identifier.
  SourceLocation IdentifierLoc;

  /// The location of the '::'.
  SourceLocation CCLoc;

  /// Creates info object for the most typical case.
  NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc,
                     SourceLocation ColonColonLoc,
                     ParsedType ObjectType = ParsedType())
      : ObjectType(ObjectType), Identifier(II), IdentifierLoc(IdLoc),
        CCLoc(ColonColonLoc) {
  }

  /// As above, but wraps an already-resolved QualType as the object type.
  NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc,
                     SourceLocation ColonColonLoc, QualType ObjectType)
      : ObjectType(ParsedType::make(ObjectType)), Identifier(II),
        IdentifierLoc(IdLoc), CCLoc(ColonColonLoc) {
  }
};

bool isNonTypeNestedNameSpecifier(Scope *S, CXXScopeSpec &SS,
                                  NestedNameSpecInfo &IdInfo);

bool BuildCXXNestedNameSpecifier(Scope *S, NestedNameSpecInfo &IdInfo,
                                 bool EnteringContext, CXXScopeSpec &SS,
                                 NamedDecl *ScopeLookupResult,
                                 bool ErrorRecoveryLookup,
                                 bool *IsCorrectedToColon = nullptr,
                                 bool OnlyNamespace = false);

/// The parser has parsed a nested-name-specifier 'identifier::'.
///
/// \param S The scope in which this nested-name-specifier occurs.
///
/// \param IdInfo Parser information about an identifier in the
/// nested-name-spec.
///
/// \param EnteringContext Whether we're entering the context nominated by
/// this nested-name-specifier.
///
/// \param SS The nested-name-specifier, which is both an input
/// parameter (the nested-name-specifier before this type) and an
/// output parameter (containing the full nested-name-specifier,
/// including this new type).
///
/// \param ErrorRecoveryLookup If true, then this method is called to improve
/// error recovery. In this case do not emit error message.
///
/// \param IsCorrectedToColon If not null, suggestions to replace '::' -> ':'
/// are allowed. The bool value pointed by this parameter is set to 'true'
/// if the identifier is treated as if it was followed by ':', not '::'.
///
/// \param OnlyNamespace If true, only considers namespaces in lookup.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnCXXNestedNameSpecifier(Scope *S, NestedNameSpecInfo &IdInfo, bool EnteringContext, CXXScopeSpec &SS, bool ErrorRecoveryLookup = false, bool *IsCorrectedToColon = nullptr, bool OnlyNamespace = false); ExprResult ActOnDecltypeExpression(Expr *E); bool ActOnCXXNestedNameSpecifierDecltype(CXXScopeSpec &SS, const DeclSpec &DS, SourceLocation ColonColonLoc); bool IsInvalidUnlessNestedName(Scope *S, CXXScopeSpec &SS, NestedNameSpecInfo &IdInfo, bool EnteringContext); /// The parser has parsed a nested-name-specifier /// 'template[opt] template-name < template-args >::'. /// /// \param S The scope in which this nested-name-specifier occurs. /// /// \param SS The nested-name-specifier, which is both an input /// parameter (the nested-name-specifier before this type) and an /// output parameter (containing the full nested-name-specifier, /// including this new type). /// /// \param TemplateKWLoc the location of the 'template' keyword, if any. /// \param TemplateName the template name. /// \param TemplateNameLoc The location of the template name. /// \param LAngleLoc The location of the opening angle bracket ('<'). /// \param TemplateArgs The template arguments. /// \param RAngleLoc The location of the closing angle bracket ('>'). /// \param CCLoc The location of the '::'. /// /// \param EnteringContext Whether we're entering the context of the /// nested-name-specifier. /// /// /// \returns true if an error occurred, false otherwise. bool ActOnCXXNestedNameSpecifier(Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy TemplateName, SourceLocation TemplateNameLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc, SourceLocation CCLoc, bool EnteringContext); /// Given a C++ nested-name-specifier, produce an annotation value /// that the parser can use later to reconstruct the given /// nested-name-specifier. /// /// \param SS A nested-name-specifier. 
/// /// \returns A pointer containing all of the information in the /// nested-name-specifier \p SS. void *SaveNestedNameSpecifierAnnotation(CXXScopeSpec &SS); /// Given an annotation pointer for a nested-name-specifier, restore /// the nested-name-specifier structure. /// /// \param Annotation The annotation pointer, produced by /// \c SaveNestedNameSpecifierAnnotation(). /// /// \param AnnotationRange The source range corresponding to the annotation. /// /// \param SS The nested-name-specifier that will be updated with the contents /// of the annotation pointer. void RestoreNestedNameSpecifierAnnotation(void *Annotation, SourceRange AnnotationRange, CXXScopeSpec &SS); bool ShouldEnterDeclaratorScope(Scope *S, const CXXScopeSpec &SS); /// ActOnCXXEnterDeclaratorScope - Called when a C++ scope specifier (global /// scope or nested-name-specifier) is parsed, part of a declarator-id. /// After this method is called, according to [C++ 3.4.3p3], names should be /// looked up in the declarator-id's scope, until the declarator is parsed and /// ActOnCXXExitDeclaratorScope is called. /// The 'SS' should be a non-empty valid CXXScopeSpec. bool ActOnCXXEnterDeclaratorScope(Scope *S, CXXScopeSpec &SS); /// ActOnCXXExitDeclaratorScope - Called when a declarator that previously /// invoked ActOnCXXEnterDeclaratorScope(), is finished. 'SS' is the same /// CXXScopeSpec that was passed to ActOnCXXEnterDeclaratorScope as well. /// Used to indicate that names should revert to being looked up in the /// defining scope. void ActOnCXXExitDeclaratorScope(Scope *S, const CXXScopeSpec &SS); /// ActOnCXXEnterDeclInitializer - Invoked when we are about to parse an /// initializer for the declaration 'Dcl'. /// After this method is called, according to [C++ 3.4.1p13], if 'Dcl' is a /// static data member of class X, names should be looked up in the scope of /// class X. 
void ActOnCXXEnterDeclInitializer(Scope *S, Decl *Dcl);

/// ActOnCXXExitDeclInitializer - Invoked after we are finished parsing an
/// initializer for the declaration 'Dcl'.
void ActOnCXXExitDeclInitializer(Scope *S, Decl *Dcl);

/// Create a new lambda closure type.
CXXRecordDecl *createLambdaClosureType(SourceRange IntroducerRange,
                                       TypeSourceInfo *Info,
                                       bool KnownDependent,
                                       LambdaCaptureDefault CaptureDefault);

/// Start the definition of a lambda expression.
CXXMethodDecl *
startLambdaDefinition(CXXRecordDecl *Class, SourceRange IntroducerRange,
                      TypeSourceInfo *MethodType, SourceLocation EndLoc,
                      ArrayRef<ParmVarDecl *> Params,
                      ConstexprSpecKind ConstexprKind,
                      Optional<std::pair<unsigned, Decl *>> Mangling = None);

/// Endow the lambda scope info with the relevant properties.
void buildLambdaScope(sema::LambdaScopeInfo *LSI,
                      CXXMethodDecl *CallOperator,
                      SourceRange IntroducerRange,
                      LambdaCaptureDefault CaptureDefault,
                      SourceLocation CaptureDefaultLoc,
                      bool ExplicitParams,
                      bool ExplicitResultType,
                      bool Mutable);

/// Perform initialization analysis of the init-capture and perform
/// any implicit conversions such as an lvalue-to-rvalue conversion if
/// not being used to initialize a reference.
///
/// Thin parser-facing wrapper: forwards to
/// buildLambdaInitCaptureInitialization with no pack expansions, derives
/// DirectInit from \p InitKind, and wraps the resulting type as a
/// ParsedType.
ParsedType actOnLambdaInitCaptureInitialization(
    SourceLocation Loc, bool ByRef, SourceLocation EllipsisLoc,
    IdentifierInfo *Id, LambdaCaptureInitKind InitKind, Expr *&Init) {
  return ParsedType::make(buildLambdaInitCaptureInitialization(
      Loc, ByRef, EllipsisLoc, None, Id,
      InitKind != LambdaCaptureInitKind::CopyInit, Init));
}

QualType buildLambdaInitCaptureInitialization(
    SourceLocation Loc, bool ByRef, SourceLocation EllipsisLoc,
    Optional<unsigned> NumExpansions, IdentifierInfo *Id, bool DirectInit,
    Expr *&Init);

/// Create a dummy variable within the declcontext of the lambda's
/// call operator, for name lookup purposes for a lambda init capture.
///
/// CodeGen handles emission of lambda captures, ignoring these dummy
/// variables appropriately.
VarDecl *createLambdaInitCaptureVarDecl(SourceLocation Loc, QualType InitCaptureType, SourceLocation EllipsisLoc, IdentifierInfo *Id, unsigned InitStyle, Expr *Init); /// Add an init-capture to a lambda scope. void addInitCapture(sema::LambdaScopeInfo *LSI, VarDecl *Var); /// Note that we have finished the explicit captures for the /// given lambda. void finishLambdaExplicitCaptures(sema::LambdaScopeInfo *LSI); /// \brief This is called after parsing the explicit template parameter list /// on a lambda (if it exists) in C++2a. void ActOnLambdaExplicitTemplateParameterList(SourceLocation LAngleLoc, ArrayRef<NamedDecl *> TParams, SourceLocation RAngleLoc); /// Introduce the lambda parameters into scope. void addLambdaParameters( ArrayRef<LambdaIntroducer::LambdaCapture> Captures, CXXMethodDecl *CallOperator, Scope *CurScope); /// Deduce a block or lambda's return type based on the return /// statements present in the body. void deduceClosureReturnType(sema::CapturingScopeInfo &CSI); /// ActOnStartOfLambdaDefinition - This is called just before we start /// parsing the body of a lambda; it analyzes the explicit captures and /// arguments, and sets up various data-structures for the body of the /// lambda. void ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro, Declarator &ParamInfo, Scope *CurScope); /// ActOnLambdaError - If there is an error parsing a lambda, this callback /// is invoked to pop the information about the lambda. void ActOnLambdaError(SourceLocation StartLoc, Scope *CurScope, bool IsInstantiation = false); /// ActOnLambdaExpr - This is called when the body of a lambda expression /// was successfully completed. ExprResult ActOnLambdaExpr(SourceLocation StartLoc, Stmt *Body, Scope *CurScope); /// Does copying/destroying the captured variable have side effects? bool CaptureHasSideEffects(const sema::Capture &From); /// Diagnose if an explicit lambda capture is unused. Returns true if a /// diagnostic is emitted. 
bool DiagnoseUnusedLambdaCapture(SourceRange CaptureRange, const sema::Capture &From); /// Build a FieldDecl suitable to hold the given capture. FieldDecl *BuildCaptureField(RecordDecl *RD, const sema::Capture &Capture); /// Initialize the given capture with a suitable expression. ExprResult BuildCaptureInit(const sema::Capture &Capture, SourceLocation ImplicitCaptureLoc, bool IsOpenMPMapping = false); /// Complete a lambda-expression having processed and attached the /// lambda body. ExprResult BuildLambdaExpr(SourceLocation StartLoc, SourceLocation EndLoc, sema::LambdaScopeInfo *LSI); /// Get the return type to use for a lambda's conversion function(s) to /// function pointer type, given the type of the call operator. QualType getLambdaConversionFunctionResultType(const FunctionProtoType *CallOpType); /// Define the "body" of the conversion from a lambda object to a /// function pointer. /// /// This routine doesn't actually define a sensible body; rather, it fills /// in the initialization expression needed to copy the lambda object into /// the block, and IR generation actually generates the real body of the /// block pointer conversion. void DefineImplicitLambdaToFunctionPointerConversion( SourceLocation CurrentLoc, CXXConversionDecl *Conv); /// Define the "body" of the conversion from a lambda object to a /// block pointer. /// /// This routine doesn't actually define a sensible body; rather, it fills /// in the initialization expression needed to copy the lambda object into /// the block, and IR generation actually generates the real body of the /// block pointer conversion. void DefineImplicitLambdaToBlockPointerConversion(SourceLocation CurrentLoc, CXXConversionDecl *Conv); ExprResult BuildBlockForLambdaConversion(SourceLocation CurrentLocation, SourceLocation ConvLocation, CXXConversionDecl *Conv, Expr *Src); // ParseObjCStringLiteral - Parse Objective-C string literals. 
ExprResult ParseObjCStringLiteral(SourceLocation *AtLocs, ArrayRef<Expr *> Strings); ExprResult BuildObjCStringLiteral(SourceLocation AtLoc, StringLiteral *S); /// BuildObjCNumericLiteral - builds an ObjCBoxedExpr AST node for the /// numeric literal expression. Type of the expression will be "NSNumber *" /// or "id" if NSNumber is unavailable. ExprResult BuildObjCNumericLiteral(SourceLocation AtLoc, Expr *Number); ExprResult ActOnObjCBoolLiteral(SourceLocation AtLoc, SourceLocation ValueLoc, bool Value); ExprResult BuildObjCArrayLiteral(SourceRange SR, MultiExprArg Elements); /// BuildObjCBoxedExpr - builds an ObjCBoxedExpr AST node for the /// '@' prefixed parenthesized expression. The type of the expression will /// either be "NSNumber *", "NSString *" or "NSValue *" depending on the type /// of ValueType, which is allowed to be a built-in numeric type, "char *", /// "const char *" or C structure with attribute 'objc_boxable'. ExprResult BuildObjCBoxedExpr(SourceRange SR, Expr *ValueExpr); ExprResult BuildObjCSubscriptExpression(SourceLocation RB, Expr *BaseExpr, Expr *IndexExpr, ObjCMethodDecl *getterMethod, ObjCMethodDecl *setterMethod); ExprResult BuildObjCDictionaryLiteral(SourceRange SR, MutableArrayRef<ObjCDictionaryElement> Elements); ExprResult BuildObjCEncodeExpression(SourceLocation AtLoc, TypeSourceInfo *EncodedTypeInfo, SourceLocation RParenLoc); ExprResult BuildCXXMemberCallExpr(Expr *Exp, NamedDecl *FoundDecl, CXXConversionDecl *Method, bool HadMultipleCandidates); ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc, SourceLocation EncodeLoc, SourceLocation LParenLoc, ParsedType Ty, SourceLocation RParenLoc); /// ParseObjCSelectorExpression - Build selector expression for \@selector ExprResult ParseObjCSelectorExpression(Selector Sel, SourceLocation AtLoc, SourceLocation SelLoc, SourceLocation LParenLoc, SourceLocation RParenLoc, bool WarnMultipleSelectors); /// ParseObjCProtocolExpression - Build protocol expression for \@protocol ExprResult 
ParseObjCProtocolExpression(IdentifierInfo * ProtocolName, SourceLocation AtLoc, SourceLocation ProtoLoc, SourceLocation LParenLoc, SourceLocation ProtoIdLoc, SourceLocation RParenLoc); //===--------------------------------------------------------------------===// // C++ Declarations // Decl *ActOnStartLinkageSpecification(Scope *S, SourceLocation ExternLoc, Expr *LangStr, SourceLocation LBraceLoc); Decl *ActOnFinishLinkageSpecification(Scope *S, Decl *LinkageSpec, SourceLocation RBraceLoc); //===--------------------------------------------------------------------===// // C++ Classes // CXXRecordDecl *getCurrentClass(Scope *S, const CXXScopeSpec *SS); bool isCurrentClassName(const IdentifierInfo &II, Scope *S, const CXXScopeSpec *SS = nullptr); bool isCurrentClassNameTypo(IdentifierInfo *&II, const CXXScopeSpec *SS); bool ActOnAccessSpecifier(AccessSpecifier Access, SourceLocation ASLoc, SourceLocation ColonLoc, const ParsedAttributesView &Attrs); NamedDecl *ActOnCXXMemberDeclarator(Scope *S, AccessSpecifier AS, Declarator &D, MultiTemplateParamsArg TemplateParameterLists, Expr *BitfieldWidth, const VirtSpecifiers &VS, InClassInitStyle InitStyle); void ActOnStartCXXInClassMemberInitializer(); void ActOnFinishCXXInClassMemberInitializer(Decl *VarDecl, SourceLocation EqualLoc, Expr *Init); MemInitResult ActOnMemInitializer(Decl *ConstructorD, Scope *S, CXXScopeSpec &SS, IdentifierInfo *MemberOrBase, ParsedType TemplateTypeTy, const DeclSpec &DS, SourceLocation IdLoc, SourceLocation LParenLoc, ArrayRef<Expr *> Args, SourceLocation RParenLoc, SourceLocation EllipsisLoc); MemInitResult ActOnMemInitializer(Decl *ConstructorD, Scope *S, CXXScopeSpec &SS, IdentifierInfo *MemberOrBase, ParsedType TemplateTypeTy, const DeclSpec &DS, SourceLocation IdLoc, Expr *InitList, SourceLocation EllipsisLoc); MemInitResult BuildMemInitializer(Decl *ConstructorD, Scope *S, CXXScopeSpec &SS, IdentifierInfo *MemberOrBase, ParsedType TemplateTypeTy, const DeclSpec &DS, SourceLocation 
IdLoc, Expr *Init, SourceLocation EllipsisLoc); MemInitResult BuildMemberInitializer(ValueDecl *Member, Expr *Init, SourceLocation IdLoc); MemInitResult BuildBaseInitializer(QualType BaseType, TypeSourceInfo *BaseTInfo, Expr *Init, CXXRecordDecl *ClassDecl, SourceLocation EllipsisLoc); MemInitResult BuildDelegatingInitializer(TypeSourceInfo *TInfo, Expr *Init, CXXRecordDecl *ClassDecl); bool SetDelegatingInitializer(CXXConstructorDecl *Constructor, CXXCtorInitializer *Initializer); bool SetCtorInitializers(CXXConstructorDecl *Constructor, bool AnyErrors, ArrayRef<CXXCtorInitializer *> Initializers = None); void SetIvarInitializers(ObjCImplementationDecl *ObjCImplementation); /// MarkBaseAndMemberDestructorsReferenced - Given a record decl, /// mark all the non-trivial destructors of its members and bases as /// referenced. void MarkBaseAndMemberDestructorsReferenced(SourceLocation Loc, CXXRecordDecl *Record); /// The list of classes whose vtables have been used within /// this translation unit, and the source locations at which the /// first use occurred. typedef std::pair<CXXRecordDecl*, SourceLocation> VTableUse; /// The list of vtables that are required but have not yet been /// materialized. SmallVector<VTableUse, 16> VTableUses; /// The set of classes whose vtables have been used within /// this translation unit, and a bit that will be true if the vtable is /// required to be emitted (otherwise, it should be emitted only if needed /// by code generation). llvm::DenseMap<CXXRecordDecl *, bool> VTablesUsed; /// Load any externally-stored vtable uses. void LoadExternalVTableUses(); /// Note that the vtable for the given class was used at the /// given location. void MarkVTableUsed(SourceLocation Loc, CXXRecordDecl *Class, bool DefinitionRequired = false); /// Mark the exception specifications of all virtual member functions /// in the given class as needed. 
void MarkVirtualMemberExceptionSpecsNeeded(SourceLocation Loc, const CXXRecordDecl *RD); /// MarkVirtualMembersReferenced - Will mark all members of the given /// CXXRecordDecl referenced. void MarkVirtualMembersReferenced(SourceLocation Loc, const CXXRecordDecl *RD, bool ConstexprOnly = false); /// Define all of the vtables that have been used in this /// translation unit and reference any virtual members used by those /// vtables. /// /// \returns true if any work was done, false otherwise. bool DefineUsedVTables(); /// \brief Test if a given class requires a /// C++AMP deserializer declaration bool NeedAMPDeserializer(CXXRecordDecl *ClassDecl); /// \brief Test if a given class has a C++AMP deserializer declaration bool HasDeclaredAMPDeserializer(CXXRecordDecl *ClassDecl); // Declare C++AMP serializer and deserializer typedef SmallVector<QualType, 16> AMPDeserializerArgs; void DeclareAMPSerializer(CXXRecordDecl *ClassDecl, DeclarationName Name); void DeclareAMPDeserializer(CXXRecordDecl *ClassDecl, AMPDeserializerArgs *Args); void AddImplicitlyDeclaredMembersToClass(CXXRecordDecl *ClassDecl); void ActOnMemInitializers(Decl *ConstructorDecl, SourceLocation ColonLoc, ArrayRef<CXXCtorInitializer*> MemInits, bool AnyErrors); /// Check class-level dllimport/dllexport attribute. The caller must /// ensure that referenceDLLExportedClassMethods is called some point later /// when all outer classes of Class are complete. void checkClassLevelDLLAttribute(CXXRecordDecl *Class); void checkClassLevelCodeSegAttribute(CXXRecordDecl *Class); void referenceDLLExportedClassMethods(); void propagateDLLAttrToBaseClassTemplate( CXXRecordDecl *Class, Attr *ClassAttr, ClassTemplateSpecializationDecl *BaseTemplateSpec, SourceLocation BaseLoc); void CheckCompletedCXXClass(CXXRecordDecl *Record); /// Check that the C++ class annoated with "trivial_abi" satisfies all the /// conditions that are needed for the attribute to have an effect. 
void checkIllFormedTrivialABIStruct(CXXRecordDecl &RD); void ActOnFinishCXXMemberSpecification(Scope *S, SourceLocation RLoc, Decl *TagDecl, SourceLocation LBrac, SourceLocation RBrac, const ParsedAttributesView &AttrList); void ActOnFinishCXXMemberDecls(); void ActOnFinishCXXNonNestedClass(Decl *D); void ActOnReenterCXXMethodParameter(Scope *S, ParmVarDecl *Param); unsigned ActOnReenterTemplateScope(Scope *S, Decl *Template); void ActOnStartDelayedMemberDeclarations(Scope *S, Decl *Record); void ActOnStartDelayedCXXMethodDeclaration(Scope *S, Decl *Method); void ActOnDelayedCXXMethodParameter(Scope *S, Decl *Param); void ActOnFinishDelayedMemberDeclarations(Scope *S, Decl *Record); void ActOnFinishDelayedCXXMethodDeclaration(Scope *S, Decl *Method); void ActOnFinishDelayedMemberInitializers(Decl *Record); void MarkAsLateParsedTemplate(FunctionDecl *FD, Decl *FnD, CachedTokens &Toks); void UnmarkAsLateParsedTemplate(FunctionDecl *FD); bool IsInsideALocalClassWithinATemplateFunction(); Decl *ActOnStaticAssertDeclaration(SourceLocation StaticAssertLoc, Expr *AssertExpr, Expr *AssertMessageExpr, SourceLocation RParenLoc); Decl *BuildStaticAssertDeclaration(SourceLocation StaticAssertLoc, Expr *AssertExpr, StringLiteral *AssertMessageExpr, SourceLocation RParenLoc, bool Failed); FriendDecl *CheckFriendTypeDecl(SourceLocation LocStart, SourceLocation FriendLoc, TypeSourceInfo *TSInfo); Decl *ActOnFriendTypeDecl(Scope *S, const DeclSpec &DS, MultiTemplateParamsArg TemplateParams); NamedDecl *ActOnFriendFunctionDecl(Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParams); QualType CheckConstructorDeclarator(Declarator &D, QualType R, StorageClass& SC); void CheckConstructor(CXXConstructorDecl *Constructor); QualType CheckDestructorDeclarator(Declarator &D, QualType R, StorageClass& SC); bool CheckDestructor(CXXDestructorDecl *Destructor); void CheckConversionDeclarator(Declarator &D, QualType &R, StorageClass& SC); Decl *ActOnConversionDeclarator(CXXConversionDecl 
*Conversion); void CheckDeductionGuideDeclarator(Declarator &D, QualType &R, StorageClass &SC); void CheckDeductionGuideTemplate(FunctionTemplateDecl *TD); void CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD); void CheckDelayedMemberExceptionSpecs(); //===--------------------------------------------------------------------===// // C++ Derived Classes // /// ActOnBaseSpecifier - Parsed a base specifier CXXBaseSpecifier *CheckBaseSpecifier(CXXRecordDecl *Class, SourceRange SpecifierRange, bool Virtual, AccessSpecifier Access, TypeSourceInfo *TInfo, SourceLocation EllipsisLoc); BaseResult ActOnBaseSpecifier(Decl *classdecl, SourceRange SpecifierRange, ParsedAttributes &Attrs, bool Virtual, AccessSpecifier Access, ParsedType basetype, SourceLocation BaseLoc, SourceLocation EllipsisLoc); bool AttachBaseSpecifiers(CXXRecordDecl *Class, MutableArrayRef<CXXBaseSpecifier *> Bases); void ActOnBaseSpecifiers(Decl *ClassDecl, MutableArrayRef<CXXBaseSpecifier *> Bases); bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base); bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base, CXXBasePaths &Paths); // FIXME: I don't like this name. void BuildBasePathArray(const CXXBasePaths &Paths, CXXCastPath &BasePath); bool CheckDerivedToBaseConversion(QualType Derived, QualType Base, SourceLocation Loc, SourceRange Range, CXXCastPath *BasePath = nullptr, bool IgnoreAccess = false); bool CheckDerivedToBaseConversion(QualType Derived, QualType Base, unsigned InaccessibleBaseID, unsigned AmbigiousBaseConvID, SourceLocation Loc, SourceRange Range, DeclarationName Name, CXXCastPath *BasePath, bool IgnoreAccess = false); std::string getAmbiguousPathsDisplayString(CXXBasePaths &Paths); bool CheckOverridingFunctionAttributes(const CXXMethodDecl *New, const CXXMethodDecl *Old); /// CheckOverridingFunctionReturnType - Checks whether the return types are /// covariant, according to C++ [class.virtual]p5. 
bool CheckOverridingFunctionReturnType(const CXXMethodDecl *New, const CXXMethodDecl *Old); /// CheckOverridingFunctionExceptionSpec - Checks whether the exception /// spec is a subset of base spec. bool CheckOverridingFunctionExceptionSpec(const CXXMethodDecl *New, const CXXMethodDecl *Old); bool CheckPureMethod(CXXMethodDecl *Method, SourceRange InitRange); /// CheckOverrideControl - Check C++11 override control semantics. void CheckOverrideControl(NamedDecl *D); /// DiagnoseAbsenceOfOverrideControl - Diagnose if 'override' keyword was /// not used in the declaration of an overriding method. void DiagnoseAbsenceOfOverrideControl(NamedDecl *D); /// CheckForFunctionMarkedFinal - Checks whether a virtual member function /// overrides a virtual member function marked 'final', according to /// C++11 [class.virtual]p4. bool CheckIfOverriddenFunctionIsMarkedFinal(const CXXMethodDecl *New, const CXXMethodDecl *Old); //===--------------------------------------------------------------------===// // C++ Access Control // enum AccessResult { AR_accessible, AR_inaccessible, AR_dependent, AR_delayed }; bool SetMemberAccessSpecifier(NamedDecl *MemberDecl, NamedDecl *PrevMemberDecl, AccessSpecifier LexicalAS); AccessResult CheckUnresolvedMemberAccess(UnresolvedMemberExpr *E, DeclAccessPair FoundDecl); AccessResult CheckUnresolvedLookupAccess(UnresolvedLookupExpr *E, DeclAccessPair FoundDecl); AccessResult CheckAllocationAccess(SourceLocation OperatorLoc, SourceRange PlacementRange, CXXRecordDecl *NamingClass, DeclAccessPair FoundDecl, bool Diagnose = true); AccessResult CheckConstructorAccess(SourceLocation Loc, CXXConstructorDecl *D, DeclAccessPair FoundDecl, const InitializedEntity &Entity, bool IsCopyBindingRefToTemp = false); AccessResult CheckConstructorAccess(SourceLocation Loc, CXXConstructorDecl *D, DeclAccessPair FoundDecl, const InitializedEntity &Entity, const PartialDiagnostic &PDiag); AccessResult CheckDestructorAccess(SourceLocation Loc, CXXDestructorDecl *Dtor, 
const PartialDiagnostic &PDiag, QualType objectType = QualType()); AccessResult CheckFriendAccess(NamedDecl *D); AccessResult CheckMemberAccess(SourceLocation UseLoc, CXXRecordDecl *NamingClass, DeclAccessPair Found); AccessResult CheckStructuredBindingMemberAccess(SourceLocation UseLoc, CXXRecordDecl *DecomposedClass, DeclAccessPair Field); AccessResult CheckMemberOperatorAccess(SourceLocation Loc, Expr *ObjectExpr, Expr *ArgExpr, DeclAccessPair FoundDecl); AccessResult CheckAddressOfMemberAccess(Expr *OvlExpr, DeclAccessPair FoundDecl); AccessResult CheckBaseClassAccess(SourceLocation AccessLoc, QualType Base, QualType Derived, const CXXBasePath &Path, unsigned DiagID, bool ForceCheck = false, bool ForceUnprivileged = false); void CheckLookupAccess(const LookupResult &R); bool IsSimplyAccessible(NamedDecl *Decl, CXXRecordDecl *NamingClass, QualType BaseType); bool isSpecialMemberAccessibleForDeletion(CXXMethodDecl *decl, AccessSpecifier access, QualType objectType); void HandleDependentAccessCheck(const DependentDiagnostic &DD, const MultiLevelTemplateArgumentList &TemplateArgs); void PerformDependentDiagnostics(const DeclContext *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs); void HandleDelayedAccessCheck(sema::DelayedDiagnostic &DD, Decl *Ctx); /// When true, access checking violations are treated as SFINAE /// failures rather than hard errors. bool AccessCheckingSFINAE; enum AbstractDiagSelID { AbstractNone = -1, AbstractReturnType, AbstractParamType, AbstractVariableType, AbstractFieldType, AbstractIvarType, AbstractSynthesizedIvarType, AbstractArrayType }; bool isAbstractType(SourceLocation Loc, QualType T); bool RequireNonAbstractType(SourceLocation Loc, QualType T, TypeDiagnoser &Diagnoser); template <typename... 
Ts> bool RequireNonAbstractType(SourceLocation Loc, QualType T, unsigned DiagID, const Ts &...Args) { BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireNonAbstractType(Loc, T, Diagnoser); } void DiagnoseAbstractType(const CXXRecordDecl *RD); //===--------------------------------------------------------------------===// // C++ Overloaded Operators [C++ 13.5] // bool CheckOverloadedOperatorDeclaration(FunctionDecl *FnDecl); bool CheckLiteralOperatorDeclaration(FunctionDecl *FnDecl); //===--------------------------------------------------------------------===// // C++ Templates [C++ 14] // void FilterAcceptableTemplateNames(LookupResult &R, bool AllowFunctionTemplates = true, bool AllowDependent = true); bool hasAnyAcceptableTemplateNames(LookupResult &R, bool AllowFunctionTemplates = true, bool AllowDependent = true, bool AllowNonTemplateFunctions = false); /// Try to interpret the lookup result D as a template-name. /// /// \param D A declaration found by name lookup. /// \param AllowFunctionTemplates Whether function templates should be /// considered valid results. /// \param AllowDependent Whether unresolved using declarations (that might /// name templates) should be considered valid results. NamedDecl *getAsTemplateNameDecl(NamedDecl *D, bool AllowFunctionTemplates = true, bool AllowDependent = true); enum class AssumedTemplateKind { /// This is not assumed to be a template name. None, /// This is assumed to be a template name because lookup found nothing. FoundNothing, /// This is assumed to be a template name because lookup found one or more /// functions (but no function templates). 
FoundFunctions, }; bool LookupTemplateName(LookupResult &R, Scope *S, CXXScopeSpec &SS, QualType ObjectType, bool EnteringContext, bool &MemberOfUnknownSpecialization, SourceLocation TemplateKWLoc = SourceLocation(), AssumedTemplateKind *ATK = nullptr); TemplateNameKind isTemplateName(Scope *S, CXXScopeSpec &SS, bool hasTemplateKeyword, const UnqualifiedId &Name, ParsedType ObjectType, bool EnteringContext, TemplateTy &Template, bool &MemberOfUnknownSpecialization); /// Try to resolve an undeclared template name as a type template. /// /// Sets II to the identifier corresponding to the template name, and updates /// Name to a corresponding (typo-corrected) type template name and TNK to /// the corresponding kind, if possible. void ActOnUndeclaredTypeTemplateName(Scope *S, TemplateTy &Name, TemplateNameKind &TNK, SourceLocation NameLoc, IdentifierInfo *&II); bool resolveAssumedTemplateNameAsType(Scope *S, TemplateName &Name, SourceLocation NameLoc, bool Diagnose = true); /// Determine whether a particular identifier might be the name in a C++1z /// deduction-guide declaration. 
bool isDeductionGuideName(Scope *S, const IdentifierInfo &Name, SourceLocation NameLoc, ParsedTemplateTy *Template = nullptr); bool DiagnoseUnknownTemplateName(const IdentifierInfo &II, SourceLocation IILoc, Scope *S, const CXXScopeSpec *SS, TemplateTy &SuggestedTemplate, TemplateNameKind &SuggestedKind); bool DiagnoseUninstantiableTemplate(SourceLocation PointOfInstantiation, NamedDecl *Instantiation, bool InstantiatedFromMember, const NamedDecl *Pattern, const NamedDecl *PatternDef, TemplateSpecializationKind TSK, bool Complain = true); void DiagnoseTemplateParameterShadow(SourceLocation Loc, Decl *PrevDecl); TemplateDecl *AdjustDeclIfTemplate(Decl *&Decl); NamedDecl *ActOnTypeParameter(Scope *S, bool Typename, SourceLocation EllipsisLoc, SourceLocation KeyLoc, IdentifierInfo *ParamName, SourceLocation ParamNameLoc, unsigned Depth, unsigned Position, SourceLocation EqualLoc, ParsedType DefaultArg); QualType CheckNonTypeTemplateParameterType(TypeSourceInfo *&TSI, SourceLocation Loc); QualType CheckNonTypeTemplateParameterType(QualType T, SourceLocation Loc); NamedDecl *ActOnNonTypeTemplateParameter(Scope *S, Declarator &D, unsigned Depth, unsigned Position, SourceLocation EqualLoc, Expr *DefaultArg); NamedDecl *ActOnTemplateTemplateParameter(Scope *S, SourceLocation TmpLoc, TemplateParameterList *Params, SourceLocation EllipsisLoc, IdentifierInfo *ParamName, SourceLocation ParamNameLoc, unsigned Depth, unsigned Position, SourceLocation EqualLoc, ParsedTemplateArgument DefaultArg); TemplateParameterList * ActOnTemplateParameterList(unsigned Depth, SourceLocation ExportLoc, SourceLocation TemplateLoc, SourceLocation LAngleLoc, ArrayRef<NamedDecl *> Params, SourceLocation RAngleLoc, Expr *RequiresClause); /// The context in which we are checking a template parameter list. 
enum TemplateParamListContext {
  TPC_ClassTemplate,
  TPC_VarTemplate,
  TPC_FunctionTemplate,
  TPC_ClassTemplateMember,
  TPC_FriendClassTemplate,
  TPC_FriendFunctionTemplate,
  // A friend function template that is also a definition.
  TPC_FriendFunctionTemplateDefinition,
  TPC_TypeAliasTemplate
};

// Semantic checking of a template parameter list appearing in context \p TPC,
// compared against \p OldParams where a previous declaration exists.
bool CheckTemplateParameterList(TemplateParameterList *NewParams,
                                TemplateParameterList *OldParams,
                                TemplateParamListContext TPC,
                                SkipBodyInfo *SkipBody = nullptr);

TemplateParameterList *MatchTemplateParametersToScopeSpecifier(
    SourceLocation DeclStartLoc, SourceLocation DeclLoc,
    const CXXScopeSpec &SS, TemplateIdAnnotation *TemplateId,
    ArrayRef<TemplateParameterList *> ParamLists, bool IsFriend,
    bool &IsMemberSpecialization, bool &Invalid);

DeclResult CheckClassTemplate(
    Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
    CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc,
    const ParsedAttributesView &Attr, TemplateParameterList *TemplateParams,
    AccessSpecifier AS, SourceLocation ModulePrivateLoc,
    SourceLocation FriendLoc, unsigned NumOuterTemplateParamLists,
    TemplateParameterList **OuterTemplateParamLists,
    SkipBodyInfo *SkipBody = nullptr);

TemplateArgumentLoc getTrivialTemplateArgumentLoc(const TemplateArgument &Arg,
                                                  QualType NTTPType,
                                                  SourceLocation Loc);

void translateTemplateArguments(const ASTTemplateArgsPtr &In,
                                TemplateArgumentListInfo &Out);

ParsedTemplateArgument ActOnTemplateTypeArgument(TypeResult ParsedType);

void NoteAllFoundTemplates(TemplateName Name);

QualType CheckTemplateIdType(TemplateName Template,
                             SourceLocation TemplateLoc,
                             TemplateArgumentListInfo &TemplateArgs);

TypeResult ActOnTemplateIdType(
    Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
    TemplateTy Template, IdentifierInfo *TemplateII,
    SourceLocation TemplateIILoc, SourceLocation LAngleLoc,
    ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc,
    bool IsCtorOrDtorName = false, bool IsClassName = false);

/// Parsed an elaborated-type-specifier that refers to a template-id,
/// such as \c class T::template apply<U>.
TypeResult ActOnTagTemplateIdType(TagUseKind TUK, TypeSpecifierType TagSpec, SourceLocation TagLoc, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy TemplateD, SourceLocation TemplateLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgsIn, SourceLocation RAngleLoc); DeclResult ActOnVarTemplateSpecialization( Scope *S, Declarator &D, TypeSourceInfo *DI, SourceLocation TemplateKWLoc, TemplateParameterList *TemplateParams, StorageClass SC, bool IsPartialSpecialization); DeclResult CheckVarTemplateId(VarTemplateDecl *Template, SourceLocation TemplateLoc, SourceLocation TemplateNameLoc, const TemplateArgumentListInfo &TemplateArgs); ExprResult CheckVarTemplateId(const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, VarTemplateDecl *Template, SourceLocation TemplateLoc, const TemplateArgumentListInfo *TemplateArgs); ExprResult CheckConceptTemplateId(const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, ConceptDecl *Template, SourceLocation TemplateLoc, const TemplateArgumentListInfo *TemplateArgs); void diagnoseMissingTemplateArguments(TemplateName Name, SourceLocation Loc); ExprResult BuildTemplateIdExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R, bool RequiresADL, const TemplateArgumentListInfo *TemplateArgs); ExprResult BuildQualifiedTemplateIdExpr(CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs); TemplateNameKind ActOnDependentTemplateName( Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const UnqualifiedId &Name, ParsedType ObjectType, bool EnteringContext, TemplateTy &Template, bool AllowInjectedClassName = false); DeclResult ActOnClassTemplateSpecialization( Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc, SourceLocation ModulePrivateLoc, TemplateIdAnnotation &TemplateId, const ParsedAttributesView &Attr, MultiTemplateParamsArg TemplateParameterLists, SkipBodyInfo *SkipBody = nullptr); bool 
CheckTemplatePartialSpecializationArgs(SourceLocation Loc, TemplateDecl *PrimaryTemplate, unsigned NumExplicitArgs, ArrayRef<TemplateArgument> Args); void CheckTemplatePartialSpecialization( ClassTemplatePartialSpecializationDecl *Partial); void CheckTemplatePartialSpecialization( VarTemplatePartialSpecializationDecl *Partial); Decl *ActOnTemplateDeclarator(Scope *S, MultiTemplateParamsArg TemplateParameterLists, Declarator &D); bool CheckSpecializationInstantiationRedecl(SourceLocation NewLoc, TemplateSpecializationKind NewTSK, NamedDecl *PrevDecl, TemplateSpecializationKind PrevTSK, SourceLocation PrevPtOfInstantiation, bool &SuppressNew); bool CheckDependentFunctionTemplateSpecialization(FunctionDecl *FD, const TemplateArgumentListInfo &ExplicitTemplateArgs, LookupResult &Previous); bool CheckFunctionTemplateSpecialization( FunctionDecl *FD, TemplateArgumentListInfo *ExplicitTemplateArgs, LookupResult &Previous, bool QualifiedFriend = false); bool CheckMemberSpecialization(NamedDecl *Member, LookupResult &Previous); void CompleteMemberSpecialization(NamedDecl *Member, LookupResult &Previous); DeclResult ActOnExplicitInstantiation( Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc, unsigned TagSpec, SourceLocation KWLoc, const CXXScopeSpec &SS, TemplateTy Template, SourceLocation TemplateNameLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc, const ParsedAttributesView &Attr); DeclResult ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc, unsigned TagSpec, SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, const ParsedAttributesView &Attr); DeclResult ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc, Declarator &D); TemplateArgumentLoc SubstDefaultTemplateArgumentIfAvailable(TemplateDecl *Template, SourceLocation TemplateLoc, SourceLocation RAngleLoc, Decl *Param, SmallVectorImpl<TemplateArgument> 
&Converted, bool &HasDefaultArg); /// Specifies the context in which a particular template /// argument is being checked. enum CheckTemplateArgumentKind { /// The template argument was specified in the code or was /// instantiated with some deduced template arguments. CTAK_Specified, /// The template argument was deduced via template argument /// deduction. CTAK_Deduced, /// The template argument was deduced from an array bound /// via template argument deduction. CTAK_DeducedFromArrayBound }; // C++AMP diagnotic routine for template arguments void DiagnoseCXXAMPTemplateArgument(NamedDecl *Param, const TemplateArgumentLoc &AL, NamedDecl *Template, SourceLocation TemplateLoc); bool CheckTemplateArgument(NamedDecl *Param, TemplateArgumentLoc &Arg, NamedDecl *Template, SourceLocation TemplateLoc, SourceLocation RAngleLoc, unsigned ArgumentPackIndex, SmallVectorImpl<TemplateArgument> &Converted, CheckTemplateArgumentKind CTAK = CTAK_Specified); /// Check that the given template arguments can be be provided to /// the given template, converting the arguments along the way. /// /// \param Template The template to which the template arguments are being /// provided. /// /// \param TemplateLoc The location of the template name in the source. /// /// \param TemplateArgs The list of template arguments. If the template is /// a template template parameter, this function may extend the set of /// template arguments to also include substituted, defaulted template /// arguments. /// /// \param PartialTemplateArgs True if the list of template arguments is /// intentionally partial, e.g., because we're checking just the initial /// set of template arguments. /// /// \param Converted Will receive the converted, canonicalized template /// arguments. /// /// \param UpdateArgsWithConversions If \c true, update \p TemplateArgs to /// contain the converted forms of the template arguments as written. /// Otherwise, \p TemplateArgs will not be modified. 
/// /// \returns true if an error occurred, false otherwise. bool CheckTemplateArgumentList(TemplateDecl *Template, SourceLocation TemplateLoc, TemplateArgumentListInfo &TemplateArgs, bool PartialTemplateArgs, SmallVectorImpl<TemplateArgument> &Converted, bool UpdateArgsWithConversions = true); bool CheckTemplateTypeArgument(TemplateTypeParmDecl *Param, TemplateArgumentLoc &Arg, SmallVectorImpl<TemplateArgument> &Converted); bool CheckTemplateArgument(TemplateTypeParmDecl *Param, TypeSourceInfo *Arg); ExprResult CheckTemplateArgument(NonTypeTemplateParmDecl *Param, QualType InstantiatedParamType, Expr *Arg, TemplateArgument &Converted, CheckTemplateArgumentKind CTAK = CTAK_Specified); bool CheckTemplateTemplateArgument(TemplateParameterList *Params, TemplateArgumentLoc &Arg); ExprResult BuildExpressionFromDeclTemplateArgument(const TemplateArgument &Arg, QualType ParamType, SourceLocation Loc); ExprResult BuildExpressionFromIntegralTemplateArgument(const TemplateArgument &Arg, SourceLocation Loc); /// Enumeration describing how template parameter lists are compared /// for equality. enum TemplateParameterListEqualKind { /// We are matching the template parameter lists of two templates /// that might be redeclarations. /// /// \code /// template<typename T> struct X; /// template<typename T> struct X; /// \endcode TPL_TemplateMatch, /// We are matching the template parameter lists of two template /// template parameters as part of matching the template parameter lists /// of two templates that might be redeclarations. /// /// \code /// template<template<int I> class TT> struct X; /// template<template<int Value> class Other> struct X; /// \endcode TPL_TemplateTemplateParmMatch, /// We are matching the template parameter lists of a template /// template argument against the template parameter lists of a template /// template parameter. 
/// /// \code /// template<template<int Value> class Metafun> struct X; /// template<int Value> struct integer_c; /// X<integer_c> xic; /// \endcode TPL_TemplateTemplateArgumentMatch }; bool TemplateParameterListsAreEqual(TemplateParameterList *New, TemplateParameterList *Old, bool Complain, TemplateParameterListEqualKind Kind, SourceLocation TemplateArgLoc = SourceLocation()); bool CheckTemplateDeclScope(Scope *S, TemplateParameterList *TemplateParams); /// Called when the parser has parsed a C++ typename /// specifier, e.g., "typename T::type". /// /// \param S The scope in which this typename type occurs. /// \param TypenameLoc the location of the 'typename' keyword /// \param SS the nested-name-specifier following the typename (e.g., 'T::'). /// \param II the identifier we're retrieving (e.g., 'type' in the example). /// \param IdLoc the location of the identifier. TypeResult ActOnTypenameType(Scope *S, SourceLocation TypenameLoc, const CXXScopeSpec &SS, const IdentifierInfo &II, SourceLocation IdLoc); /// Called when the parser has parsed a C++ typename /// specifier that ends in a template-id, e.g., /// "typename MetaFun::template apply<T1, T2>". /// /// \param S The scope in which this typename type occurs. /// \param TypenameLoc the location of the 'typename' keyword /// \param SS the nested-name-specifier following the typename (e.g., 'T::'). /// \param TemplateLoc the location of the 'template' keyword, if any. /// \param TemplateName The template name. /// \param TemplateII The identifier used to name the template. /// \param TemplateIILoc The location of the template name. /// \param LAngleLoc The location of the opening angle bracket ('<'). /// \param TemplateArgs The template arguments. /// \param RAngleLoc The location of the closing angle bracket ('>'). 
TypeResult ActOnTypenameType(Scope *S, SourceLocation TypenameLoc, const CXXScopeSpec &SS, SourceLocation TemplateLoc, TemplateTy TemplateName, IdentifierInfo *TemplateII, SourceLocation TemplateIILoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc); QualType CheckTypenameType(ElaboratedTypeKeyword Keyword, SourceLocation KeywordLoc, NestedNameSpecifierLoc QualifierLoc, const IdentifierInfo &II, SourceLocation IILoc); TypeSourceInfo *RebuildTypeInCurrentInstantiation(TypeSourceInfo *T, SourceLocation Loc, DeclarationName Name); bool RebuildNestedNameSpecifierInCurrentInstantiation(CXXScopeSpec &SS); ExprResult RebuildExprInCurrentInstantiation(Expr *E); bool RebuildTemplateParamsInCurrentInstantiation( TemplateParameterList *Params); std::string getTemplateArgumentBindingsText(const TemplateParameterList *Params, const TemplateArgumentList &Args); std::string getTemplateArgumentBindingsText(const TemplateParameterList *Params, const TemplateArgument *Args, unsigned NumArgs); // Concepts Decl *ActOnConceptDefinition( Scope *S, MultiTemplateParamsArg TemplateParameterLists, IdentifierInfo *Name, SourceLocation NameLoc, Expr *ConstraintExpr); //===--------------------------------------------------------------------===// // C++ Variadic Templates (C++0x [temp.variadic]) //===--------------------------------------------------------------------===// /// Determine whether an unexpanded parameter pack might be permitted in this /// location. Useful for error recovery. bool isUnexpandedParameterPackPermitted(); /// The context in which an unexpanded parameter pack is /// being diagnosed. /// /// Note that the values of this enumeration line up with the first /// argument to the \c err_unexpanded_parameter_pack diagnostic. enum UnexpandedParameterPackContext { /// An arbitrary expression. UPPC_Expression = 0, /// The base type of a class type. UPPC_BaseType, /// The type of an arbitrary declaration. 
UPPC_DeclarationType, /// The type of a data member. UPPC_DataMemberType, /// The size of a bit-field. UPPC_BitFieldWidth, /// The expression in a static assertion. UPPC_StaticAssertExpression, /// The fixed underlying type of an enumeration. UPPC_FixedUnderlyingType, /// The enumerator value. UPPC_EnumeratorValue, /// A using declaration. UPPC_UsingDeclaration, /// A friend declaration. UPPC_FriendDeclaration, /// A declaration qualifier. UPPC_DeclarationQualifier, /// An initializer. UPPC_Initializer, /// A default argument. UPPC_DefaultArgument, /// The type of a non-type template parameter. UPPC_NonTypeTemplateParameterType, /// The type of an exception. UPPC_ExceptionType, /// Partial specialization. UPPC_PartialSpecialization, /// Microsoft __if_exists. UPPC_IfExists, /// Microsoft __if_not_exists. UPPC_IfNotExists, /// Lambda expression. UPPC_Lambda, /// Block expression, UPPC_Block }; /// Diagnose unexpanded parameter packs. /// /// \param Loc The location at which we should emit the diagnostic. /// /// \param UPPC The context in which we are diagnosing unexpanded /// parameter packs. /// /// \param Unexpanded the set of unexpanded parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPacks(SourceLocation Loc, UnexpandedParameterPackContext UPPC, ArrayRef<UnexpandedParameterPack> Unexpanded); /// If the given type contains an unexpanded parameter pack, /// diagnose the error. /// /// \param Loc The source location where a diagnostc should be emitted. /// /// \param T The type that is being checked for unexpanded parameter /// packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(SourceLocation Loc, TypeSourceInfo *T, UnexpandedParameterPackContext UPPC); /// If the given expression contains an unexpanded parameter /// pack, diagnose the error. /// /// \param E The expression that is being checked for unexpanded /// parameter packs. 
/// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(Expr *E, UnexpandedParameterPackContext UPPC = UPPC_Expression); /// If the given nested-name-specifier contains an unexpanded /// parameter pack, diagnose the error. /// /// \param SS The nested-name-specifier that is being checked for /// unexpanded parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(const CXXScopeSpec &SS, UnexpandedParameterPackContext UPPC); /// If the given name contains an unexpanded parameter pack, /// diagnose the error. /// /// \param NameInfo The name (with source location information) that /// is being checked for unexpanded parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(const DeclarationNameInfo &NameInfo, UnexpandedParameterPackContext UPPC); /// If the given template name contains an unexpanded parameter pack, /// diagnose the error. /// /// \param Loc The location of the template name. /// /// \param Template The template name that is being checked for unexpanded /// parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(SourceLocation Loc, TemplateName Template, UnexpandedParameterPackContext UPPC); /// If the given template argument contains an unexpanded parameter /// pack, diagnose the error. /// /// \param Arg The template argument that is being checked for unexpanded /// parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(TemplateArgumentLoc Arg, UnexpandedParameterPackContext UPPC); /// Collect the set of unexpanded parameter packs within the given /// template argument. /// /// \param Arg The template argument that will be traversed to find /// unexpanded parameter packs. 
void collectUnexpandedParameterPacks(TemplateArgument Arg, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// template argument. /// /// \param Arg The template argument that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(TemplateArgumentLoc Arg, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// type. /// /// \param T The type that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(QualType T, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// type. /// /// \param TL The type that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(TypeLoc TL, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// nested-name-specifier. /// /// \param NNS The nested-name-specifier that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(NestedNameSpecifierLoc NNS, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// name. /// /// \param NameInfo The name that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(const DeclarationNameInfo &NameInfo, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Invoked when parsing a template argument followed by an /// ellipsis, which creates a pack expansion. /// /// \param Arg The template argument preceding the ellipsis, which /// may already be invalid. /// /// \param EllipsisLoc The location of the ellipsis. 
ParsedTemplateArgument ActOnPackExpansion(const ParsedTemplateArgument &Arg, SourceLocation EllipsisLoc); /// Invoked when parsing a type followed by an ellipsis, which /// creates a pack expansion. /// /// \param Type The type preceding the ellipsis, which will become /// the pattern of the pack expansion. /// /// \param EllipsisLoc The location of the ellipsis. TypeResult ActOnPackExpansion(ParsedType Type, SourceLocation EllipsisLoc); /// Construct a pack expansion type from the pattern of the pack /// expansion. TypeSourceInfo *CheckPackExpansion(TypeSourceInfo *Pattern, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions); /// Construct a pack expansion type from the pattern of the pack /// expansion. QualType CheckPackExpansion(QualType Pattern, SourceRange PatternRange, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions); /// Invoked when parsing an expression followed by an ellipsis, which /// creates a pack expansion. /// /// \param Pattern The expression preceding the ellipsis, which will become /// the pattern of the pack expansion. /// /// \param EllipsisLoc The location of the ellipsis. ExprResult ActOnPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc); /// Invoked when parsing an expression followed by an ellipsis, which /// creates a pack expansion. /// /// \param Pattern The expression preceding the ellipsis, which will become /// the pattern of the pack expansion. /// /// \param EllipsisLoc The location of the ellipsis. ExprResult CheckPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions); /// Determine whether we could expand a pack expansion with the /// given set of parameter packs into separate arguments by repeatedly /// transforming the pattern. /// /// \param EllipsisLoc The location of the ellipsis that identifies the /// pack expansion. /// /// \param PatternRange The source range that covers the entire pattern of /// the pack expansion. 
/// /// \param Unexpanded The set of unexpanded parameter packs within the /// pattern. /// /// \param ShouldExpand Will be set to \c true if the transformer should /// expand the corresponding pack expansions into separate arguments. When /// set, \c NumExpansions must also be set. /// /// \param RetainExpansion Whether the caller should add an unexpanded /// pack expansion after all of the expanded arguments. This is used /// when extending explicitly-specified template argument packs per /// C++0x [temp.arg.explicit]p9. /// /// \param NumExpansions The number of separate arguments that will be in /// the expanded form of the corresponding pack expansion. This is both an /// input and an output parameter, which can be set by the caller if the /// number of expansions is known a priori (e.g., due to a prior substitution) /// and will be set by the callee when the number of expansions is known. /// The callee must set this value when \c ShouldExpand is \c true; it may /// set this value in other cases. /// /// \returns true if an error occurred (e.g., because the parameter packs /// are to be instantiated with arguments of different lengths), false /// otherwise. If false, \c ShouldExpand (and possibly \c NumExpansions) /// must be set. bool CheckParameterPacksForExpansion(SourceLocation EllipsisLoc, SourceRange PatternRange, ArrayRef<UnexpandedParameterPack> Unexpanded, const MultiLevelTemplateArgumentList &TemplateArgs, bool &ShouldExpand, bool &RetainExpansion, Optional<unsigned> &NumExpansions); /// Determine the number of arguments in the given pack expansion /// type. /// /// This routine assumes that the number of arguments in the expansion is /// consistent across all of the unexpanded parameter packs in its pattern. /// /// Returns an empty Optional if the type can't be expanded. 
Optional<unsigned> getNumArgumentsInExpansion(QualType T, const MultiLevelTemplateArgumentList &TemplateArgs); /// Determine whether the given declarator contains any unexpanded /// parameter packs. /// /// This routine is used by the parser to disambiguate function declarators /// with an ellipsis prior to the ')', e.g., /// /// \code /// void f(T...); /// \endcode /// /// To determine whether we have an (unnamed) function parameter pack or /// a variadic function. /// /// \returns true if the declarator contains any unexpanded parameter packs, /// false otherwise. bool containsUnexpandedParameterPacks(Declarator &D); /// Returns the pattern of the pack expansion for a template argument. /// /// \param OrigLoc The template argument to expand. /// /// \param Ellipsis Will be set to the location of the ellipsis. /// /// \param NumExpansions Will be set to the number of expansions that will /// be generated from this pack expansion, if known a priori. TemplateArgumentLoc getTemplateArgumentPackExpansionPattern( TemplateArgumentLoc OrigLoc, SourceLocation &Ellipsis, Optional<unsigned> &NumExpansions) const; /// Given a template argument that contains an unexpanded parameter pack, but /// which has already been substituted, attempt to determine the number of /// elements that will be produced once this argument is fully-expanded. /// /// This is intended for use when transforming 'sizeof...(Arg)' in order to /// avoid actually expanding the pack where possible. Optional<unsigned> getFullyPackExpandedSize(TemplateArgument Arg); //===--------------------------------------------------------------------===// // C++ Template Argument Deduction (C++ [temp.deduct]) //===--------------------------------------------------------------------===// /// Adjust the type \p ArgFunctionType to match the calling convention, /// noreturn, and optionally the exception specification of \p FunctionType. /// Deduction often wants to ignore these properties when matching function /// types. 
QualType adjustCCAndNoReturn(QualType ArgFunctionType, QualType FunctionType,
                             bool AdjustExceptionSpec = false);

/// Describes the result of template argument deduction.
///
/// The TemplateDeductionResult enumeration describes the result of
/// template argument deduction, as returned from
/// DeduceTemplateArguments(). The separate TemplateDeductionInfo
/// structure provides additional information about the results of
/// template argument deduction, e.g., the deduced template argument
/// list (if successful) or the specific template parameters or
/// deduced arguments that were involved in the failure.
enum TemplateDeductionResult {
  /// Template argument deduction was successful.
  TDK_Success = 0,
  /// The declaration was invalid; do nothing.
  TDK_Invalid,
  /// Template argument deduction exceeded the maximum template
  /// instantiation depth (which has already been diagnosed).
  TDK_InstantiationDepth,
  /// Template argument deduction did not deduce a value
  /// for every template parameter.
  TDK_Incomplete,
  /// Template argument deduction did not deduce a value for every
  /// expansion of an expanded template parameter pack.
  TDK_IncompletePack,
  /// Template argument deduction produced inconsistent
  /// deduced values for the given template parameter.
  TDK_Inconsistent,
  /// Template argument deduction failed due to inconsistent
  /// cv-qualifiers on a template parameter type that would
  /// otherwise be deduced, e.g., we tried to deduce T in "const T"
  /// but were given a non-const "X".
  TDK_Underqualified,
  /// Substitution of the deduced template argument values
  /// resulted in an error.
  TDK_SubstitutionFailure,
  /// After substituting deduced template arguments, a dependent
  /// parameter type did not match the corresponding argument.
  TDK_DeducedMismatch,
  /// After substituting deduced template arguments, an element of
  /// a dependent parameter type did not match the corresponding element
  /// of the corresponding argument (when deducing from an initializer list).
  TDK_DeducedMismatchNested,
  /// A non-dependent component of the parameter did not match the
  /// corresponding component of the argument.
  TDK_NonDeducedMismatch,
  /// When performing template argument deduction for a function
  /// template, there were too many call arguments.
  TDK_TooManyArguments,
  /// When performing template argument deduction for a function
  /// template, there were too few call arguments.
  TDK_TooFewArguments,
  /// The explicitly-specified template arguments were not valid
  /// template arguments for the given template.
  TDK_InvalidExplicitArguments,
  /// Checking non-dependent argument conversions failed.
  TDK_NonDependentConversionFailure,
  /// Deduction failed; that's all we know.
  TDK_MiscellaneousDeductionFailure,
  /// CUDA Target attributes do not match.
  TDK_CUDATargetMismatch
};

TemplateDeductionResult
DeduceTemplateArguments(ClassTemplatePartialSpecializationDecl *Partial,
                        const TemplateArgumentList &TemplateArgs,
                        sema::TemplateDeductionInfo &Info);

TemplateDeductionResult
DeduceTemplateArguments(VarTemplatePartialSpecializationDecl *Partial,
                        const TemplateArgumentList &TemplateArgs,
                        sema::TemplateDeductionInfo &Info);

TemplateDeductionResult SubstituteExplicitTemplateArguments(
    FunctionTemplateDecl *FunctionTemplate,
    TemplateArgumentListInfo &ExplicitTemplateArgs,
    SmallVectorImpl<DeducedTemplateArgument> &Deduced,
    SmallVectorImpl<QualType> &ParamTypes, QualType *FunctionType,
    sema::TemplateDeductionInfo &Info);

/// A function argument from which we performed template argument
/// deduction for a call.
struct OriginalCallArg { OriginalCallArg(QualType OriginalParamType, bool DecomposedParam, unsigned ArgIdx, QualType OriginalArgType) : OriginalParamType(OriginalParamType), DecomposedParam(DecomposedParam), ArgIdx(ArgIdx), OriginalArgType(OriginalArgType) {} QualType OriginalParamType; bool DecomposedParam; unsigned ArgIdx; QualType OriginalArgType; }; TemplateDeductionResult FinishTemplateArgumentDeduction( FunctionTemplateDecl *FunctionTemplate, SmallVectorImpl<DeducedTemplateArgument> &Deduced, unsigned NumExplicitlySpecified, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, SmallVectorImpl<OriginalCallArg> const *OriginalCallArgs = nullptr, bool PartialOverloading = false, llvm::function_ref<bool()> CheckNonDependent = []{ return false; }); TemplateDeductionResult DeduceTemplateArguments( FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, bool PartialOverloading, llvm::function_ref<bool(ArrayRef<QualType>)> CheckNonDependent); TemplateDeductionResult DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo *ExplicitTemplateArgs, QualType ArgFunctionType, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, bool IsAddressOfFunction = false); TemplateDeductionResult DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate, QualType ToType, CXXConversionDecl *&Specialization, sema::TemplateDeductionInfo &Info); TemplateDeductionResult DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo *ExplicitTemplateArgs, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, bool IsAddressOfFunction = false); /// Substitute Replacement for \p auto in \p TypeWithAuto QualType SubstAutoType(QualType TypeWithAuto, QualType Replacement); /// Substitute Replacement for auto in TypeWithAuto TypeSourceInfo* SubstAutoTypeSourceInfo(TypeSourceInfo 
*TypeWithAuto, QualType Replacement); /// Completely replace the \c auto in \p TypeWithAuto by /// \p Replacement. This does not retain any \c auto type sugar. QualType ReplaceAutoType(QualType TypeWithAuto, QualType Replacement); /// Result type of DeduceAutoType. enum DeduceAutoResult { DAR_Succeeded, DAR_Failed, DAR_FailedAlreadyDiagnosed }; DeduceAutoResult DeduceAutoType(TypeSourceInfo *AutoType, Expr *&Initializer, QualType &Result, Optional<unsigned> DependentDeductionDepth = None); DeduceAutoResult DeduceAutoType(TypeLoc AutoTypeLoc, Expr *&Initializer, QualType &Result, Optional<unsigned> DependentDeductionDepth = None); void DiagnoseAutoDeductionFailure(VarDecl *VDecl, Expr *Init); bool DeduceReturnType(FunctionDecl *FD, SourceLocation Loc, bool Diagnose = true); /// Declare implicit deduction guides for a class template if we've /// not already done so. void DeclareImplicitDeductionGuides(TemplateDecl *Template, SourceLocation Loc); QualType DeduceTemplateSpecializationFromInitializer( TypeSourceInfo *TInfo, const InitializedEntity &Entity, const InitializationKind &Kind, MultiExprArg Init); QualType deduceVarTypeFromInitializer(VarDecl *VDecl, DeclarationName Name, QualType Type, TypeSourceInfo *TSI, SourceRange Range, bool DirectInit, Expr *Init); TypeLoc getReturnTypeLoc(FunctionDecl *FD) const; bool DeduceFunctionTypeFromReturnExpr(FunctionDecl *FD, SourceLocation ReturnLoc, Expr *&RetExpr, AutoType *AT); FunctionTemplateDecl *getMoreSpecializedTemplate(FunctionTemplateDecl *FT1, FunctionTemplateDecl *FT2, SourceLocation Loc, TemplatePartialOrderingContext TPOC, unsigned NumCallArguments1, unsigned NumCallArguments2); UnresolvedSetIterator getMostSpecialized(UnresolvedSetIterator SBegin, UnresolvedSetIterator SEnd, TemplateSpecCandidateSet &FailedCandidates, SourceLocation Loc, const PartialDiagnostic &NoneDiag, const PartialDiagnostic &AmbigDiag, const PartialDiagnostic &CandidateDiag, bool Complain = true, QualType TargetType = QualType()); 
ClassTemplatePartialSpecializationDecl * getMoreSpecializedPartialSpecialization( ClassTemplatePartialSpecializationDecl *PS1, ClassTemplatePartialSpecializationDecl *PS2, SourceLocation Loc); bool isMoreSpecializedThanPrimary(ClassTemplatePartialSpecializationDecl *T, sema::TemplateDeductionInfo &Info); VarTemplatePartialSpecializationDecl *getMoreSpecializedPartialSpecialization( VarTemplatePartialSpecializationDecl *PS1, VarTemplatePartialSpecializationDecl *PS2, SourceLocation Loc); bool isMoreSpecializedThanPrimary(VarTemplatePartialSpecializationDecl *T, sema::TemplateDeductionInfo &Info); bool isTemplateTemplateParameterAtLeastAsSpecializedAs( TemplateParameterList *P, TemplateDecl *AArg, SourceLocation Loc); void MarkUsedTemplateParameters(const TemplateArgumentList &TemplateArgs, bool OnlyDeduced, unsigned Depth, llvm::SmallBitVector &Used); void MarkDeducedTemplateParameters( const FunctionTemplateDecl *FunctionTemplate, llvm::SmallBitVector &Deduced) { return MarkDeducedTemplateParameters(Context, FunctionTemplate, Deduced); } static void MarkDeducedTemplateParameters(ASTContext &Ctx, const FunctionTemplateDecl *FunctionTemplate, llvm::SmallBitVector &Deduced); //===--------------------------------------------------------------------===// // C++ Template Instantiation // MultiLevelTemplateArgumentList getTemplateInstantiationArgs(NamedDecl *D, const TemplateArgumentList *Innermost = nullptr, bool RelativeToPrimary = false, const FunctionDecl *Pattern = nullptr); /// A context in which code is being synthesized (where a source location /// alone is not sufficient to identify the context). This covers template /// instantiation and various forms of implicitly-generated functions. struct CodeSynthesisContext { /// The kind of template instantiation we are performing enum SynthesisKind { /// We are instantiating a template declaration. The entity is /// the declaration we're instantiating (e.g., a CXXRecordDecl). 
TemplateInstantiation, /// We are instantiating a default argument for a template /// parameter. The Entity is the template parameter whose argument is /// being instantiated, the Template is the template, and the /// TemplateArgs/NumTemplateArguments provide the template arguments as /// specified. DefaultTemplateArgumentInstantiation, /// We are instantiating a default argument for a function. /// The Entity is the ParmVarDecl, and TemplateArgs/NumTemplateArgs /// provides the template arguments as specified. DefaultFunctionArgumentInstantiation, /// We are substituting explicit template arguments provided for /// a function template. The entity is a FunctionTemplateDecl. ExplicitTemplateArgumentSubstitution, /// We are substituting template argument determined as part of /// template argument deduction for either a class template /// partial specialization or a function template. The /// Entity is either a {Class|Var}TemplatePartialSpecializationDecl or /// a TemplateDecl. DeducedTemplateArgumentSubstitution, /// We are substituting prior template arguments into a new /// template parameter. The template parameter itself is either a /// NonTypeTemplateParmDecl or a TemplateTemplateParmDecl. PriorTemplateArgumentSubstitution, /// We are checking the validity of a default template argument that /// has been used when naming a template-id. DefaultTemplateArgumentChecking, /// We are computing the exception specification for a defaulted special /// member function. ExceptionSpecEvaluation, /// We are instantiating the exception specification for a function /// template which was deferred until it was needed. ExceptionSpecInstantiation, /// We are declaring an implicit special member function. DeclaringSpecialMember, /// We are defining a synthesized function (such as a defaulted special /// member). DefiningSynthesizedFunction, /// Added for Template instantiation observation. 
/// Memoization means we are _not_ instantiating a template because /// it is already instantiated (but we entered a context where we /// would have had to if it was not already instantiated). Memoization } Kind; /// Was the enclosing context a non-instantiation SFINAE context? bool SavedInNonInstantiationSFINAEContext; /// The point of instantiation or synthesis within the source code. SourceLocation PointOfInstantiation; /// The entity that is being synthesized. Decl *Entity; /// The template (or partial specialization) in which we are /// performing the instantiation, for substitutions of prior template /// arguments. NamedDecl *Template; /// The list of template arguments we are substituting, if they /// are not part of the entity. const TemplateArgument *TemplateArgs; // FIXME: Wrap this union around more members, or perhaps store the // kind-specific members in the RAII object owning the context. union { /// The number of template arguments in TemplateArgs. unsigned NumTemplateArgs; /// The special member being declared or defined. CXXSpecialMember SpecialMember; }; ArrayRef<TemplateArgument> template_arguments() const { assert(Kind != DeclaringSpecialMember); return {TemplateArgs, NumTemplateArgs}; } /// The template deduction info object associated with the /// substitution or checking of explicit or deduced template arguments. sema::TemplateDeductionInfo *DeductionInfo; /// The source range that covers the construct that cause /// the instantiation, e.g., the template-id that causes a class /// template instantiation. SourceRange InstantiationRange; CodeSynthesisContext() : Kind(TemplateInstantiation), SavedInNonInstantiationSFINAEContext(false), Entity(nullptr), Template(nullptr), TemplateArgs(nullptr), NumTemplateArgs(0), DeductionInfo(nullptr) {} /// Determines whether this template is an actual instantiation /// that should be counted toward the maximum instantiation depth. 
bool isInstantiationRecord() const;
};

/// List of active code synthesis contexts.
///
/// This vector is treated as a stack. As synthesis of one entity requires
/// synthesis of another, additional contexts are pushed onto the stack.
SmallVector<CodeSynthesisContext, 16> CodeSynthesisContexts;

/// Specializations whose definitions are currently being instantiated.
llvm::DenseSet<std::pair<Decl *, unsigned>> InstantiatingSpecializations;

/// Non-dependent types used in templates that have already been instantiated
/// by some template instantiation.
llvm::DenseSet<QualType> InstantiatedNonDependentTypes;

/// Extra modules inspected when performing a lookup during a template
/// instantiation. Computed lazily.
SmallVector<Module*, 16> CodeSynthesisContextLookupModules;

/// Cache of additional modules that should be used for name lookup
/// within the current template instantiation. Computed lazily; use
/// getLookupModules() to get a complete set.
llvm::DenseSet<Module*> LookupModulesCache;

/// Get the set of additional modules that should be checked during
/// name lookup. A module and its imports become visible when instantiating a
/// template defined within it.
llvm::DenseSet<Module*> &getLookupModules();

/// Map from the most recent declaration of a namespace to the most
/// recent visible declaration of that namespace.
llvm::DenseMap<NamedDecl*, NamedDecl*> VisibleNamespaceCache;

/// Whether we are in a SFINAE context that is not associated with
/// template instantiation.
///
/// This is used when setting up a SFINAE trap (see \c SFINAETrap) outside
/// of a template instantiation or template argument deduction.
bool InNonInstantiationSFINAEContext;

/// The number of \p CodeSynthesisContexts that are not template
/// instantiations and, therefore, should not be counted as part of the
/// instantiation depth.
///
/// When the instantiation depth reaches the user-configurable limit
/// \p LangOptions::InstantiationDepth we will abort instantiation.
// FIXME: Should we have a similar limit for other forms of synthesis? unsigned NonInstantiationEntries; /// The depth of the context stack at the point when the most recent /// error or warning was produced. /// /// This value is used to suppress printing of redundant context stacks /// when there are multiple errors or warnings in the same instantiation. // FIXME: Does this belong in Sema? It's tough to implement it anywhere else. unsigned LastEmittedCodeSynthesisContextDepth = 0; /// The template instantiation callbacks to trace or track /// instantiations (objects can be chained). /// /// This callbacks is used to print, trace or track template /// instantiations as they are being constructed. std::vector<std::unique_ptr<TemplateInstantiationCallback>> TemplateInstCallbacks; /// The current index into pack expansion arguments that will be /// used for substitution of parameter packs. /// /// The pack expansion index will be -1 to indicate that parameter packs /// should be instantiated as themselves. Otherwise, the index specifies /// which argument within the parameter pack will be used for substitution. int ArgumentPackSubstitutionIndex; /// RAII object used to change the argument pack substitution index /// within a \c Sema object. /// /// See \c ArgumentPackSubstitutionIndex for more information. class ArgumentPackSubstitutionIndexRAII { Sema &Self; int OldSubstitutionIndex; public: ArgumentPackSubstitutionIndexRAII(Sema &Self, int NewSubstitutionIndex) : Self(Self), OldSubstitutionIndex(Self.ArgumentPackSubstitutionIndex) { Self.ArgumentPackSubstitutionIndex = NewSubstitutionIndex; } ~ArgumentPackSubstitutionIndexRAII() { Self.ArgumentPackSubstitutionIndex = OldSubstitutionIndex; } }; friend class ArgumentPackSubstitutionRAII; /// For each declaration that involved template argument deduction, the /// set of diagnostics that were suppressed during that template argument /// deduction. /// /// FIXME: Serialize this structure to the AST file. 
typedef llvm::DenseMap<Decl *, SmallVector<PartialDiagnosticAt, 1> > SuppressedDiagnosticsMap; SuppressedDiagnosticsMap SuppressedDiagnostics; /// A stack object to be created when performing template /// instantiation. /// /// Construction of an object of type \c InstantiatingTemplate /// pushes the current instantiation onto the stack of active /// instantiations. If the size of this stack exceeds the maximum /// number of recursive template instantiations, construction /// produces an error and evaluates true. /// /// Destruction of this object will pop the named instantiation off /// the stack. struct InstantiatingTemplate { /// Note that we are instantiating a class template, /// function template, variable template, alias template, /// or a member thereof. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, Decl *Entity, SourceRange InstantiationRange = SourceRange()); struct ExceptionSpecification {}; /// Note that we are instantiating an exception specification /// of a function template. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, FunctionDecl *Entity, ExceptionSpecification, SourceRange InstantiationRange = SourceRange()); /// Note that we are instantiating a default argument in a /// template-id. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, TemplateParameter Param, TemplateDecl *Template, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange = SourceRange()); /// Note that we are substituting either explicitly-specified or /// deduced template arguments during function template argument deduction. 
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, FunctionTemplateDecl *FunctionTemplate, ArrayRef<TemplateArgument> TemplateArgs, CodeSynthesisContext::SynthesisKind Kind, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// Note that we are instantiating as part of template /// argument deduction for a class template declaration. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, TemplateDecl *Template, ArrayRef<TemplateArgument> TemplateArgs, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// Note that we are instantiating as part of template /// argument deduction for a class template partial /// specialization. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, ClassTemplatePartialSpecializationDecl *PartialSpec, ArrayRef<TemplateArgument> TemplateArgs, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// Note that we are instantiating as part of template /// argument deduction for a variable template partial /// specialization. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, VarTemplatePartialSpecializationDecl *PartialSpec, ArrayRef<TemplateArgument> TemplateArgs, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// Note that we are instantiating a default argument for a function /// parameter. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, ParmVarDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange = SourceRange()); /// Note that we are substituting prior template arguments into a /// non-type parameter. 
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, NamedDecl *Template, NonTypeTemplateParmDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange); /// Note that we are substituting prior template arguments into a /// template template parameter. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, NamedDecl *Template, TemplateTemplateParmDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange); /// Note that we are checking the default template argument /// against the template parameter for a given template-id. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, TemplateDecl *Template, NamedDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange); /// Note that we have finished instantiating this template. void Clear(); ~InstantiatingTemplate() { Clear(); } /// Determines whether we have exceeded the maximum /// recursive template instantiations. bool isInvalid() const { return Invalid; } /// Determine whether we are already instantiating this /// specialization in some surrounding active instantiation. 
bool isAlreadyInstantiating() const { return AlreadyInstantiating; } private: Sema &SemaRef; bool Invalid; bool AlreadyInstantiating; bool CheckInstantiationDepth(SourceLocation PointOfInstantiation, SourceRange InstantiationRange); InstantiatingTemplate( Sema &SemaRef, CodeSynthesisContext::SynthesisKind Kind, SourceLocation PointOfInstantiation, SourceRange InstantiationRange, Decl *Entity, NamedDecl *Template = nullptr, ArrayRef<TemplateArgument> TemplateArgs = None, sema::TemplateDeductionInfo *DeductionInfo = nullptr); InstantiatingTemplate(const InstantiatingTemplate&) = delete; InstantiatingTemplate& operator=(const InstantiatingTemplate&) = delete; }; void pushCodeSynthesisContext(CodeSynthesisContext Ctx); void popCodeSynthesisContext(); /// Determine whether we are currently performing template instantiation. bool inTemplateInstantiation() const { return CodeSynthesisContexts.size() > NonInstantiationEntries; } void PrintContextStack() { if (!CodeSynthesisContexts.empty() && CodeSynthesisContexts.size() != LastEmittedCodeSynthesisContextDepth) { PrintInstantiationStack(); LastEmittedCodeSynthesisContextDepth = CodeSynthesisContexts.size(); } if (PragmaAttributeCurrentTargetDecl) PrintPragmaAttributeInstantiationPoint(); } void PrintInstantiationStack(); void PrintPragmaAttributeInstantiationPoint(); /// Determines whether we are currently in a context where /// template argument substitution failures are not considered /// errors. /// /// \returns An empty \c Optional if we're not in a SFINAE context. /// Otherwise, contains a pointer that, if non-NULL, contains the nearest /// template-deduction context object, which can be used to capture /// diagnostics that will be suppressed. Optional<sema::TemplateDeductionInfo *> isSFINAEContext() const; /// Determines whether we are currently in a context that /// is not evaluated as per C++ [expr] p5. 
bool isUnevaluatedContext() const { assert(!ExprEvalContexts.empty() && "Must be in an expression evaluation context"); return ExprEvalContexts.back().isUnevaluated(); } /// RAII class used to determine whether SFINAE has /// trapped any errors that occur during template argument /// deduction. class SFINAETrap { Sema &SemaRef; unsigned PrevSFINAEErrors; bool PrevInNonInstantiationSFINAEContext; bool PrevAccessCheckingSFINAE; bool PrevLastDiagnosticIgnored; public: explicit SFINAETrap(Sema &SemaRef, bool AccessCheckingSFINAE = false) : SemaRef(SemaRef), PrevSFINAEErrors(SemaRef.NumSFINAEErrors), PrevInNonInstantiationSFINAEContext( SemaRef.InNonInstantiationSFINAEContext), PrevAccessCheckingSFINAE(SemaRef.AccessCheckingSFINAE), PrevLastDiagnosticIgnored( SemaRef.getDiagnostics().isLastDiagnosticIgnored()) { if (!SemaRef.isSFINAEContext()) SemaRef.InNonInstantiationSFINAEContext = true; SemaRef.AccessCheckingSFINAE = AccessCheckingSFINAE; } ~SFINAETrap() { SemaRef.NumSFINAEErrors = PrevSFINAEErrors; SemaRef.InNonInstantiationSFINAEContext = PrevInNonInstantiationSFINAEContext; SemaRef.AccessCheckingSFINAE = PrevAccessCheckingSFINAE; SemaRef.getDiagnostics().setLastDiagnosticIgnored( PrevLastDiagnosticIgnored); } /// Determine whether any SFINAE errors have been trapped. bool hasErrorOccurred() const { return SemaRef.NumSFINAEErrors > PrevSFINAEErrors; } }; /// RAII class used to indicate that we are performing provisional /// semantic analysis to determine the validity of a construct, so /// typo-correction and diagnostics in the immediate context (not within /// implicitly-instantiated templates) should be suppressed. class TentativeAnalysisScope { Sema &SemaRef; // FIXME: Using a SFINAETrap for this is a hack. 
SFINAETrap Trap; bool PrevDisableTypoCorrection; public: explicit TentativeAnalysisScope(Sema &SemaRef) : SemaRef(SemaRef), Trap(SemaRef, true), PrevDisableTypoCorrection(SemaRef.DisableTypoCorrection) { SemaRef.DisableTypoCorrection = true; } ~TentativeAnalysisScope() { SemaRef.DisableTypoCorrection = PrevDisableTypoCorrection; } }; /// The current instantiation scope used to store local /// variables. LocalInstantiationScope *CurrentInstantiationScope; /// Tracks whether we are in a context where typo correction is /// disabled. bool DisableTypoCorrection; /// The number of typos corrected by CorrectTypo. unsigned TyposCorrected; typedef llvm::SmallSet<SourceLocation, 2> SrcLocSet; typedef llvm::DenseMap<IdentifierInfo *, SrcLocSet> IdentifierSourceLocations; /// A cache containing identifiers for which typo correction failed and /// their locations, so that repeated attempts to correct an identifier in a /// given location are ignored if typo correction already failed for it. IdentifierSourceLocations TypoCorrectionFailures; /// Worker object for performing CFG-based warnings. sema::AnalysisBasedWarnings AnalysisWarnings; threadSafety::BeforeSet *ThreadSafetyDeclCache; /// An entity for which implicit template instantiation is required. /// /// The source location associated with the declaration is the first place in /// the source code where the declaration was "used". It is not necessarily /// the point of instantiation (which will be either before or after the /// namespace-scope declaration that triggered this implicit instantiation), /// However, it is the location that diagnostics should generally refer to, /// because users will need to know what code triggered the instantiation. typedef std::pair<ValueDecl *, SourceLocation> PendingImplicitInstantiation; /// The queue of implicit template instantiations that are required /// but have not yet been performed. 
std::deque<PendingImplicitInstantiation> PendingInstantiations; /// Queue of implicit template instantiations that cannot be performed /// eagerly. SmallVector<PendingImplicitInstantiation, 1> LateParsedInstantiations; class GlobalEagerInstantiationScope { public: GlobalEagerInstantiationScope(Sema &S, bool Enabled) : S(S), Enabled(Enabled) { if (!Enabled) return; SavedPendingInstantiations.swap(S.PendingInstantiations); SavedVTableUses.swap(S.VTableUses); } void perform() { if (Enabled) { S.DefineUsedVTables(); S.PerformPendingInstantiations(); } } ~GlobalEagerInstantiationScope() { if (!Enabled) return; // Restore the set of pending vtables. assert(S.VTableUses.empty() && "VTableUses should be empty before it is discarded."); S.VTableUses.swap(SavedVTableUses); // Restore the set of pending implicit instantiations. assert(S.PendingInstantiations.empty() && "PendingInstantiations should be empty before it is discarded."); S.PendingInstantiations.swap(SavedPendingInstantiations); } private: Sema &S; SmallVector<VTableUse, 16> SavedVTableUses; std::deque<PendingImplicitInstantiation> SavedPendingInstantiations; bool Enabled; }; /// The queue of implicit template instantiations that are required /// and must be performed within the current local scope. /// /// This queue is only used for member functions of local classes in /// templates, which must be instantiated in the same scope as their /// enclosing function, so that they can reference function-local /// types, static variables, enumerators, etc. 
std::deque<PendingImplicitInstantiation> PendingLocalImplicitInstantiations;

/// RAII scope for local-scope instantiations: swaps out the pending local
/// instantiation queue on entry and swaps the saved queue back on exit;
/// perform() flushes the local instantiations accumulated in between.
class LocalEagerInstantiationScope {
public:
  LocalEagerInstantiationScope(Sema &S) : S(S) {
    SavedPendingLocalImplicitInstantiations.swap(
        S.PendingLocalImplicitInstantiations);
  }

  void perform() { S.PerformPendingInstantiations(/*LocalOnly=*/true); }

  ~LocalEagerInstantiationScope() {
    // All local instantiations queued within this scope must have been
    // performed (via perform()) before the scope ends.
    assert(S.PendingLocalImplicitInstantiations.empty() &&
           "there shouldn't be any pending local implicit instantiations");
    SavedPendingLocalImplicitInstantiations.swap(
        S.PendingLocalImplicitInstantiations);
  }

private:
  Sema &S;
  std::deque<PendingImplicitInstantiation>
      SavedPendingLocalImplicitInstantiations;
};

/// A helper class for building up ExtParameterInfos.
class ExtParameterInfoBuilder {
  SmallVector<FunctionProtoType::ExtParameterInfo, 16> Infos;
  bool HasInteresting = false;

public:
  /// Set the ExtParameterInfo for the parameter at the given index.
  void set(unsigned index, FunctionProtoType::ExtParameterInfo info) {
    // Indices must be set in strictly increasing order; gaps are filled
    // with default-constructed (uninteresting) infos by resize().
    assert(Infos.size() <= index);
    Infos.resize(index);
    Infos.push_back(info);

    if (!HasInteresting)
      HasInteresting = (info != FunctionProtoType::ExtParameterInfo());
  }

  /// Return a pointer (suitable for setting in an ExtProtoInfo) to the
  /// ExtParameterInfo array we've built up.
const FunctionProtoType::ExtParameterInfo * getPointerOrNull(unsigned numParams) { if (!HasInteresting) return nullptr; Infos.resize(numParams); return Infos.data(); } }; void PerformPendingInstantiations(bool LocalOnly = false); TypeSourceInfo *SubstType(TypeSourceInfo *T, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity, bool AllowDeducedTST = false); QualType SubstType(QualType T, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity); TypeSourceInfo *SubstType(TypeLoc TL, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity); TypeSourceInfo *SubstFunctionDeclType(TypeSourceInfo *T, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity, CXXRecordDecl *ThisContext, Qualifiers ThisTypeQuals); void SubstExceptionSpec(FunctionDecl *New, const FunctionProtoType *Proto, const MultiLevelTemplateArgumentList &Args); bool SubstExceptionSpec(SourceLocation Loc, FunctionProtoType::ExceptionSpecInfo &ESI, SmallVectorImpl<QualType> &ExceptionStorage, const MultiLevelTemplateArgumentList &Args); ParmVarDecl *SubstParmVarDecl(ParmVarDecl *D, const MultiLevelTemplateArgumentList &TemplateArgs, int indexAdjustment, Optional<unsigned> NumExpansions, bool ExpectParameterPack); bool SubstParmTypes(SourceLocation Loc, ArrayRef<ParmVarDecl *> Params, const FunctionProtoType::ExtParameterInfo *ExtParamInfos, const MultiLevelTemplateArgumentList &TemplateArgs, SmallVectorImpl<QualType> &ParamTypes, SmallVectorImpl<ParmVarDecl *> *OutParams, ExtParameterInfoBuilder &ParamInfos); ExprResult SubstExpr(Expr *E, const MultiLevelTemplateArgumentList &TemplateArgs); /// Substitute the given template arguments into a list of /// expressions, expanding pack expansions if required. /// /// \param Exprs The list of expressions to substitute into. 
///
/// \param IsCall Whether this is some form of call, in which case
/// default arguments will be dropped.
///
/// \param TemplateArgs The set of template arguments to substitute.
///
/// \param Outputs Will receive all of the substituted arguments.
///
/// \returns true if an error occurred, false otherwise.
bool SubstExprs(ArrayRef<Expr *> Exprs, bool IsCall,
                const MultiLevelTemplateArgumentList &TemplateArgs,
                SmallVectorImpl<Expr *> &Outputs);

StmtResult SubstStmt(Stmt *S,
                     const MultiLevelTemplateArgumentList &TemplateArgs);

TemplateParameterList *
SubstTemplateParams(TemplateParameterList *Params, DeclContext *Owner,
                    const MultiLevelTemplateArgumentList &TemplateArgs);

Decl *SubstDecl(Decl *D, DeclContext *Owner,
                const MultiLevelTemplateArgumentList &TemplateArgs);

/// Substitute into an initializer expression; \p CXXDirectInit
/// distinguishes direct-initialization from copy-initialization.
ExprResult SubstInitializer(Expr *E,
                            const MultiLevelTemplateArgumentList &TemplateArgs,
                            bool CXXDirectInit);

bool SubstBaseSpecifiers(CXXRecordDecl *Instantiation,
                         CXXRecordDecl *Pattern,
                         const MultiLevelTemplateArgumentList &TemplateArgs);

/// Instantiate the definition of the class \p Instantiation from the
/// pattern \p Pattern. NOTE(review): presumably returns true on error,
/// matching SubstExprs above — confirm against the implementation.
bool InstantiateClass(SourceLocation PointOfInstantiation,
                      CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern,
                      const MultiLevelTemplateArgumentList &TemplateArgs,
                      TemplateSpecializationKind TSK, bool Complain = true);

bool InstantiateEnum(SourceLocation PointOfInstantiation,
                     EnumDecl *Instantiation, EnumDecl *Pattern,
                     const MultiLevelTemplateArgumentList &TemplateArgs,
                     TemplateSpecializationKind TSK);

bool InstantiateInClassInitializer(
    SourceLocation PointOfInstantiation, FieldDecl *Instantiation,
    FieldDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs);

/// An attribute whose instantiation had to be deferred, together with
/// the local scope and the declaration it applies to.
struct LateInstantiatedAttribute {
  const Attr *TmplAttr;
  LocalInstantiationScope *Scope;
  Decl *NewDecl;

  LateInstantiatedAttribute(const Attr *A, LocalInstantiationScope *S,
                            Decl *D)
      : TmplAttr(A), Scope(S), NewDecl(D) { }
};

typedef SmallVector<LateInstantiatedAttribute, 16> LateInstantiatedAttrVec;

void InstantiateAttrs(const MultiLevelTemplateArgumentList &TemplateArgs,
                      const Decl *Pattern, Decl *Inst,
                      LateInstantiatedAttrVec *LateAttrs = nullptr,
                      LocalInstantiationScope *OuterMostScope = nullptr);

void
InstantiateAttrsForDecl(const MultiLevelTemplateArgumentList &TemplateArgs,
                        const Decl *Pattern, Decl *Inst,
                        LateInstantiatedAttrVec *LateAttrs = nullptr,
                        LocalInstantiationScope *OuterMostScope = nullptr);

bool usesPartialOrExplicitSpecialization(
    SourceLocation Loc, ClassTemplateSpecializationDecl *ClassTemplateSpec);

bool
InstantiateClassTemplateSpecialization(SourceLocation PointOfInstantiation,
                                       ClassTemplateSpecializationDecl *ClassTemplateSpec,
                                       TemplateSpecializationKind TSK,
                                       bool Complain = true);

void InstantiateClassMembers(SourceLocation PointOfInstantiation,
                             CXXRecordDecl *Instantiation,
                             const MultiLevelTemplateArgumentList &TemplateArgs,
                             TemplateSpecializationKind TSK);

void InstantiateClassTemplateSpecializationMembers(
    SourceLocation PointOfInstantiation,
    ClassTemplateSpecializationDecl *ClassTemplateSpec,
    TemplateSpecializationKind TSK);

NestedNameSpecifierLoc
SubstNestedNameSpecifierLoc(NestedNameSpecifierLoc NNS,
                            const MultiLevelTemplateArgumentList &TemplateArgs);

DeclarationNameInfo
SubstDeclarationNameInfo(const DeclarationNameInfo &NameInfo,
                         const MultiLevelTemplateArgumentList &TemplateArgs);

TemplateName
SubstTemplateName(NestedNameSpecifierLoc QualifierLoc, TemplateName Name,
                  SourceLocation Loc,
                  const MultiLevelTemplateArgumentList &TemplateArgs);

bool Subst(const TemplateArgumentLoc *Args, unsigned NumArgs,
           TemplateArgumentListInfo &Result,
           const MultiLevelTemplateArgumentList &TemplateArgs);

void InstantiateExceptionSpec(SourceLocation PointOfInstantiation,
                              FunctionDecl *Function);

FunctionDecl *InstantiateFunctionDeclaration(FunctionTemplateDecl *FTD,
                                             const TemplateArgumentList *Args,
                                             SourceLocation Loc);

void InstantiateFunctionDefinition(SourceLocation PointOfInstantiation,
                                   FunctionDecl *Function,
                                   bool Recursive = false,
                                   bool DefinitionRequired = false,
                                   bool AtEndOfTU = false);

VarTemplateSpecializationDecl *BuildVarTemplateInstantiation(
    VarTemplateDecl *VarTemplate, VarDecl *FromVar,
    const TemplateArgumentList &TemplateArgList,
    const TemplateArgumentListInfo &TemplateArgsInfo,
    SmallVectorImpl<TemplateArgument> &Converted,
    SourceLocation PointOfInstantiation, void *InsertPos,
    LateInstantiatedAttrVec *LateAttrs = nullptr,
    LocalInstantiationScope *StartingScope = nullptr);

VarTemplateSpecializationDecl *CompleteVarTemplateSpecializationDecl(
    VarTemplateSpecializationDecl *VarSpec, VarDecl *PatternDecl,
    const MultiLevelTemplateArgumentList &TemplateArgs);

void
BuildVariableInstantiation(VarDecl *NewVar, VarDecl *OldVar,
                           const MultiLevelTemplateArgumentList &TemplateArgs,
                           LateInstantiatedAttrVec *LateAttrs,
                           DeclContext *Owner,
                           LocalInstantiationScope *StartingScope,
                           bool InstantiatingVarTemplate = false,
                           VarTemplateSpecializationDecl *PrevVTSD = nullptr);

void InstantiateVariableInitializer(
    VarDecl *Var, VarDecl *OldVar,
    const MultiLevelTemplateArgumentList &TemplateArgs);

void InstantiateVariableDefinition(SourceLocation PointOfInstantiation,
                                   VarDecl *Var, bool Recursive = false,
                                   bool DefinitionRequired = false,
                                   bool AtEndOfTU = false);

void InstantiateMemInitializers(CXXConstructorDecl *New,
                                const CXXConstructorDecl *Tmpl,
                                const MultiLevelTemplateArgumentList &TemplateArgs);

NamedDecl *
FindInstantiatedDecl(SourceLocation Loc, NamedDecl *D,
                     const MultiLevelTemplateArgumentList &TemplateArgs,
                     bool FindingInstantiatedContext = false);

DeclContext *
FindInstantiatedContext(SourceLocation Loc, DeclContext *DC,
                        const MultiLevelTemplateArgumentList &TemplateArgs);

// Objective-C declarations.
/// The kind of Objective-C container context currently being parsed.
enum ObjCContainerKind {
  OCK_None = -1,
  OCK_Interface = 0,
  OCK_Protocol,
  OCK_Category,
  OCK_ClassExtension,
  OCK_Implementation,
  OCK_CategoryImplementation
};
ObjCContainerKind getObjCContainerKind() const;

DeclResult actOnObjCTypeParam(Scope *S,
                              ObjCTypeParamVariance variance,
                              SourceLocation varianceLoc,
                              unsigned index,
                              IdentifierInfo *paramName,
                              SourceLocation paramLoc,
                              SourceLocation colonLoc,
                              ParsedType typeBound);

ObjCTypeParamList *actOnObjCTypeParamList(Scope *S,
                                          SourceLocation lAngleLoc,
                                          ArrayRef<Decl *> typeParams,
                                          SourceLocation rAngleLoc);

void popObjCTypeParamList(Scope *S, ObjCTypeParamList *typeParamList);

Decl *ActOnStartClassInterface(
    Scope *S, SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName,
    SourceLocation ClassLoc, ObjCTypeParamList *typeParamList,
    IdentifierInfo *SuperName, SourceLocation SuperLoc,
    ArrayRef<ParsedType> SuperTypeArgs, SourceRange SuperTypeArgsRange,
    Decl *const *ProtoRefs, unsigned NumProtoRefs,
    const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc,
    const ParsedAttributesView &AttrList);

void ActOnSuperClassOfClassInterface(Scope *S,
                                     SourceLocation AtInterfaceLoc,
                                     ObjCInterfaceDecl *IDecl,
                                     IdentifierInfo *ClassName,
                                     SourceLocation ClassLoc,
                                     IdentifierInfo *SuperName,
                                     SourceLocation SuperLoc,
                                     ArrayRef<ParsedType> SuperTypeArgs,
                                     SourceRange SuperTypeArgsRange);

void ActOnTypedefedProtocols(SmallVectorImpl<Decl *> &ProtocolRefs,
                             SmallVectorImpl<SourceLocation> &ProtocolLocs,
                             IdentifierInfo *SuperName,
                             SourceLocation SuperLoc);

Decl *ActOnCompatibilityAlias(SourceLocation AtCompatibilityAliasLoc,
                              IdentifierInfo *AliasName,
                              SourceLocation AliasLocation,
                              IdentifierInfo *ClassName,
                              SourceLocation ClassLocation);

bool CheckForwardProtocolDeclarationForCircularDependency(
    IdentifierInfo *PName, SourceLocation &PLoc, SourceLocation PrevLoc,
    const ObjCList<ObjCProtocolDecl> &PList);

Decl *ActOnStartProtocolInterface(
    SourceLocation AtProtoInterfaceLoc, IdentifierInfo *ProtocolName,
    SourceLocation ProtocolLoc, Decl *const *ProtoRefNames,
    unsigned NumProtoRefs, const SourceLocation *ProtoLocs,
    SourceLocation EndProtoLoc, const ParsedAttributesView &AttrList);

Decl *ActOnStartCategoryInterface(
    SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName,
    SourceLocation ClassLoc, ObjCTypeParamList *typeParamList,
    IdentifierInfo *CategoryName, SourceLocation CategoryLoc,
    Decl *const *ProtoRefs, unsigned NumProtoRefs,
    const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc,
    const ParsedAttributesView &AttrList);

Decl *ActOnStartClassImplementation(SourceLocation AtClassImplLoc,
                                    IdentifierInfo *ClassName,
                                    SourceLocation ClassLoc,
                                    IdentifierInfo *SuperClassname,
                                    SourceLocation SuperClassLoc,
                                    const ParsedAttributesView &AttrList);

Decl *ActOnStartCategoryImplementation(SourceLocation AtCatImplLoc,
                                       IdentifierInfo *ClassName,
                                       SourceLocation ClassLoc,
                                       IdentifierInfo *CatName,
                                       SourceLocation CatLoc,
                                       const ParsedAttributesView &AttrList);

DeclGroupPtrTy ActOnFinishObjCImplementation(Decl *ObjCImpDecl,
                                             ArrayRef<Decl *> Decls);

DeclGroupPtrTy ActOnForwardClassDeclaration(
    SourceLocation Loc, IdentifierInfo **IdentList,
    SourceLocation *IdentLocs,
    ArrayRef<ObjCTypeParamList *> TypeParamLists, unsigned NumElts);

// NOTE(review): "AtProtoclLoc" looks like a typo for "AtProtocolLoc";
// it is only a declaration parameter name, so renaming would be safe,
// but it is kept byte-identical here.
DeclGroupPtrTy
ActOnForwardProtocolDeclaration(SourceLocation AtProtoclLoc,
                                ArrayRef<IdentifierLocPair> IdentList,
                                const ParsedAttributesView &attrList);

void FindProtocolDeclaration(bool WarnOnDeclarations, bool ForObjCContainer,
                             ArrayRef<IdentifierLocPair> ProtocolId,
                             SmallVectorImpl<Decl *> &Protocols);

void DiagnoseTypeArgsAndProtocols(IdentifierInfo *ProtocolId,
                                  SourceLocation ProtocolLoc,
                                  IdentifierInfo *TypeArgId,
                                  SourceLocation TypeArgLoc,
                                  bool SelectProtocolFirst = false);

/// Given a list of identifiers (and their locations), resolve the
/// names to either Objective-C protocol qualifiers or type
/// arguments, as appropriate.
void actOnObjCTypeArgsOrProtocolQualifiers(
    Scope *S, ParsedType baseType, SourceLocation lAngleLoc,
    ArrayRef<IdentifierInfo *> identifiers,
    ArrayRef<SourceLocation> identifierLocs, SourceLocation rAngleLoc,
    SourceLocation &typeArgsLAngleLoc,
    SmallVectorImpl<ParsedType> &typeArgs,
    SourceLocation &typeArgsRAngleLoc,
    SourceLocation &protocolLAngleLoc,
    SmallVectorImpl<Decl *> &protocols,
    SourceLocation &protocolRAngleLoc,
    bool warnOnIncompleteProtocols);

/// Build an Objective-C protocol-qualified 'id' type where no
/// base type was specified.
TypeResult actOnObjCProtocolQualifierType(
    SourceLocation lAngleLoc,
    ArrayRef<Decl *> protocols,
    ArrayRef<SourceLocation> protocolLocs,
    SourceLocation rAngleLoc);

/// Build a specialized and/or protocol-qualified Objective-C type.
TypeResult actOnObjCTypeArgsAndProtocolQualifiers(
    Scope *S, SourceLocation Loc, ParsedType BaseType,
    SourceLocation TypeArgsLAngleLoc, ArrayRef<ParsedType> TypeArgs,
    SourceLocation TypeArgsRAngleLoc, SourceLocation ProtocolLAngleLoc,
    ArrayRef<Decl *> Protocols, ArrayRef<SourceLocation> ProtocolLocs,
    SourceLocation ProtocolRAngleLoc);

/// Build an Objective-C type parameter type.
QualType BuildObjCTypeParamType(const ObjCTypeParamDecl *Decl,
                                SourceLocation ProtocolLAngleLoc,
                                ArrayRef<ObjCProtocolDecl *> Protocols,
                                ArrayRef<SourceLocation> ProtocolLocs,
                                SourceLocation ProtocolRAngleLoc,
                                bool FailOnError = false);

/// Build an Objective-C object pointer type.
QualType BuildObjCObjectType(QualType BaseType, SourceLocation Loc,
                             SourceLocation TypeArgsLAngleLoc,
                             ArrayRef<TypeSourceInfo *> TypeArgs,
                             SourceLocation TypeArgsRAngleLoc,
                             SourceLocation ProtocolLAngleLoc,
                             ArrayRef<ObjCProtocolDecl *> Protocols,
                             ArrayRef<SourceLocation> ProtocolLocs,
                             SourceLocation ProtocolRAngleLoc,
                             bool FailOnError = false);

/// Ensure attributes are consistent with type.
/// \param [in, out] Attributes The attributes to check; they will
/// be modified to be consistent with \p PropertyTy.
void CheckObjCPropertyAttributes(Decl *PropertyPtrTy,
                                 SourceLocation Loc,
                                 unsigned &Attributes,
                                 bool propertyInPrimaryClass);

/// Process the specified property declaration and create decls for the
/// setters and getters as needed.
/// \param property The property declaration being processed
void ProcessPropertyDecl(ObjCPropertyDecl *property);

void DiagnosePropertyMismatch(ObjCPropertyDecl *Property,
                              ObjCPropertyDecl *SuperProperty,
                              const IdentifierInfo *Name,
                              bool OverridingProtocolProperty);

void DiagnoseClassExtensionDupMethods(ObjCCategoryDecl *CAT,
                                      ObjCInterfaceDecl *ID);

Decl *ActOnAtEnd(Scope *S, SourceRange AtEnd,
                 ArrayRef<Decl *> allMethods = None,
                 ArrayRef<DeclGroupPtrTy> allTUVars = None);

Decl *ActOnProperty(Scope *S, SourceLocation AtLoc,
                    SourceLocation LParenLoc,
                    FieldDeclarator &FD, ObjCDeclSpec &ODS,
                    Selector GetterSel, Selector SetterSel,
                    tok::ObjCKeywordKind MethodImplKind,
                    DeclContext *lexicalDC = nullptr);

Decl *ActOnPropertyImplDecl(Scope *S,
                            SourceLocation AtLoc,
                            SourceLocation PropertyLoc,
                            bool ImplKind,
                            IdentifierInfo *PropertyId,
                            IdentifierInfo *PropertyIvar,
                            SourceLocation PropertyIvarLoc,
                            ObjCPropertyQueryKind QueryKind);

enum ObjCSpecialMethodKind {
  OSMK_None,
  OSMK_Alloc,
  OSMK_New,
  OSMK_Copy,
  OSMK_RetainingInit,
  OSMK_NonRetainingInit
};

/// Parser-supplied description of one Objective-C method argument.
struct ObjCArgInfo {
  IdentifierInfo *Name;
  SourceLocation NameLoc;
  // The Type is null if no type was specified, and the DeclSpec is invalid
  // in this case.
  ParsedType Type;
  ObjCDeclSpec DeclSpec;

  /// ArgAttrs - Attribute list for this argument.
  ParsedAttributesView ArgAttrs;
};

Decl *ActOnMethodDeclaration(
    Scope *S,
    SourceLocation BeginLoc, // location of the + or -.
    SourceLocation EndLoc,   // location of the ; or {.
    tok::TokenKind MethodType, ObjCDeclSpec &ReturnQT,
    ParsedType ReturnType, ArrayRef<SourceLocation> SelectorLocs,
    Selector Sel,
    // optional arguments. The number of types/arguments is obtained
    // from the Sel.getNumArgs().
    ObjCArgInfo *ArgInfo, DeclaratorChunk::ParamInfo *CParamInfo,
    unsigned CNumArgs, // c-style args
    const ParsedAttributesView &AttrList,
    tok::ObjCKeywordKind MethodImplKind, bool isVariadic,
    bool MethodDefinition);

ObjCMethodDecl *LookupMethodInQualifiedType(Selector Sel,
                                            const ObjCObjectPointerType *OPT,
                                            bool IsInstance);
ObjCMethodDecl *LookupMethodInObjectType(Selector Sel, QualType Ty,
                                         bool IsInstance);

bool CheckARCMethodDecl(ObjCMethodDecl *method);
bool inferObjCARCLifetime(ValueDecl *decl);

ExprResult HandleExprPropertyRefExpr(const ObjCObjectPointerType *OPT,
                                     Expr *BaseExpr,
                                     SourceLocation OpLoc,
                                     DeclarationName MemberName,
                                     SourceLocation MemberLoc,
                                     SourceLocation SuperLoc,
                                     QualType SuperType,
                                     bool Super);

ExprResult ActOnClassPropertyRefExpr(IdentifierInfo &receiverName,
                                     IdentifierInfo &propertyName,
                                     SourceLocation receiverNameLoc,
                                     SourceLocation propertyNameLoc);

ObjCMethodDecl *tryCaptureObjCSelf(SourceLocation Loc);

/// Describes the kind of message expression indicated by a message
/// send that starts with an identifier.
enum ObjCMessageKind {
  /// The message is sent to 'super'.
  ObjCSuperMessage,
  /// The message is an instance message.
  ObjCInstanceMessage,
  /// The message is a class message, and the identifier is a type
  /// name.
  ObjCClassMessage
};

ObjCMessageKind getObjCMessageKind(Scope *S, IdentifierInfo *Name,
                                   SourceLocation NameLoc,
                                   bool IsSuper, bool HasTrailingDot,
                                   ParsedType &ReceiverType);

ExprResult ActOnSuperMessage(Scope *S, SourceLocation SuperLoc,
                             Selector Sel, SourceLocation LBracLoc,
                             ArrayRef<SourceLocation> SelectorLocs,
                             SourceLocation RBracLoc, MultiExprArg Args);

ExprResult BuildClassMessage(TypeSourceInfo *ReceiverTypeInfo,
                             QualType ReceiverType,
                             SourceLocation SuperLoc,
                             Selector Sel, ObjCMethodDecl *Method,
                             SourceLocation LBracLoc,
                             ArrayRef<SourceLocation> SelectorLocs,
                             SourceLocation RBracLoc, MultiExprArg Args,
                             bool isImplicit = false);

ExprResult BuildClassMessageImplicit(QualType ReceiverType,
                                     bool isSuperReceiver,
                                     SourceLocation Loc, Selector Sel,
                                     ObjCMethodDecl *Method,
                                     MultiExprArg Args);

ExprResult ActOnClassMessage(Scope *S, ParsedType Receiver, Selector Sel,
                             SourceLocation LBracLoc,
                             ArrayRef<SourceLocation> SelectorLocs,
                             SourceLocation RBracLoc, MultiExprArg Args);

ExprResult BuildInstanceMessage(Expr *Receiver, QualType ReceiverType,
                                SourceLocation SuperLoc,
                                Selector Sel, ObjCMethodDecl *Method,
                                SourceLocation LBracLoc,
                                ArrayRef<SourceLocation> SelectorLocs,
                                SourceLocation RBracLoc, MultiExprArg Args,
                                bool isImplicit = false);

ExprResult BuildInstanceMessageImplicit(Expr *Receiver,
                                        QualType ReceiverType,
                                        SourceLocation Loc, Selector Sel,
                                        ObjCMethodDecl *Method,
                                        MultiExprArg Args);

ExprResult ActOnInstanceMessage(Scope *S, Expr *Receiver, Selector Sel,
                                SourceLocation LBracLoc,
                                ArrayRef<SourceLocation> SelectorLocs,
                                SourceLocation RBracLoc, MultiExprArg Args);

ExprResult BuildObjCBridgedCast(SourceLocation LParenLoc,
                                ObjCBridgeCastKind Kind,
                                SourceLocation BridgeKeywordLoc,
                                TypeSourceInfo *TSInfo, Expr *SubExpr);

ExprResult ActOnObjCBridgedCast(Scope *S, SourceLocation LParenLoc,
                                ObjCBridgeCastKind Kind,
                                SourceLocation BridgeKeywordLoc,
                                ParsedType Type, SourceLocation RParenLoc,
                                Expr *SubExpr);

void CheckTollFreeBridgeCast(QualType castType, Expr *castExpr);
void CheckObjCBridgeRelatedCast(QualType castType, Expr *castExpr);

bool CheckTollFreeBridgeStaticCast(QualType castType, Expr *castExpr,
                                   CastKind &Kind);

bool checkObjCBridgeRelatedComponents(SourceLocation Loc,
                                      QualType DestType, QualType SrcType,
                                      ObjCInterfaceDecl *&RelatedClass,
                                      ObjCMethodDecl *&ClassMethod,
                                      ObjCMethodDecl *&InstanceMethod,
                                      TypedefNameDecl *&TDNDecl,
                                      bool CfToNs, bool Diagnose = true);

bool CheckObjCBridgeRelatedConversions(SourceLocation Loc,
                                       QualType DestType, QualType SrcType,
                                       Expr *&SrcExpr,
                                       bool Diagnose = true);

bool ConversionToObjCStringLiteralCheck(QualType DstType, Expr *&SrcExpr,
                                        bool Diagnose = true);

bool checkInitMethod(ObjCMethodDecl *method, QualType receiverTypeIfCall);

/// Check whether the given new method is a valid override of the
/// given overridden method, and set any properties that should be inherited.
void CheckObjCMethodOverride(ObjCMethodDecl *NewMethod,
                             const ObjCMethodDecl *Overridden);

/// Describes the compatibility of a result type with its method.
enum ResultTypeCompatibilityKind {
  RTC_Compatible,
  RTC_Incompatible,
  RTC_Unknown
};

void CheckObjCMethodOverrides(ObjCMethodDecl *ObjCMethod,
                              ObjCInterfaceDecl *CurrentClass,
                              ResultTypeCompatibilityKind RTC);

enum PragmaOptionsAlignKind {
  POAK_Native,  // #pragma options align=native
  POAK_Natural, // #pragma options align=natural
  POAK_Packed,  // #pragma options align=packed
  POAK_Power,   // #pragma options align=power
  POAK_Mac68k,  // #pragma options align=mac68k
  POAK_Reset    // #pragma options align=reset
};

/// ActOnPragmaClangSection - Called on well formed \#pragma clang section
void ActOnPragmaClangSection(SourceLocation PragmaLoc,
                             PragmaClangSectionAction Action,
                             PragmaClangSectionKind SecKind,
                             StringRef SecName);

/// ActOnPragmaOptionsAlign - Called on well formed \#pragma options align.
void ActOnPragmaOptionsAlign(PragmaOptionsAlignKind Kind,
                             SourceLocation PragmaLoc);

/// ActOnPragmaPack - Called on well formed \#pragma pack(...).
void ActOnPragmaPack(SourceLocation PragmaLoc, PragmaMsStackAction Action,
                     StringRef SlotLabel, Expr *Alignment);

/// Reasons a non-default \#pragma pack state is diagnosed.
enum class PragmaPackDiagnoseKind {
  NonDefaultStateAtInclude,
  ChangedStateAtExit
};

void DiagnoseNonDefaultPragmaPack(PragmaPackDiagnoseKind Kind,
                                  SourceLocation IncludeLoc);

void DiagnoseUnterminatedPragmaPack();

/// ActOnPragmaMSStruct - Called on well formed \#pragma ms_struct [on|off].
void ActOnPragmaMSStruct(PragmaMSStructKind Kind);

/// ActOnPragmaMSComment - Called on well formed
/// \#pragma comment(kind, "arg").
void ActOnPragmaMSComment(SourceLocation CommentLoc,
                          PragmaMSCommentKind Kind, StringRef Arg);

/// ActOnPragmaMSPointersToMembers - called on well formed \#pragma
/// pointers_to_members(representation method[, general purpose
/// representation]).
void ActOnPragmaMSPointersToMembers(
    LangOptions::PragmaMSPointersToMembersKind Kind,
    SourceLocation PragmaLoc);

/// Called on well formed \#pragma vtordisp().
void ActOnPragmaMSVtorDisp(PragmaMsStackAction Action,
                           SourceLocation PragmaLoc,
                           MSVtorDispAttr::Mode Value);

enum PragmaSectionKind {
  PSK_DataSeg,
  PSK_BSSSeg,
  PSK_ConstSeg,
  PSK_CodeSeg,
};

bool UnifySection(StringRef SectionName, int SectionFlags,
                  DeclaratorDecl *TheDecl);
bool UnifySection(StringRef SectionName, int SectionFlags,
                  SourceLocation PragmaSectionLocation);

/// Called on well formed \#pragma bss_seg/data_seg/const_seg/code_seg.
void ActOnPragmaMSSeg(SourceLocation PragmaLocation,
                      PragmaMsStackAction Action,
                      llvm::StringRef StackSlotLabel,
                      StringLiteral *SegmentName,
                      llvm::StringRef PragmaName);

/// Called on well formed \#pragma section().
void ActOnPragmaMSSection(SourceLocation PragmaLocation, int SectionFlags,
                          StringLiteral *SegmentName);

/// Called on well-formed \#pragma init_seg().
void ActOnPragmaMSInitSeg(SourceLocation PragmaLocation,
                          StringLiteral *SegmentName);

/// Called on #pragma clang __debug dump II
void ActOnPragmaDump(Scope *S, SourceLocation Loc, IdentifierInfo *II);

/// ActOnPragmaDetectMismatch - Call on well-formed \#pragma detect_mismatch
void ActOnPragmaDetectMismatch(SourceLocation Loc, StringRef Name,
                               StringRef Value);

/// ActOnPragmaUnused - Called on well-formed '\#pragma unused'.
void ActOnPragmaUnused(const Token &Identifier, Scope *curScope,
                       SourceLocation PragmaLoc);

/// ActOnPragmaVisibility - Called on well formed \#pragma GCC visibility... .
void ActOnPragmaVisibility(const IdentifierInfo* VisType,
                           SourceLocation PragmaLoc);

NamedDecl *DeclClonePragmaWeak(NamedDecl *ND, IdentifierInfo *II,
                               SourceLocation Loc);

void DeclApplyPragmaWeak(Scope *S, NamedDecl *ND, WeakInfo &W);

/// ActOnPragmaWeakID - Called on well formed \#pragma weak ident.
void ActOnPragmaWeakID(IdentifierInfo* WeakName,
                       SourceLocation PragmaLoc,
                       SourceLocation WeakNameLoc);

/// ActOnPragmaRedefineExtname - Called on well formed
/// \#pragma redefine_extname oldname newname.
void ActOnPragmaRedefineExtname(IdentifierInfo* WeakName,
                                IdentifierInfo* AliasName,
                                SourceLocation PragmaLoc,
                                SourceLocation WeakNameLoc,
                                SourceLocation AliasNameLoc);

/// ActOnPragmaWeakAlias - Called on well formed \#pragma weak ident = ident.
void ActOnPragmaWeakAlias(IdentifierInfo* WeakName,
                          IdentifierInfo* AliasName,
                          SourceLocation PragmaLoc,
                          SourceLocation WeakNameLoc,
                          SourceLocation AliasNameLoc);

/// ActOnPragmaFPContract - Called on well formed
/// \#pragma {STDC,OPENCL} FP_CONTRACT and
/// \#pragma clang fp contract
void ActOnPragmaFPContract(LangOptions::FPContractModeKind FPC);

/// ActOnPragmaFenvAccess - Called on well formed
/// \#pragma STDC FENV_ACCESS
void ActOnPragmaFEnvAccess(LangOptions::FEnvAccessModeKind FPC);

/// AddAlignmentAttributesForRecord - Adds any needed alignment attributes to
/// the record decl, to handle '\#pragma pack' and '\#pragma options align'.
void AddAlignmentAttributesForRecord(RecordDecl *RD);

/// AddMsStructLayoutForRecord - Adds ms_struct layout attribute to record.
void AddMsStructLayoutForRecord(RecordDecl *RD);

/// FreePackedContext - Deallocate and null out PackContext.
void FreePackedContext();

/// PushNamespaceVisibilityAttr - Note that we've entered a
/// namespace with a visibility attribute.
void PushNamespaceVisibilityAttr(const VisibilityAttr *Attr,
                                 SourceLocation Loc);

/// AddPushedVisibilityAttribute - If '\#pragma GCC visibility' was used,
/// add an appropriate visibility attribute.
void AddPushedVisibilityAttribute(Decl *RD);

/// PopPragmaVisibility - Pop the top element of the visibility stack; used
/// for '\#pragma GCC visibility' and visibility attributes on namespaces.
void PopPragmaVisibility(bool IsNamespaceEnd, SourceLocation EndLoc);

/// FreeVisContext - Deallocate and null out VisContext.
void FreeVisContext();

/// AddCFAuditedAttribute - Check whether we're currently within
/// '\#pragma clang arc_cf_code_audited' and, if so, consider adding
/// the appropriate attribute.
void AddCFAuditedAttribute(Decl *D);

/// Called on a well-formed '\#pragma clang attribute push' with an
/// attribute and subject match rules.
void ActOnPragmaAttributeAttribute(ParsedAttr &Attribute,
                                   SourceLocation PragmaLoc,
                                   attr::ParsedSubjectMatchRuleSet Rules);

void ActOnPragmaAttributeEmptyPush(SourceLocation PragmaLoc,
                                   const IdentifierInfo *Namespace);

/// Called on well-formed '\#pragma clang attribute pop'.
void ActOnPragmaAttributePop(SourceLocation PragmaLoc,
                             const IdentifierInfo *Namespace);

/// Adds the attributes that have been specified using the
/// '\#pragma clang attribute push' directives to the given declaration.
void AddPragmaAttributes(Scope *S, Decl *D);

void DiagnoseUnterminatedPragmaAttribute();

/// Called on well formed \#pragma clang optimize.
void ActOnPragmaOptimize(bool On, SourceLocation PragmaLoc);

/// Get the location for the currently active "\#pragma clang optimize
/// off". If this location is invalid, then the state of the pragma is "on".
SourceLocation getOptimizeOffPragmaLocation() const {
  return OptimizeOffPragmaLocation;
}

/// Only called on function definitions; if there is a pragma in scope
/// with the effect of a range-based optnone, consider marking the function
/// with attribute optnone.
void AddRangeBasedOptnone(FunctionDecl *FD);

/// Adds the 'optnone' attribute to the function declaration if there
/// are no conflicts; Loc represents the location causing the 'optnone'
/// attribute to be added (usually because of a pragma).
void AddOptnoneAttributeIfNoConflicts(FunctionDecl *FD, SourceLocation Loc);

/// AddAlignedAttr - Adds an aligned attribute to a particular declaration.
void AddAlignedAttr(SourceRange AttrRange, Decl *D, Expr *E,
                    unsigned SpellingListIndex, bool IsPackExpansion);
void AddAlignedAttr(SourceRange AttrRange, Decl *D, TypeSourceInfo *T,
                    unsigned SpellingListIndex, bool IsPackExpansion);

/// AddAssumeAlignedAttr - Adds an assume_aligned attribute to a particular
/// declaration.
void AddAssumeAlignedAttr(SourceRange AttrRange, Decl *D, Expr *E, Expr *OE,
                          unsigned SpellingListIndex);

/// AddAllocAlignAttr - Adds an alloc_align attribute to a particular
/// declaration.
void AddAllocAlignAttr(SourceRange AttrRange, Decl *D, Expr *ParamExpr,
                       unsigned SpellingListIndex);

/// AddAlignValueAttr - Adds an align_value attribute to a particular
/// declaration.
void AddAlignValueAttr(SourceRange AttrRange, Decl *D, Expr *E,
                       unsigned SpellingListIndex);

/// AddLaunchBoundsAttr - Adds a launch_bounds attribute to a particular
/// declaration.
void AddLaunchBoundsAttr(SourceRange AttrRange, Decl *D, Expr *MaxThreads,
                         Expr *MinBlocks, unsigned SpellingListIndex);

/// AddModeAttr - Adds a mode attribute to a particular declaration.
void AddModeAttr(SourceRange AttrRange, Decl *D, IdentifierInfo *Name,
                 unsigned SpellingListIndex, bool InInstantiation = false);

void AddParameterABIAttr(SourceRange AttrRange, Decl *D, ParameterABI ABI,
                         unsigned SpellingListIndex);

/// Which family of "consumed"/ownership attributes is being added.
enum class RetainOwnershipKind {NS, CF, OS};

void AddXConsumedAttr(Decl *D, SourceRange SR, unsigned SpellingIndex,
                      RetainOwnershipKind K, bool IsTemplateInstantiation);

/// addAMDGPUFlatWorkGroupSizeAttr - Adds an amdgpu_flat_work_group_size
/// attribute to a particular declaration.
void addAMDGPUFlatWorkGroupSizeAttr(SourceRange AttrRange, Decl *D,
                                    Expr *Min, Expr *Max,
                                    unsigned SpellingListIndex);

/// addAMDGPUWavesPerEUAttr - Adds an amdgpu_waves_per_eu attribute to a
/// particular declaration.
void addAMDGPUWavesPerEUAttr(SourceRange AttrRange, Decl *D, Expr *Min,
                             Expr *Max, unsigned SpellingListIndex);

bool checkNSReturnsRetainedReturnType(SourceLocation loc, QualType type);

//===--------------------------------------------------------------------===//
// C++ Coroutines TS
//
bool ActOnCoroutineBodyStart(Scope *S, SourceLocation KwLoc,
                             StringRef Keyword);
ExprResult ActOnCoawaitExpr(Scope *S, SourceLocation KwLoc, Expr *E);
ExprResult ActOnCoyieldExpr(Scope *S, SourceLocation KwLoc, Expr *E);
StmtResult ActOnCoreturnStmt(Scope *S, SourceLocation KwLoc, Expr *E);

ExprResult BuildResolvedCoawaitExpr(SourceLocation KwLoc, Expr *E,
                                    bool IsImplicit = false);
ExprResult BuildUnresolvedCoawaitExpr(SourceLocation KwLoc, Expr *E,
                                      UnresolvedLookupExpr* Lookup);
ExprResult BuildCoyieldExpr(SourceLocation KwLoc, Expr *E);
StmtResult BuildCoreturnStmt(SourceLocation KwLoc, Expr *E,
                             bool IsImplicit = false);
StmtResult BuildCoroutineBodyStmt(CoroutineBodyStmt::CtorArgs);
bool buildCoroutineParameterMoves(SourceLocation Loc);
VarDecl *buildCoroutinePromise(SourceLocation Loc);
void CheckCompletedCoroutineBody(FunctionDecl *FD, Stmt *&Body);
ClassTemplateDecl *lookupCoroutineTraits(SourceLocation KwLoc,
                                         SourceLocation FuncLoc);

//===--------------------------------------------------------------------===//
// OpenCL extensions.
//
private:
/// The OpenCL extension currently being declared, if any.
std::string CurrOpenCLExtension;
/// Extensions required by an OpenCL type.
llvm::DenseMap<const Type*, std::set<std::string>> OpenCLTypeExtMap;
/// Extensions required by an OpenCL declaration.
llvm::DenseMap<const Decl*, std::set<std::string>> OpenCLDeclExtMap;
public:
llvm::StringRef getCurrentOpenCLExtension() const {
  return CurrOpenCLExtension;
}

/// Check if a function declaration \p FD associates with any
/// extensions present in OpenCLDeclExtMap and if so return the
/// extension(s) name(s).
std::string getOpenCLExtensionsFromDeclExtMap(FunctionDecl *FD);

/// Check if a function type \p FT associates with any
/// extensions present in OpenCLTypeExtMap and if so return the
/// extension(s) name(s).
std::string getOpenCLExtensionsFromTypeExtMap(FunctionType *FT);

/// Find an extension in an appropriate extension map and return its name
template<typename T, typename MapT>
std::string getOpenCLExtensionsFromExtMap(T* FT, MapT &Map);

void setCurrentOpenCLExtension(llvm::StringRef Ext) {
  CurrOpenCLExtension = Ext;
}

/// Set OpenCL extensions for a type which can only be used when these
/// OpenCL extensions are enabled. If \p Exts is empty, do nothing.
/// \param Exts A space separated list of OpenCL extensions.
void setOpenCLExtensionForType(QualType T, llvm::StringRef Exts);

/// Set OpenCL extensions for a declaration which can only be
/// used when these OpenCL extensions are enabled. If \p Exts is empty, do
/// nothing.
/// \param Exts A space separated list of OpenCL extensions.
void setOpenCLExtensionForDecl(Decl *FD, llvm::StringRef Exts);

/// Set current OpenCL extensions for a type which can only be used
/// when these OpenCL extensions are enabled. If current OpenCL extension is
/// empty, do nothing.
void setCurrentOpenCLExtensionForType(QualType T);

/// Set current OpenCL extensions for a declaration which
/// can only be used when these OpenCL extensions are enabled. If current
/// OpenCL extension is empty, do nothing.
void setCurrentOpenCLExtensionForDecl(Decl *FD);

bool isOpenCLDisabledDecl(Decl *FD);

/// Check if type \p T corresponding to declaration specifier \p DS
/// is disabled due to required OpenCL extensions being disabled. If so,
/// emit diagnostics.
/// \return true if type is disabled.
bool checkOpenCLDisabledTypeDeclSpec(const DeclSpec &DS, QualType T);

/// Check if declaration \p D used by expression \p E
/// is disabled due to required OpenCL extensions being disabled. If so,
/// emit diagnostics.
/// \return true if type is disabled. bool checkOpenCLDisabledDecl(const NamedDecl &D, const Expr &E); //===--------------------------------------------------------------------===// // OpenMP directives and clauses. // private: void *VarDataSharingAttributesStack; /// Number of nested '#pragma omp declare target' directives. unsigned DeclareTargetNestingLevel = 0; /// Initialization of data-sharing attributes stack. void InitDataSharingAttributesStack(); void DestroyDataSharingAttributesStack(); ExprResult VerifyPositiveIntegerConstantInClause(Expr *Op, OpenMPClauseKind CKind, bool StrictlyPositive = true); /// Returns OpenMP nesting level for current directive. unsigned getOpenMPNestingLevel() const; /// Adjusts the function scopes index for the target-based regions. void adjustOpenMPTargetScopeIndex(unsigned &FunctionScopesIndex, unsigned Level) const; /// Push new OpenMP function region for non-capturing function. void pushOpenMPFunctionRegion(); /// Pop OpenMP function region for non-capturing function. void popOpenMPFunctionRegion(const sema::FunctionScopeInfo *OldFSI); /// Check whether we're allowed to call Callee from the current function. void checkOpenMPDeviceFunction(SourceLocation Loc, FunctionDecl *Callee); /// Check if the expression is allowed to be used in expressions for the /// OpenMP devices. void checkOpenMPDeviceExpr(const Expr *E); /// Checks if a type or a declaration is disabled due to the owning extension /// being disabled, and emits diagnostic messages if it is disabled. /// \param D type or declaration to be checked. /// \param DiagLoc source location for the diagnostic message. /// \param DiagInfo information to be emitted for the diagnostic message. /// \param SrcRange source range of the declaration. /// \param Map maps type or declaration to the extensions. /// \param Selector selects diagnostic message: 0 for type and 1 for /// declaration. /// \return true if the type or declaration is disabled. 
template <typename T, typename DiagLocT, typename DiagInfoT, typename MapT> bool checkOpenCLDisabledTypeOrDecl(T D, DiagLocT DiagLoc, DiagInfoT DiagInfo, MapT &Map, unsigned Selector = 0, SourceRange SrcRange = SourceRange()); public: /// Function tries to capture lambda's captured variables in the OpenMP region /// before the original lambda is captured. void tryCaptureOpenMPLambdas(ValueDecl *V); /// Return true if the provided declaration \a VD should be captured by /// reference. /// \param Level Relative level of nested OpenMP construct for that the check /// is performed. bool isOpenMPCapturedByRef(const ValueDecl *D, unsigned Level) const; /// Check if the specified variable is used in one of the private /// clauses (private, firstprivate, lastprivate, reduction etc.) in OpenMP /// constructs. VarDecl *isOpenMPCapturedDecl(ValueDecl *D, bool CheckScopeInfo = false, unsigned StopAt = 0); ExprResult getOpenMPCapturedExpr(VarDecl *Capture, ExprValueKind VK, ExprObjectKind OK, SourceLocation Loc); /// If the current region is a loop-based region, mark the start of the loop /// construct. void startOpenMPLoop(); /// Check if the specified variable is used in 'private' clause. /// \param Level Relative level of nested OpenMP construct for that the check /// is performed. bool isOpenMPPrivateDecl(const ValueDecl *D, unsigned Level) const; /// Sets OpenMP capture kind (OMPC_private, OMPC_firstprivate, OMPC_map etc.) /// for \p FD based on DSA for the provided corresponding captured declaration /// \p D. void setOpenMPCaptureKind(FieldDecl *FD, const ValueDecl *D, unsigned Level); /// Check if the specified variable is captured by 'target' directive. /// \param Level Relative level of nested OpenMP construct for that the check /// is performed. bool isOpenMPTargetCapturedDecl(const ValueDecl *D, unsigned Level) const; ExprResult PerformOpenMPImplicitIntegerConversion(SourceLocation OpLoc, Expr *Op); /// Called on start of new data sharing attribute block. 
void StartOpenMPDSABlock(OpenMPDirectiveKind K, const DeclarationNameInfo &DirName, Scope *CurScope, SourceLocation Loc); /// Start analysis of clauses. void StartOpenMPClause(OpenMPClauseKind K); /// End analysis of clauses. void EndOpenMPClause(); /// Called on end of data sharing attribute block. void EndOpenMPDSABlock(Stmt *CurDirective); /// Check if the current region is an OpenMP loop region and if it is, /// mark loop control variable, used in \p Init for loop initialization, as /// private by default. /// \param Init First part of the for loop. void ActOnOpenMPLoopInitialization(SourceLocation ForLoc, Stmt *Init); // OpenMP directives and clauses. /// Called on correct id-expression from the '#pragma omp /// threadprivate'. ExprResult ActOnOpenMPIdExpression(Scope *CurScope, CXXScopeSpec &ScopeSpec, const DeclarationNameInfo &Id, OpenMPDirectiveKind Kind); /// Called on well-formed '#pragma omp threadprivate'. DeclGroupPtrTy ActOnOpenMPThreadprivateDirective( SourceLocation Loc, ArrayRef<Expr *> VarList); /// Builds a new OpenMPThreadPrivateDecl and checks its correctness. OMPThreadPrivateDecl *CheckOMPThreadPrivateDecl(SourceLocation Loc, ArrayRef<Expr *> VarList); /// Called on well-formed '#pragma omp allocate'. DeclGroupPtrTy ActOnOpenMPAllocateDirective(SourceLocation Loc, ArrayRef<Expr *> VarList, ArrayRef<OMPClause *> Clauses, DeclContext *Owner = nullptr); /// Called on well-formed '#pragma omp requires'. DeclGroupPtrTy ActOnOpenMPRequiresDirective(SourceLocation Loc, ArrayRef<OMPClause *> ClauseList); /// Check restrictions on Requires directive OMPRequiresDecl *CheckOMPRequiresDecl(SourceLocation Loc, ArrayRef<OMPClause *> Clauses); /// Check if the specified type is allowed to be used in 'omp declare /// reduction' construct. QualType ActOnOpenMPDeclareReductionType(SourceLocation TyLoc, TypeResult ParsedType); /// Called on start of '#pragma omp declare reduction'. 
DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveStart( Scope *S, DeclContext *DC, DeclarationName Name, ArrayRef<std::pair<QualType, SourceLocation>> ReductionTypes, AccessSpecifier AS, Decl *PrevDeclInScope = nullptr); /// Initialize declare reduction construct initializer. void ActOnOpenMPDeclareReductionCombinerStart(Scope *S, Decl *D); /// Finish current declare reduction construct initializer. void ActOnOpenMPDeclareReductionCombinerEnd(Decl *D, Expr *Combiner); /// Initialize declare reduction construct initializer. /// \return omp_priv variable. VarDecl *ActOnOpenMPDeclareReductionInitializerStart(Scope *S, Decl *D); /// Finish current declare reduction construct initializer. void ActOnOpenMPDeclareReductionInitializerEnd(Decl *D, Expr *Initializer, VarDecl *OmpPrivParm); /// Called at the end of '#pragma omp declare reduction'. DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveEnd( Scope *S, DeclGroupPtrTy DeclReductions, bool IsValid); /// Check variable declaration in 'omp declare mapper' construct. TypeResult ActOnOpenMPDeclareMapperVarDecl(Scope *S, Declarator &D); /// Check if the specified type is allowed to be used in 'omp declare /// mapper' construct. QualType ActOnOpenMPDeclareMapperType(SourceLocation TyLoc, TypeResult ParsedType); /// Called on start of '#pragma omp declare mapper'. OMPDeclareMapperDecl *ActOnOpenMPDeclareMapperDirectiveStart( Scope *S, DeclContext *DC, DeclarationName Name, QualType MapperType, SourceLocation StartLoc, DeclarationName VN, AccessSpecifier AS, Decl *PrevDeclInScope = nullptr); /// Build the mapper variable of '#pragma omp declare mapper'. void ActOnOpenMPDeclareMapperDirectiveVarDecl(OMPDeclareMapperDecl *DMD, Scope *S, QualType MapperType, SourceLocation StartLoc, DeclarationName VN); /// Called at the end of '#pragma omp declare mapper'. DeclGroupPtrTy ActOnOpenMPDeclareMapperDirectiveEnd(OMPDeclareMapperDecl *D, Scope *S, ArrayRef<OMPClause *> ClauseList); /// Called on the start of target region i.e. 
/// '#pragma omp declare target'.
bool ActOnStartOpenMPDeclareTargetDirective(SourceLocation Loc);
/// Called at the end of target region i.e. '#pragma omp end declare target'.
void ActOnFinishOpenMPDeclareTargetDirective();
/// Called on correct id-expression from the '#pragma omp declare target'.
void ActOnOpenMPDeclareTargetName(Scope *CurScope, CXXScopeSpec &ScopeSpec,
                                  const DeclarationNameInfo &Id,
                                  OMPDeclareTargetDeclAttr::MapTypeTy MT,
                                  NamedDeclSetType &SameDirectiveDecls);
/// Check declaration inside target region.
void checkDeclIsAllowedInOpenMPTarget(Expr *E, Decl *D,
                                      SourceLocation IdLoc = SourceLocation());
/// Return true inside OpenMP declare target region.
bool isInOpenMPDeclareTargetContext() const {
  return DeclareTargetNestingLevel > 0;
}
/// Return true inside OpenMP target region.
bool isInOpenMPTargetExecutionDirective() const;
/// Return the number of captured regions created for an OpenMP directive.
static int getOpenMPCaptureLevels(OpenMPDirectiveKind Kind);
/// Initialization of captured region for OpenMP region.
void ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope);
/// End of OpenMP region.
///
/// \param S Statement associated with the current OpenMP region.
/// \param Clauses List of clauses for the current OpenMP region.
///
/// \returns Statement for finished OpenMP region.
StmtResult ActOnOpenMPRegionEnd(StmtResult S, ArrayRef<OMPClause *> Clauses);
StmtResult ActOnOpenMPExecutableDirective(
    OpenMPDirectiveKind Kind, const DeclarationNameInfo &DirName,
    OpenMPDirectiveKind CancelRegion, ArrayRef<OMPClause *> Clauses,
    Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp parallel' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPParallelDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); using VarsWithInheritedDSAType = llvm::SmallDenseMap<const ValueDecl *, const Expr *, 4>; /// Called on well-formed '\#pragma omp simd' after parsing /// of the associated statement. StmtResult ActOnOpenMPSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp for' after parsing /// of the associated statement. StmtResult ActOnOpenMPForDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp for simd' after parsing /// of the associated statement. StmtResult ActOnOpenMPForSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp sections' after parsing /// of the associated statement. StmtResult ActOnOpenMPSectionsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp section' after parsing of the /// associated statement. StmtResult ActOnOpenMPSectionDirective(Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp single' after parsing of the /// associated statement. StmtResult ActOnOpenMPSingleDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp master' after parsing of the /// associated statement. StmtResult ActOnOpenMPMasterDirective(Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp critical' after parsing of the /// associated statement. 
StmtResult ActOnOpenMPCriticalDirective(const DeclarationNameInfo &DirName, ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp parallel for' after parsing /// of the associated statement. StmtResult ActOnOpenMPParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp parallel for simd' after /// parsing of the associated statement. StmtResult ActOnOpenMPParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp parallel sections' after /// parsing of the associated statement. StmtResult ActOnOpenMPParallelSectionsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp task' after parsing of the /// associated statement. StmtResult ActOnOpenMPTaskDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp taskyield'. StmtResult ActOnOpenMPTaskyieldDirective(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp barrier'. StmtResult ActOnOpenMPBarrierDirective(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp taskwait'. StmtResult ActOnOpenMPTaskwaitDirective(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp taskgroup'. StmtResult ActOnOpenMPTaskgroupDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp flush'. 
StmtResult ActOnOpenMPFlushDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp ordered' after parsing of the /// associated statement. StmtResult ActOnOpenMPOrderedDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp atomic' after parsing of the /// associated statement. StmtResult ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target' after parsing of the /// associated statement. StmtResult ActOnOpenMPTargetDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target data' after parsing of /// the associated statement. StmtResult ActOnOpenMPTargetDataDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target enter data' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetEnterDataDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc, Stmt *AStmt); /// Called on well-formed '\#pragma omp target exit data' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetExitDataDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc, Stmt *AStmt); /// Called on well-formed '\#pragma omp target parallel' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetParallelDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target parallel for' after /// parsing of the associated statement. 
StmtResult ActOnOpenMPTargetParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams' after parsing of the /// associated statement. StmtResult ActOnOpenMPTeamsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp cancellation point'. StmtResult ActOnOpenMPCancellationPointDirective(SourceLocation StartLoc, SourceLocation EndLoc, OpenMPDirectiveKind CancelRegion); /// Called on well-formed '\#pragma omp cancel'. StmtResult ActOnOpenMPCancelDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc, OpenMPDirectiveKind CancelRegion); /// Called on well-formed '\#pragma omp taskloop' after parsing of the /// associated statement. StmtResult ActOnOpenMPTaskLoopDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp taskloop simd' after parsing of /// the associated statement. StmtResult ActOnOpenMPTaskLoopSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp distribute' after parsing /// of the associated statement. StmtResult ActOnOpenMPDistributeDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target update'. StmtResult ActOnOpenMPTargetUpdateDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc, Stmt *AStmt); /// Called on well-formed '\#pragma omp distribute parallel for' after /// parsing of the associated statement. 
StmtResult ActOnOpenMPDistributeParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp distribute parallel for simd' /// after parsing of the associated statement. StmtResult ActOnOpenMPDistributeParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp distribute simd' after /// parsing of the associated statement. StmtResult ActOnOpenMPDistributeSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target parallel for simd' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target simd' after parsing of /// the associated statement. StmtResult ActOnOpenMPTargetSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams distribute' after parsing of /// the associated statement. StmtResult ActOnOpenMPTeamsDistributeDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams distribute simd' after parsing /// of the associated statement. 
StmtResult ActOnOpenMPTeamsDistributeSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams distribute parallel for simd' /// after parsing of the associated statement. StmtResult ActOnOpenMPTeamsDistributeParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams distribute parallel for' /// after parsing of the associated statement. StmtResult ActOnOpenMPTeamsDistributeParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target teams' after parsing of the /// associated statement. StmtResult ActOnOpenMPTargetTeamsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target teams distribute' after parsing /// of the associated statement. StmtResult ActOnOpenMPTargetTeamsDistributeDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target teams distribute parallel for' /// after parsing of the associated statement. StmtResult ActOnOpenMPTargetTeamsDistributeParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target teams distribute parallel for /// simd' after parsing of the associated statement. 
StmtResult ActOnOpenMPTargetTeamsDistributeParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target teams distribute simd' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetTeamsDistributeSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Checks correctness of linear modifiers. bool CheckOpenMPLinearModifier(OpenMPLinearClauseKind LinKind, SourceLocation LinLoc); /// Checks that the specified declaration matches requirements for the linear /// decls. bool CheckOpenMPLinearDecl(const ValueDecl *D, SourceLocation ELoc, OpenMPLinearClauseKind LinKind, QualType Type); /// Called on well-formed '\#pragma omp declare simd' after parsing of /// the associated method/function. DeclGroupPtrTy ActOnOpenMPDeclareSimdDirective( DeclGroupPtrTy DG, OMPDeclareSimdDeclAttr::BranchStateTy BS, Expr *Simdlen, ArrayRef<Expr *> Uniforms, ArrayRef<Expr *> Aligneds, ArrayRef<Expr *> Alignments, ArrayRef<Expr *> Linears, ArrayRef<unsigned> LinModifiers, ArrayRef<Expr *> Steps, SourceRange SR); OMPClause *ActOnOpenMPSingleExprClause(OpenMPClauseKind Kind, Expr *Expr, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'allocator' clause. OMPClause *ActOnOpenMPAllocatorClause(Expr *Allocator, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'if' clause. OMPClause *ActOnOpenMPIfClause(OpenMPDirectiveKind NameModifier, Expr *Condition, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation NameModifierLoc, SourceLocation ColonLoc, SourceLocation EndLoc); /// Called on well-formed 'final' clause. 
OMPClause *ActOnOpenMPFinalClause(Expr *Condition, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'num_threads' clause. OMPClause *ActOnOpenMPNumThreadsClause(Expr *NumThreads, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'safelen' clause. OMPClause *ActOnOpenMPSafelenClause(Expr *Length, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'simdlen' clause. OMPClause *ActOnOpenMPSimdlenClause(Expr *Length, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'collapse' clause. OMPClause *ActOnOpenMPCollapseClause(Expr *NumForLoops, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'ordered' clause. OMPClause * ActOnOpenMPOrderedClause(SourceLocation StartLoc, SourceLocation EndLoc, SourceLocation LParenLoc = SourceLocation(), Expr *NumForLoops = nullptr); /// Called on well-formed 'grainsize' clause. OMPClause *ActOnOpenMPGrainsizeClause(Expr *Size, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'num_tasks' clause. OMPClause *ActOnOpenMPNumTasksClause(Expr *NumTasks, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'hint' clause. OMPClause *ActOnOpenMPHintClause(Expr *Hint, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); OMPClause *ActOnOpenMPSimpleClause(OpenMPClauseKind Kind, unsigned Argument, SourceLocation ArgumentLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'default' clause. OMPClause *ActOnOpenMPDefaultClause(OpenMPDefaultClauseKind Kind, SourceLocation KindLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'proc_bind' clause. 
OMPClause *ActOnOpenMPProcBindClause(OpenMPProcBindClauseKind Kind, SourceLocation KindLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); OMPClause *ActOnOpenMPSingleExprWithArgClause( OpenMPClauseKind Kind, ArrayRef<unsigned> Arguments, Expr *Expr, SourceLocation StartLoc, SourceLocation LParenLoc, ArrayRef<SourceLocation> ArgumentsLoc, SourceLocation DelimLoc, SourceLocation EndLoc); /// Called on well-formed 'schedule' clause. OMPClause *ActOnOpenMPScheduleClause( OpenMPScheduleClauseModifier M1, OpenMPScheduleClauseModifier M2, OpenMPScheduleClauseKind Kind, Expr *ChunkSize, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation M1Loc, SourceLocation M2Loc, SourceLocation KindLoc, SourceLocation CommaLoc, SourceLocation EndLoc); OMPClause *ActOnOpenMPClause(OpenMPClauseKind Kind, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'nowait' clause. OMPClause *ActOnOpenMPNowaitClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'untied' clause. OMPClause *ActOnOpenMPUntiedClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'mergeable' clause. OMPClause *ActOnOpenMPMergeableClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'read' clause. OMPClause *ActOnOpenMPReadClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'write' clause. OMPClause *ActOnOpenMPWriteClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'update' clause. OMPClause *ActOnOpenMPUpdateClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'capture' clause. OMPClause *ActOnOpenMPCaptureClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'seq_cst' clause. OMPClause *ActOnOpenMPSeqCstClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'threads' clause. 
OMPClause *ActOnOpenMPThreadsClause(SourceLocation StartLoc,
                                    SourceLocation EndLoc);
/// Called on well-formed 'simd' clause.
OMPClause *ActOnOpenMPSIMDClause(SourceLocation StartLoc,
                                 SourceLocation EndLoc);
/// Called on well-formed 'nogroup' clause.
OMPClause *ActOnOpenMPNogroupClause(SourceLocation StartLoc,
                                    SourceLocation EndLoc);
/// Called on well-formed 'unified_address' clause.
OMPClause *ActOnOpenMPUnifiedAddressClause(SourceLocation StartLoc,
                                           SourceLocation EndLoc);
/// Called on well-formed 'unified_shared_memory' clause.
OMPClause *ActOnOpenMPUnifiedSharedMemoryClause(SourceLocation StartLoc,
                                                SourceLocation EndLoc);
/// Called on well-formed 'reverse_offload' clause.
OMPClause *ActOnOpenMPReverseOffloadClause(SourceLocation StartLoc,
                                           SourceLocation EndLoc);
/// Called on well-formed 'dynamic_allocators' clause.
OMPClause *ActOnOpenMPDynamicAllocatorsClause(SourceLocation StartLoc,
                                              SourceLocation EndLoc);
/// Called on well-formed 'atomic_default_mem_order' clause.
OMPClause *ActOnOpenMPAtomicDefaultMemOrderClause(
    OpenMPAtomicDefaultMemOrderClauseKind Kind, SourceLocation KindLoc,
    SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc);
OMPClause *ActOnOpenMPVarListClause(
    OpenMPClauseKind Kind, ArrayRef<Expr *> Vars, Expr *TailExpr,
    const OMPVarListLocTy &Locs, SourceLocation ColonLoc,
    CXXScopeSpec &ReductionOrMapperIdScopeSpec,
    DeclarationNameInfo &ReductionOrMapperId, OpenMPDependClauseKind DepKind,
    OpenMPLinearClauseKind LinKind,
    ArrayRef<OpenMPMapModifierKind> MapTypeModifiers,
    ArrayRef<SourceLocation> MapTypeModifiersLoc, OpenMPMapClauseKind MapType,
    bool IsMapTypeImplicit, SourceLocation DepLinMapLoc);
/// Called on well-formed 'allocate' clause.
OMPClause *
ActOnOpenMPAllocateClause(Expr *Allocator, ArrayRef<Expr *> VarList,
                          SourceLocation StartLoc, SourceLocation ColonLoc,
                          SourceLocation LParenLoc, SourceLocation EndLoc);
/// Called on well-formed 'private' clause.
OMPClause *ActOnOpenMPPrivateClause(ArrayRef<Expr *> VarList,
                                    SourceLocation StartLoc,
                                    SourceLocation LParenLoc,
                                    SourceLocation EndLoc);
/// Called on well-formed 'firstprivate' clause.
OMPClause *ActOnOpenMPFirstprivateClause(ArrayRef<Expr *> VarList,
                                         SourceLocation StartLoc,
                                         SourceLocation LParenLoc,
                                         SourceLocation EndLoc);
/// Called on well-formed 'lastprivate' clause.
OMPClause *ActOnOpenMPLastprivateClause(ArrayRef<Expr *> VarList,
                                        SourceLocation StartLoc,
                                        SourceLocation LParenLoc,
                                        SourceLocation EndLoc);
/// Called on well-formed 'shared' clause.
OMPClause *ActOnOpenMPSharedClause(ArrayRef<Expr *> VarList,
                                   SourceLocation StartLoc,
                                   SourceLocation LParenLoc,
                                   SourceLocation EndLoc);
/// Called on well-formed 'reduction' clause.
OMPClause *ActOnOpenMPReductionClause(
    ArrayRef<Expr *> VarList, SourceLocation StartLoc,
    SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc,
    CXXScopeSpec &ReductionIdScopeSpec,
    const DeclarationNameInfo &ReductionId,
    ArrayRef<Expr *> UnresolvedReductions = llvm::None);
/// Called on well-formed 'task_reduction' clause.
OMPClause *ActOnOpenMPTaskReductionClause(
    ArrayRef<Expr *> VarList, SourceLocation StartLoc,
    SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc,
    CXXScopeSpec &ReductionIdScopeSpec,
    const DeclarationNameInfo &ReductionId,
    ArrayRef<Expr *> UnresolvedReductions = llvm::None);
/// Called on well-formed 'in_reduction' clause.
OMPClause *ActOnOpenMPInReductionClause(
    ArrayRef<Expr *> VarList, SourceLocation StartLoc,
    SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc,
    CXXScopeSpec &ReductionIdScopeSpec,
    const DeclarationNameInfo &ReductionId,
    ArrayRef<Expr *> UnresolvedReductions = llvm::None);
/// Called on well-formed 'linear' clause.
OMPClause *
ActOnOpenMPLinearClause(ArrayRef<Expr *> VarList, Expr *Step,
                        SourceLocation StartLoc, SourceLocation LParenLoc,
                        OpenMPLinearClauseKind LinKind, SourceLocation LinLoc,
                        SourceLocation ColonLoc, SourceLocation EndLoc);
/// Called on well-formed 'aligned' clause.
OMPClause *ActOnOpenMPAlignedClause(ArrayRef<Expr *> VarList,
                                    Expr *Alignment,
                                    SourceLocation StartLoc,
                                    SourceLocation LParenLoc,
                                    SourceLocation ColonLoc,
                                    SourceLocation EndLoc);
/// Called on well-formed 'copyin' clause.
OMPClause *ActOnOpenMPCopyinClause(ArrayRef<Expr *> VarList,
                                   SourceLocation StartLoc,
                                   SourceLocation LParenLoc,
                                   SourceLocation EndLoc);
/// Called on well-formed 'copyprivate' clause.
OMPClause *ActOnOpenMPCopyprivateClause(ArrayRef<Expr *> VarList,
                                        SourceLocation StartLoc,
                                        SourceLocation LParenLoc,
                                        SourceLocation EndLoc);
/// Called on well-formed 'flush' pseudo clause.
OMPClause *ActOnOpenMPFlushClause(ArrayRef<Expr *> VarList,
                                  SourceLocation StartLoc,
                                  SourceLocation LParenLoc,
                                  SourceLocation EndLoc);
/// Called on well-formed 'depend' clause.
OMPClause *
ActOnOpenMPDependClause(OpenMPDependClauseKind DepKind, SourceLocation DepLoc,
                        SourceLocation ColonLoc, ArrayRef<Expr *> VarList,
                        SourceLocation StartLoc, SourceLocation LParenLoc,
                        SourceLocation EndLoc);
/// Called on well-formed 'device' clause.
OMPClause *ActOnOpenMPDeviceClause(Expr *Device, SourceLocation StartLoc,
                                   SourceLocation LParenLoc,
                                   SourceLocation EndLoc);
/// Called on well-formed 'map' clause.
OMPClause *
ActOnOpenMPMapClause(ArrayRef<OpenMPMapModifierKind> MapTypeModifiers,
                     ArrayRef<SourceLocation> MapTypeModifiersLoc,
                     CXXScopeSpec &MapperIdScopeSpec,
                     DeclarationNameInfo &MapperId,
                     OpenMPMapClauseKind MapType, bool IsMapTypeImplicit,
                     SourceLocation MapLoc, SourceLocation ColonLoc,
                     ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs,
                     ArrayRef<Expr *> UnresolvedMappers = llvm::None);
/// Called on well-formed 'num_teams' clause.
OMPClause *ActOnOpenMPNumTeamsClause(Expr *NumTeams, SourceLocation StartLoc,
                                     SourceLocation LParenLoc,
                                     SourceLocation EndLoc);
/// Called on well-formed 'thread_limit' clause.
OMPClause *ActOnOpenMPThreadLimitClause(Expr *ThreadLimit,
                                        SourceLocation StartLoc,
                                        SourceLocation LParenLoc,
                                        SourceLocation EndLoc);
/// Called on well-formed 'priority' clause.
OMPClause *ActOnOpenMPPriorityClause(Expr *Priority, SourceLocation StartLoc,
                                     SourceLocation LParenLoc,
                                     SourceLocation EndLoc);
/// Called on well-formed 'dist_schedule' clause.
OMPClause *ActOnOpenMPDistScheduleClause(
    OpenMPDistScheduleClauseKind Kind, Expr *ChunkSize,
    SourceLocation StartLoc, SourceLocation LParenLoc,
    SourceLocation KindLoc, SourceLocation CommaLoc, SourceLocation EndLoc);
/// Called on well-formed 'defaultmap' clause.
OMPClause *ActOnOpenMPDefaultmapClause(
    OpenMPDefaultmapClauseModifier M, OpenMPDefaultmapClauseKind Kind,
    SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation MLoc,
    SourceLocation KindLoc, SourceLocation EndLoc);
/// Called on well-formed 'to' clause.
OMPClause *
ActOnOpenMPToClause(ArrayRef<Expr *> VarList, CXXScopeSpec &MapperIdScopeSpec,
                    DeclarationNameInfo &MapperId,
                    const OMPVarListLocTy &Locs,
                    ArrayRef<Expr *> UnresolvedMappers = llvm::None);
/// Called on well-formed 'from' clause.
OMPClause *ActOnOpenMPFromClause(
    ArrayRef<Expr *> VarList, CXXScopeSpec &MapperIdScopeSpec,
    DeclarationNameInfo &MapperId, const OMPVarListLocTy &Locs,
    ArrayRef<Expr *> UnresolvedMappers = llvm::None);
/// Called on well-formed 'use_device_ptr' clause.
OMPClause *ActOnOpenMPUseDevicePtrClause(ArrayRef<Expr *> VarList,
                                         const OMPVarListLocTy &Locs);
/// Called on well-formed 'is_device_ptr' clause.
OMPClause *ActOnOpenMPIsDevicePtrClause(ArrayRef<Expr *> VarList,
                                        const OMPVarListLocTy &Locs);

/// The kind of conversion being performed.
enum CheckedConversionKind {
  /// An implicit conversion.
  CCK_ImplicitConversion,
  /// A C-style cast.
  CCK_CStyleCast,
  /// A functional-style cast.
CCK_FunctionalCast,
  /// A cast other than a C-style cast.
  CCK_OtherCast,
  /// A conversion for an operand of a builtin overloaded operator.
  CCK_ForBuiltinOverloadedOp
};

/// Returns true if \p CCK names an explicit cast form (C-style,
/// functional, or other), as opposed to an implicit conversion or a
/// builtin-operator operand conversion.
static bool isCast(CheckedConversionKind CCK) {
  return CCK == CCK_CStyleCast || CCK == CCK_FunctionalCast ||
         CCK == CCK_OtherCast;
}

/// ImpCastExprToType - If Expr is not of type 'Type', insert an implicit
/// cast. If there is already an implicit cast, merge into the existing one.
/// If isLvalue, the result of the cast is an lvalue.
ExprResult ImpCastExprToType(Expr *E, QualType Type, CastKind CK,
                             ExprValueKind VK = VK_RValue,
                             const CXXCastPath *BasePath = nullptr,
                             CheckedConversionKind CCK
                                 = CCK_ImplicitConversion);

/// ScalarTypeToBooleanCastKind - Returns the cast kind corresponding
/// to the conversion from scalar type ScalarTy to the Boolean type.
static CastKind ScalarTypeToBooleanCastKind(QualType ScalarTy);

/// IgnoredValueConversions - Given that an expression's result is
/// syntactically ignored, perform any conversions that are
/// required.
ExprResult IgnoredValueConversions(Expr *E);

// UsualUnaryConversions - promotes integers (C99 6.3.1.1p2) and converts
// functions and arrays to their respective pointers (C99 6.3.2.1).
ExprResult UsualUnaryConversions(Expr *E);

/// CallExprUnaryConversions - a special case of a unary conversion
/// performed on a function designator of a call expression.
ExprResult CallExprUnaryConversions(Expr *E);

// DefaultFunctionArrayConversion - converts functions and arrays
// to their respective pointers (C99 6.3.2.1).
ExprResult DefaultFunctionArrayConversion(Expr *E, bool Diagnose = true);

// DefaultFunctionArrayLvalueConversion - converts functions and
// arrays to their respective pointers and performs the
// lvalue-to-rvalue conversion.
ExprResult DefaultFunctionArrayLvalueConversion(Expr *E,
                                                bool Diagnose = true);

// DefaultLvalueConversion - performs lvalue-to-rvalue conversion on
// the operand. This is DefaultFunctionArrayLvalueConversion,
// except that it assumes the operand isn't of function or array
// type.
ExprResult DefaultLvalueConversion(Expr *E);

// DefaultArgumentPromotion (C99 6.5.2.2p6). Used for function calls that
// do not have a prototype. Integer promotions are performed on each
// argument, and arguments that have type float are promoted to double.
ExprResult DefaultArgumentPromotion(Expr *E);

/// If \p E is a prvalue denoting an unmaterialized temporary, materialize
/// it as an xvalue. In C++98, the result will still be a prvalue, because
/// we don't have xvalues there.
ExprResult TemporaryMaterializationConversion(Expr *E);

// Used for emitting the right warning by DefaultVariadicArgumentPromotion
enum VariadicCallType {
  VariadicFunction,
  VariadicBlock,
  VariadicMethod,
  VariadicConstructor,
  VariadicDoesNotApply
};

VariadicCallType getVariadicCallType(FunctionDecl *FDecl,
                                     const FunctionProtoType *Proto,
                                     Expr *Fn);

// Used for determining in which context a type is allowed to be passed to a
// vararg function.
enum VarArgKind {
  VAK_Valid,
  VAK_ValidInCXX11,
  VAK_Undefined,
  VAK_MSVCUndefined,
  VAK_Invalid
};

// Determines which VarArgKind fits an expression.
VarArgKind isValidVarArgType(const QualType &Ty);

/// Check to see if the given expression is a valid argument to a variadic
/// function, issuing a diagnostic if not.
void checkVariadicArgument(const Expr *E, VariadicCallType CT);

/// Check to see if a given expression could have '.c_str()' called on it.
bool hasCStrMethod(const Expr *E);

/// GatherArgumentsForCall - Collects argument expressions for various
/// forms of call prototypes.
bool GatherArgumentsForCall(SourceLocation CallLoc, FunctionDecl *FDecl,
                            const FunctionProtoType *Proto,
                            unsigned FirstParam, ArrayRef<Expr *> Args,
                            SmallVectorImpl<Expr *> &AllArgs,
                            VariadicCallType CallType = VariadicDoesNotApply,
                            bool AllowExplicit = false,
                            bool IsListInitialization = false);

// DefaultVariadicArgumentPromotion - Like DefaultArgumentPromotion, but
// will create a runtime trap if the resulting type is not a POD type.
ExprResult DefaultVariadicArgumentPromotion(Expr *E, VariadicCallType CT,
                                            FunctionDecl *FDecl);

// UsualArithmeticConversions - performs the UsualUnaryConversions on its
// operands and then handles various conversions that are common to binary
// operators (C99 6.3.1.8). If both operands aren't arithmetic, this
// routine returns the first non-arithmetic type found. The client is
// responsible for emitting appropriate error diagnostics.
QualType UsualArithmeticConversions(ExprResult &LHS, ExprResult &RHS,
                                    bool IsCompAssign = false);

/// AssignConvertType - All of the 'assignment' semantic checks return this
/// enum to indicate whether the assignment was allowed. These checks are
/// done for simple assignments, as well as initialization, return from
/// function, argument passing, etc. The query is phrased in terms of a
/// source and destination type.
enum AssignConvertType {
  /// Compatible - the types are compatible according to the standard.
  Compatible,

  /// PointerToInt - The assignment converts a pointer to an int, which we
  /// accept as an extension.
  PointerToInt,

  /// IntToPointer - The assignment converts an int to a pointer, which we
  /// accept as an extension.
  IntToPointer,

  /// FunctionVoidPointer - The assignment is between a function pointer and
  /// void*, which the standard doesn't allow, but we accept as an extension.
  FunctionVoidPointer,

  /// IncompatiblePointer - The assignment is between two pointers types that
  /// are not compatible, but we accept them as an extension.
IncompatiblePointer,

  /// IncompatiblePointerSign - The assignment is between two pointers types
  /// which point to integers which have a different sign, but are otherwise
  /// identical. This is a subset of the above, but broken out because it's by
  /// far the most common case of incompatible pointers.
  IncompatiblePointerSign,

  /// CompatiblePointerDiscardsQualifiers - The assignment discards
  /// c/v/r qualifiers, which we accept as an extension.
  CompatiblePointerDiscardsQualifiers,

  /// IncompatiblePointerDiscardsQualifiers - The assignment
  /// discards qualifiers that we don't permit to be discarded,
  /// like address spaces.
  IncompatiblePointerDiscardsQualifiers,

  /// IncompatibleNestedPointerAddressSpaceMismatch - The assignment
  /// changes address spaces in nested pointer types which is not allowed.
  /// For instance, converting __private int ** to __generic int ** is
  /// illegal even though __private could be converted to __generic.
  IncompatibleNestedPointerAddressSpaceMismatch,

  /// IncompatibleNestedPointerQualifiers - The assignment is between two
  /// nested pointer types, and the qualifiers other than the first two
  /// levels differ e.g. char ** -> const char **, but we accept them as an
  /// extension.
  IncompatibleNestedPointerQualifiers,

  /// IncompatibleVectors - The assignment is between two vector types that
  /// have the same size, which we accept as an extension.
  IncompatibleVectors,

  /// IntToBlockPointer - The assignment converts an int to a block
  /// pointer. We disallow this.
  IntToBlockPointer,

  /// IncompatibleBlockPointer - The assignment is between two block
  /// pointers types that are not compatible.
  IncompatibleBlockPointer,

  /// IncompatibleObjCQualifiedId - The assignment is between a qualified
  /// id type and something else (that is incompatible with it). For example,
  /// "id <XXX>" = "Foo *", where "Foo *" doesn't implement the XXX protocol.
  IncompatibleObjCQualifiedId,

  /// IncompatibleObjCWeakRef - Assigning a weak-unavailable object to an
  /// object with __weak qualifier.
  IncompatibleObjCWeakRef,

  /// Incompatible - We reject this conversion outright, it is invalid to
  /// represent it in the AST.
  Incompatible
};

// C++AMP diagnostic routine for expressions
void DiagnoseCXXAMPExpr(Expr* Stripped, ExprResult &HS,
                        bool DiagnoseWhenStatic = false);

/// DiagnoseAssignmentResult - Emit a diagnostic, if required, for the
/// assignment conversion type specified by ConvTy. This returns true if the
/// conversion was invalid or false if the conversion was accepted.
bool DiagnoseAssignmentResult(AssignConvertType ConvTy,
                              SourceLocation Loc,
                              QualType DstType, QualType SrcType,
                              Expr *SrcExpr, AssignmentAction Action,
                              bool *Complained = nullptr);

/// IsValueInFlagEnum - Determine if a value is allowed as part of a flag
/// enum. If AllowMask is true, then we also allow the complement of a valid
/// value, to be used as a mask.
bool IsValueInFlagEnum(const EnumDecl *ED, const llvm::APInt &Val,
                       bool AllowMask) const;

/// DiagnoseAssignmentEnum - Warn if assignment to enum is a constant
/// integer not in the range of enum values.
void DiagnoseAssignmentEnum(QualType DstType, QualType SrcType,
                            Expr *SrcExpr);

/// CheckAssignmentConstraints - Perform type checking for assignment,
/// argument passing, variable initialization, and function return values.
/// C99 6.5.16.
AssignConvertType CheckAssignmentConstraints(SourceLocation Loc,
                                             QualType LHSType,
                                             QualType RHSType);

/// Check assignment constraints and optionally prepare for a conversion of
/// the RHS to the LHS type. The conversion is prepared for if ConvertRHS
/// is true.
AssignConvertType CheckAssignmentConstraints(QualType LHSType,
                                             ExprResult &RHS,
                                             CastKind &Kind,
                                             bool ConvertRHS = true);

/// Check assignment constraints for an assignment of RHS to LHSType.
///
/// \param LHSType The destination type for the assignment.
/// \param RHS The source expression for the assignment.
/// \param Diagnose If \c true, diagnostics may be produced when checking
///        for assignability. If a diagnostic is produced, \p RHS will be
///        set to ExprError(). Note that this function may still return
///        without producing a diagnostic, even for an invalid assignment.
/// \param DiagnoseCFAudited If \c true, the target is a function parameter
///        in an audited Core Foundation API and does not need to be checked
///        for ARC retain issues.
/// \param ConvertRHS If \c true, \p RHS will be updated to model the
///        conversions necessary to perform the assignment. If \c false,
///        \p Diagnose must also be \c false.
AssignConvertType CheckSingleAssignmentConstraints(
    QualType LHSType, ExprResult &RHS, bool Diagnose = true,
    bool DiagnoseCFAudited = false, bool ConvertRHS = true);

// If the lhs type is a transparent union, check whether we
// can initialize the transparent union with the given expression.
AssignConvertType CheckTransparentUnionArgumentConstraints(QualType ArgType,
                                                           ExprResult &RHS);

bool IsStringLiteralToNonConstPointerConversion(Expr *From, QualType ToType);

bool CheckExceptionSpecCompatibility(Expr *From, QualType ToType);

ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
                                     AssignmentAction Action,
                                     bool AllowExplicit = false);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
                                     AssignmentAction Action,
                                     bool AllowExplicit,
                                     ImplicitConversionSequence& ICS);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
                                     const ImplicitConversionSequence& ICS,
                                     AssignmentAction Action,
                                     CheckedConversionKind CCK
                                         = CCK_ImplicitConversion);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
                                     const StandardConversionSequence& SCS,
                                     AssignmentAction Action,
                                     CheckedConversionKind CCK);

ExprResult PerformQualificationConversion(
    Expr *E, QualType Ty, ExprValueKind VK = VK_RValue,
    CheckedConversionKind CCK = CCK_ImplicitConversion);

/// the following "Check" methods will return a valid/converted QualType
/// or a null QualType (indicating an error diagnostic was issued).

/// type checking binary operators (subroutines of CreateBuiltinBinOp).
QualType InvalidOperands(SourceLocation Loc, ExprResult &LHS,
                         ExprResult &RHS);
QualType InvalidLogicalVectorOperands(SourceLocation Loc, ExprResult &LHS,
                                      ExprResult &RHS);
QualType CheckPointerToMemberOperands( // C++ 5.5
    ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK,
    SourceLocation OpLoc, bool isIndirect);
QualType CheckMultiplyDivideOperands( // C99 6.5.5
    ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
    bool IsCompAssign, bool IsDivide);
QualType CheckRemainderOperands( // C99 6.5.5
    ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
    bool IsCompAssign = false);
QualType CheckAdditionOperands( // C99 6.5.6
    ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
    BinaryOperatorKind Opc, QualType* CompLHSTy = nullptr);
QualType CheckSubtractionOperands( // C99 6.5.6
    ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
    QualType* CompLHSTy = nullptr);
QualType CheckShiftOperands( // C99 6.5.7
    ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
    BinaryOperatorKind Opc, bool IsCompAssign = false);
QualType CheckCompareOperands( // C99 6.5.8/9
    ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
    BinaryOperatorKind Opc);
QualType CheckBitwiseOperands( // C99 6.5.[10...12]
    ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
    BinaryOperatorKind Opc);
QualType CheckLogicalOperands( // C99 6.5.[13,14]
    ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
    BinaryOperatorKind Opc);
// CheckAssignmentOperands is used for both simple and compound assignment.
// For simple assignment, pass both expressions and a null converted type.
// For compound assignment, pass both expressions and the converted type.
QualType CheckAssignmentOperands( // C99 6.5.16.[1,2]
    Expr *LHSExpr, ExprResult &RHS, SourceLocation Loc,
    QualType CompoundType);

ExprResult checkPseudoObjectIncDec(Scope *S, SourceLocation OpLoc,
                                   UnaryOperatorKind Opcode, Expr *Op);
ExprResult checkPseudoObjectAssignment(Scope *S, SourceLocation OpLoc,
                                       BinaryOperatorKind Opcode,
                                       Expr *LHS, Expr *RHS);
ExprResult checkPseudoObjectRValue(Expr *E);
Expr *recreateSyntacticForm(PseudoObjectExpr *E);

QualType CheckConditionalOperands( // C99 6.5.15
    ExprResult &Cond, ExprResult &LHS, ExprResult &RHS,
    ExprValueKind &VK, ExprObjectKind &OK, SourceLocation QuestionLoc);
QualType CXXCheckConditionalOperands( // C++ 5.16
    ExprResult &cond, ExprResult &lhs, ExprResult &rhs,
    ExprValueKind &VK, ExprObjectKind &OK, SourceLocation questionLoc);

QualType FindCompositePointerType(SourceLocation Loc, Expr *&E1, Expr *&E2,
                                  bool ConvertArgs = true);
/// Convenience overload taking ExprResults: unwraps the expressions,
/// delegates to the Expr*& overload above, and writes any converted
/// expressions back into the ExprResults.
QualType FindCompositePointerType(SourceLocation Loc,
                                  ExprResult &E1, ExprResult &E2,
                                  bool ConvertArgs = true) {
  Expr *E1Tmp = E1.get(), *E2Tmp = E2.get();
  QualType Composite =
      FindCompositePointerType(Loc, E1Tmp, E2Tmp, ConvertArgs);
  E1 = E1Tmp;
  E2 = E2Tmp;
  return Composite;
}

QualType FindCompositeObjCPointerType(ExprResult &LHS, ExprResult &RHS,
                                      SourceLocation QuestionLoc);

bool DiagnoseConditionalForNull(Expr *LHSExpr, Expr *RHSExpr,
                                SourceLocation QuestionLoc);

void DiagnoseAlwaysNonNullPointer(Expr *E,
                                  Expr::NullPointerConstantKind NullType,
                                  bool IsEqual, SourceRange Range);

/// type checking for vector binary operators.
QualType CheckVectorOperands(ExprResult &LHS, ExprResult &RHS,
                             SourceLocation Loc, bool IsCompAssign,
                             bool AllowBothBool, bool AllowBoolConversion);
QualType GetSignedVectorType(QualType V);
QualType CheckVectorCompareOperands(ExprResult &LHS, ExprResult &RHS,
                                    SourceLocation Loc,
                                    BinaryOperatorKind Opc);
QualType CheckVectorLogicalOperands(ExprResult &LHS, ExprResult &RHS,
                                    SourceLocation Loc);

bool areLaxCompatibleVectorTypes(QualType srcType, QualType destType);
bool isLaxVectorConversion(QualType srcType, QualType destType);

/// type checking declaration initializers (C99 6.7.8)
bool CheckForConstantInitializer(Expr *e, QualType t);

// type checking C++ declaration initializers (C++ [dcl.init]).

/// ReferenceCompareResult - Expresses the result of comparing two
/// types (cv1 T1 and cv2 T2) to determine their compatibility for the
/// purposes of initialization by reference (C++ [dcl.init.ref]p4).
enum ReferenceCompareResult {
  /// Ref_Incompatible - The two types are incompatible, so direct
  /// reference binding is not possible.
  Ref_Incompatible = 0,
  /// Ref_Related - The two types are reference-related, which means
  /// that their unqualified forms (T1 and T2) are either the same
  /// or T1 is a base class of T2.
  Ref_Related,
  /// Ref_Compatible - The two types are reference-compatible.
  Ref_Compatible
};

ReferenceCompareResult CompareReferenceRelationship(SourceLocation Loc,
                                                    QualType T1, QualType T2,
                                                    bool &DerivedToBase,
                                                    bool &ObjCConversion,
                                                    bool &ObjCLifetimeConversion);

ExprResult checkUnknownAnyCast(SourceRange TypeRange, QualType CastType,
                               Expr *CastExpr, CastKind &CastKind,
                               ExprValueKind &VK, CXXCastPath &Path);

/// Force an expression with unknown-type to an expression of the
/// given type.
ExprResult forceUnknownAnyToType(Expr *E, QualType ToType);

/// Type-check an expression that's being passed to an
/// __unknown_anytype parameter.
ExprResult checkUnknownAnyArg(SourceLocation callLoc,
                              Expr *result, QualType &paramType);

// CheckVectorCast - check type constraints for vectors.
// Since vectors are an extension, there are no C standard reference for this.
// We allow casting between vectors and integer datatypes of the same size.
// returns true if the cast is invalid
bool CheckVectorCast(SourceRange R, QualType VectorTy, QualType Ty,
                     CastKind &Kind);

/// Prepare `SplattedExpr` for a vector splat operation, adding
/// implicit casts if necessary.
ExprResult prepareVectorSplat(QualType VectorTy, Expr *SplattedExpr);

// CheckExtVectorCast - check type constraints for extended vectors.
// Since vectors are an extension, there are no C standard reference for this.
// We allow casting between vectors and integer datatypes of the same size,
// or vectors and the element type of that vector.
// returns the cast expr
ExprResult CheckExtVectorCast(SourceRange R, QualType DestTy, Expr *CastExpr,
                              CastKind &Kind);

ExprResult BuildCXXFunctionalCastExpr(TypeSourceInfo *TInfo, QualType Type,
                                      SourceLocation LParenLoc,
                                      Expr *CastExpr,
                                      SourceLocation RParenLoc);

enum ARCConversionResult { ACR_okay, ACR_unbridged, ACR_error };

/// Checks for invalid conversions and casts between
/// retainable pointers and other pointer kinds for ARC and Weak.
ARCConversionResult CheckObjCConversion(SourceRange castRange,
                                        QualType castType, Expr *&op,
                                        CheckedConversionKind CCK,
                                        bool Diagnose = true,
                                        bool DiagnoseCFAudited = false,
                                        BinaryOperatorKind Opc = BO_PtrMemD);

Expr *stripARCUnbridgedCast(Expr *e);
void diagnoseARCUnbridgedCast(Expr *e);

bool CheckObjCARCUnavailableWeakConversion(QualType castType,
                                           QualType ExprType);

/// checkRetainCycles - Check whether an Objective-C message send
/// might create an obvious retain cycle.
void checkRetainCycles(ObjCMessageExpr *msg);
void checkRetainCycles(Expr *receiver, Expr *argument);
void checkRetainCycles(VarDecl *Var, Expr *Init);

/// checkUnsafeAssigns - Check whether +1 expr is being assigned
/// to weak/__unsafe_unretained type.
bool checkUnsafeAssigns(SourceLocation Loc, QualType LHS, Expr *RHS);

/// checkUnsafeExprAssigns - Check whether +1 expr is being assigned
/// to weak/__unsafe_unretained expression.
void checkUnsafeExprAssigns(SourceLocation Loc, Expr *LHS, Expr *RHS);

/// CheckMessageArgumentTypes - Check types in an Obj-C message send.
/// \param Method - May be null.
/// \param [out] ReturnType - The return type of the send.
/// \return true iff there were any incompatible types.
bool CheckMessageArgumentTypes(const Expr *Receiver, QualType ReceiverType,
                               MultiExprArg Args, Selector Sel,
                               ArrayRef<SourceLocation> SelectorLocs,
                               ObjCMethodDecl *Method, bool isClassMessage,
                               bool isSuperMessage, SourceLocation lbrac,
                               SourceLocation rbrac, SourceRange RecRange,
                               QualType &ReturnType, ExprValueKind &VK);

/// Determine the result of a message send expression based on
/// the type of the receiver, the method expected to receive the message,
/// and the form of the message send.
QualType getMessageSendResultType(const Expr *Receiver,
                                  QualType ReceiverType,
                                  ObjCMethodDecl *Method,
                                  bool isClassMessage, bool isSuperMessage);

/// If the given expression involves a message send to a method
/// with a related result type, emit a note describing what happened.
void EmitRelatedResultTypeNote(const Expr *E);

/// Given that we had incompatible pointer types in a return
/// statement, check whether we're in a method with a related result
/// type, and if so, emit a note describing what happened.
void EmitRelatedResultTypeNoteForReturn(QualType destType);

/// Result of checking a condition expression (or condition variable) for an
/// 'if', 'while', 'for', 'do', 'switch', or 'if constexpr' statement.
/// Bundles the condition variable (if any), the converted condition
/// expression, and — for constexpr conditions — its known constant value.
class ConditionResult {
  Decl *ConditionVar;
  FullExprArg Condition;
  bool Invalid;
  // HasKnownValue/KnownValue are only meaningful for constexpr conditions
  // whose value could be evaluated at parse time.
  bool HasKnownValue;
  bool KnownValue;

  friend class Sema;
  ConditionResult(Sema &S, Decl *ConditionVar, FullExprArg Condition,
                  bool IsConstexpr)
      : ConditionVar(ConditionVar), Condition(Condition), Invalid(false),
        HasKnownValue(IsConstexpr && Condition.get() &&
                      !Condition.get()->isValueDependent()),
        KnownValue(HasKnownValue &&
                   !!Condition.get()->EvaluateKnownConstInt(S.Context)) {}
  explicit ConditionResult(bool Invalid)
      : ConditionVar(nullptr), Condition(nullptr), Invalid(Invalid),
        HasKnownValue(false), KnownValue(false) {}

public:
  ConditionResult() : ConditionResult(false) {}
  bool isInvalid() const { return Invalid; }
  /// Returns the condition variable (may be null) and the converted
  /// condition expression.
  std::pair<VarDecl *, Expr *> get() const {
    return std::make_pair(cast_or_null<VarDecl>(ConditionVar),
                          Condition.get());
  }
  /// Returns the compile-time value of the condition, if it is known
  /// (constexpr conditions only); None otherwise.
  llvm::Optional<bool> getKnownValue() const {
    if (!HasKnownValue)
      return None;
    return KnownValue;
  }
};
/// Returns an invalid ConditionResult, signalling a condition that failed
/// to check.
static ConditionResult ConditionError() { return ConditionResult(true); }

enum class ConditionKind {
  Boolean,     ///< A boolean condition, from 'if', 'while', 'for', or 'do'.
  ConstexprIf, ///< A constant boolean condition from 'if constexpr'.
  Switch       ///< An integral condition for a 'switch' statement.
};

ConditionResult ActOnCondition(Scope *S, SourceLocation Loc,
                               Expr *SubExpr, ConditionKind CK);

ConditionResult ActOnConditionVariable(Decl *ConditionVar,
                                       SourceLocation StmtLoc,
                                       ConditionKind CK);

DeclResult ActOnCXXConditionDeclaration(Scope *S, Declarator &D);

ExprResult CheckConditionVariable(VarDecl *ConditionVar,
                                  SourceLocation StmtLoc,
                                  ConditionKind CK);
ExprResult CheckSwitchCondition(SourceLocation SwitchLoc, Expr *Cond);

/// CheckBooleanCondition - Diagnose problems involving the use of
/// the given expression as a boolean condition (e.g. in an if
/// statement). Also performs the standard function and array
/// decays, possibly changing the input variable.
///
/// \param Loc - A location associated with the condition, e.g. the
/// 'if' keyword.
/// \return true iff there were any errors
ExprResult CheckBooleanCondition(SourceLocation Loc, Expr *E,
                                 bool IsConstexpr = false);

/// ActOnExplicitBoolSpecifier - Build an ExplicitSpecifier from an expression
/// found in an explicit(bool) specifier.
ExplicitSpecifier ActOnExplicitBoolSpecifier(Expr *E);

/// tryResolveExplicitSpecifier - Attempt to resolve the explicit specifier.
/// Returns true if the explicit specifier is now resolved.
bool tryResolveExplicitSpecifier(ExplicitSpecifier &ExplicitSpec);

/// DiagnoseAssignmentAsCondition - Given that an expression is
/// being used as a boolean condition, warn if it's an assignment.
void DiagnoseAssignmentAsCondition(Expr *E);

/// Redundant parentheses over an equality comparison can indicate
/// that the user intended an assignment used as condition.
void DiagnoseEqualityWithExtraParens(ParenExpr *ParenE);

/// CheckCXXBooleanCondition - Returns true if conversion to bool is invalid.
ExprResult CheckCXXBooleanCondition(Expr *CondExpr, bool IsConstexpr = false);

/// ConvertIntegerToTypeWarnOnOverflow - Convert the specified APInt to have
/// the specified width and sign. If an overflow occurs, detect it and emit
/// the specified diagnostic.
void ConvertIntegerToTypeWarnOnOverflow(llvm::APSInt &OldVal,
                                        unsigned NewWidth, bool NewSign,
                                        SourceLocation Loc, unsigned DiagID);

/// Checks that the Objective-C declaration is declared in the global scope.
/// Emits an error and marks the declaration as invalid if it's not declared
/// in the global scope.
bool CheckObjCDeclScope(Decl *D);

/// Abstract base class used for diagnosing integer constant
/// expression violations.
class VerifyICEDiagnoser {
public:
  /// When true, suppress all diagnostics from this diagnoser.
  bool Suppress;

  VerifyICEDiagnoser(bool Suppress = false) : Suppress(Suppress) { }

  /// Emitted when the expression is not an integer constant expression.
  virtual void diagnoseNotICE(Sema &S, SourceLocation Loc,
                              SourceRange SR) = 0;
  /// Emitted when the expression folds to a constant but is not a
  /// strict ICE.
  virtual void diagnoseFold(Sema &S, SourceLocation Loc, SourceRange SR);
  virtual ~VerifyICEDiagnoser() { }
};

/// VerifyIntegerConstantExpression - Verifies that an expression is an ICE,
/// and reports the appropriate diagnostics. Returns false on success.
/// Can optionally return the value of the expression.
ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
                                           VerifyICEDiagnoser &Diagnoser,
                                           bool AllowFold = true);
ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
                                           unsigned DiagID,
                                           bool AllowFold = true);
ExprResult VerifyIntegerConstantExpression(Expr *E,
                                           llvm::APSInt *Result = nullptr);

/// VerifyBitField - verifies that a bit field expression is an ICE and has
/// the correct width, and that the field type is valid.
/// Returns false on success.
/// Can optionally return whether the bit-field is of width 0
ExprResult VerifyBitField(SourceLocation FieldLoc,
                          IdentifierInfo *FieldName, QualType FieldTy,
                          bool IsMsStruct, Expr *BitWidth,
                          bool *ZeroWidth = nullptr);

private:
unsigned ForceCUDAHostDeviceDepth = 0;

public:
/// Increments our count of the number of times we've seen a pragma forcing
/// functions to be __host__ __device__. So long as this count is greater
/// than zero, all functions encountered will be __host__ __device__.
void PushForceCUDAHostDevice();

/// Decrements our count of the number of times we've seen a pragma forcing
/// functions to be __host__ __device__. Returns false if the count is 0
/// before decrementing, so you can emit an error.
bool PopForceCUDAHostDevice();

/// Diagnostics that are emitted only if we discover that the given function
/// must be codegen'ed. Because handling these correctly adds overhead to
/// compilation, this is currently only enabled for CUDA compilations.
llvm::DenseMap<CanonicalDeclPtr<FunctionDecl>,
               std::vector<PartialDiagnosticAt>>
    DeviceDeferredDiags;

/// A pair of a canonical FunctionDecl and a SourceLocation. When used as the
/// key in a hashtable, both the FD and location are hashed.
struct FunctionDeclAndLoc {
  CanonicalDeclPtr<FunctionDecl> FD;
  SourceLocation Loc;
};

/// FunctionDecls and SourceLocations for which CheckCUDACall has emitted a
/// (maybe deferred) "bad call" diagnostic. We use this to avoid emitting the
/// same deferred diag twice.
llvm::DenseSet<FunctionDeclAndLoc> LocsWithCUDACallDiags;

/// An inverse call graph, mapping known-emitted functions to one of their
/// known-emitted callers (plus the location of the call).
///
/// Functions that we can tell a priori must be emitted aren't added to this
/// map.
llvm::DenseMap</* Callee = */ CanonicalDeclPtr<FunctionDecl>,
               /* Caller = */ FunctionDeclAndLoc>
    DeviceKnownEmittedFns;

/// A partial call graph maintained during CUDA/OpenMP device code compilation
/// to support deferred diagnostics.
///
/// Functions are only added here if, at the time they're considered, they are
/// not known-emitted. As soon as we discover that a function is
/// known-emitted, we remove it and everything it transitively calls from this
/// set and add those functions to DeviceKnownEmittedFns.
llvm::DenseMap</* Caller = */ CanonicalDeclPtr<FunctionDecl>,
               /* Callees = */ llvm::MapVector<CanonicalDeclPtr<FunctionDecl>,
                                               SourceLocation>>
    DeviceCallGraph;

/// Diagnostic builder for CUDA/OpenMP devices errors which may or may not be
/// deferred.
///
/// In CUDA, there exist constructs (e.g. variable-length arrays, try/catch)
/// which are not allowed to appear inside __device__ functions and are
/// allowed to appear in __host__ __device__ functions only if the host+device
/// function is never codegen'ed.
///
/// To handle this, we use the notion of "deferred diagnostics", where we
/// attach a diagnostic to a FunctionDecl that's emitted iff it's codegen'ed.
///
/// This class lets you emit either a regular diagnostic, a deferred
/// diagnostic, or no diagnostic at all, according to an argument you pass to
/// its constructor, thus simplifying the process of creating these "maybe
/// deferred" diagnostics.
class DeviceDiagBuilder {
public:
  enum Kind {
    /// Emit no diagnostics.
    K_Nop,
    /// Emit the diagnostic immediately (i.e., behave like Sema::Diag()).
    K_Immediate,
    /// Emit the diagnostic immediately, and, if it's a warning or error, also
    /// emit a call stack showing how this function can be reached by an a
    /// priori known-emitted function.
    K_ImmediateWithCallStack,
    /// Create a deferred diagnostic, which is emitted only if the function
    /// it's attached to is codegen'ed. Also emit a call stack as with
    /// K_ImmediateWithCallStack.
    K_Deferred
  };

  DeviceDiagBuilder(Kind K, SourceLocation Loc, unsigned DiagID,
                    FunctionDecl *Fn, Sema &S);
  DeviceDiagBuilder(DeviceDiagBuilder &&D);
  DeviceDiagBuilder(const DeviceDiagBuilder &) = default;
  ~DeviceDiagBuilder();

  /// Convertible to bool: True if we immediately emitted an error, false if
  /// we didn't emit an error or we created a deferred error.
  ///
  /// Example usage:
  ///
  ///   if (DeviceDiagBuilder(...) << foo << bar)
  ///     return ExprError();
  ///
  /// But see CUDADiagIfDeviceCode() and CUDADiagIfHostCode() -- you probably
  /// want to use these instead of creating a DeviceDiagBuilder yourself.
  operator bool() const { return ImmediateDiag.hasValue(); }

  template <typename T>
  friend const DeviceDiagBuilder &operator<<(const DeviceDiagBuilder &Diag,
                                             const T &Value) {
    if (Diag.ImmediateDiag.hasValue())
      *Diag.ImmediateDiag << Value;
    else if (Diag.PartialDiagId.hasValue())
      Diag.S.DeviceDeferredDiags[Diag.Fn][*Diag.PartialDiagId].second
          << Value;
    return Diag;
  }

private:
  Sema &S;
  SourceLocation Loc;
  unsigned DiagID;
  FunctionDecl *Fn;
  bool ShowCallStack;

  // Invariant: At most one of these Optionals has a value.
  // FIXME: Switch these to a Variant once that exists.
  llvm::Optional<SemaDiagnosticBuilder> ImmediateDiag;
  llvm::Optional<unsigned> PartialDiagId;
};

/// Indicate that this function (and thus everything it transitively calls)
/// will be codegen'ed, and emit any deferred diagnostics on this function and
/// its (transitive) callees.
void markKnownEmitted(
    Sema &S, FunctionDecl *OrigCaller, FunctionDecl *OrigCallee,
    SourceLocation OrigLoc,
    const llvm::function_ref<bool(Sema &, FunctionDecl *)> IsKnownEmitted);

/// Creates a DeviceDiagBuilder that emits the diagnostic if the current context
/// is "used as device code".
///
/// - If CurContext is a __host__ function, does not emit any diagnostics.
/// - If CurContext is a __device__ or __global__ function, emits the
///   diagnostics immediately.
/// - If CurContext is a __host__ __device__ function and we are compiling for
///   the device, creates a diagnostic which is emitted if and when we realize
///   that the function will be codegen'ed.
///
/// Example usage:
///
///  // Variable-length arrays are not allowed in CUDA device code.
///  if (CUDADiagIfDeviceCode(Loc, diag::err_cuda_vla) << CurrentCUDATarget())
///    return ExprError();
///  // Otherwise, continue parsing as normal.
DeviceDiagBuilder CUDADiagIfDeviceCode(SourceLocation Loc, unsigned DiagID);

/// Creates a DeviceDiagBuilder that emits the diagnostic if the current context
/// is "used as host code".
///
/// Same as CUDADiagIfDeviceCode, with "host" and "device" switched.
DeviceDiagBuilder CUDADiagIfHostCode(SourceLocation Loc, unsigned DiagID);

/// Creates a DeviceDiagBuilder that emits the diagnostic if the current
/// context is "used as device code".
///
/// - If CurContext is a `declare target` function or it is known that the
/// function is emitted for the device, emits the diagnostics immediately.
/// - If CurContext is a non-`declare target` function and we are compiling
///   for the device, creates a diagnostic which is emitted if and when we
///   realize that the function will be codegen'ed.
/// /// Example usage: /// /// // Variable-length arrays are not allowed in NVPTX device code. /// if (diagIfOpenMPDeviceCode(Loc, diag::err_vla_unsupported)) /// return ExprError(); /// // Otherwise, continue parsing as normal. DeviceDiagBuilder diagIfOpenMPDeviceCode(SourceLocation Loc, unsigned DiagID); DeviceDiagBuilder targetDiag(SourceLocation Loc, unsigned DiagID); enum CUDAFunctionTarget { CFT_Device, CFT_Global, CFT_Host, CFT_HostDevice, CFT_InvalidTarget }; /// Determines whether the given function is a CUDA device/host/kernel/etc. /// function. /// /// Use this rather than examining the function's attributes yourself -- you /// will get it wrong. Returns CFT_Host if D is null. CUDAFunctionTarget IdentifyCUDATarget(const FunctionDecl *D, bool IgnoreImplicitHDAttr = false); CUDAFunctionTarget IdentifyCUDATarget(const ParsedAttributesView &Attrs); /// Gets the CUDA target for the current context. CUDAFunctionTarget CurrentCUDATarget() { return IdentifyCUDATarget(dyn_cast<FunctionDecl>(CurContext)); } // CUDA function call preference. Must be ordered numerically from // worst to best. enum CUDAFunctionPreference { CFP_Never, // Invalid caller/callee combination. CFP_WrongSide, // Calls from host-device to host or device // function that do not match current compilation // mode. CFP_HostDevice, // Any calls to host/device functions. CFP_SameSide, // Calls from host-device to host or device // function matching current compilation mode. CFP_Native, // host-to-host or device-to-device calls. }; /// Identifies relative preference of a given Caller/Callee /// combination, based on their host/device attributes. /// \param Caller function which needs address of \p Callee. /// nullptr in case of global context. /// \param Callee target function /// /// \returns preference value for particular Caller/Callee combination. 
CUDAFunctionPreference IdentifyCUDAPreference(const FunctionDecl *Caller, const FunctionDecl *Callee); /// Determines whether Caller may invoke Callee, based on their CUDA /// host/device attributes. Returns false if the call is not allowed. /// /// Note: Will return true for CFP_WrongSide calls. These may appear in /// semantically correct CUDA programs, but only if they're never codegen'ed. bool IsAllowedCUDACall(const FunctionDecl *Caller, const FunctionDecl *Callee) { return IdentifyCUDAPreference(Caller, Callee) != CFP_Never; } /// May add implicit CUDAHostAttr and CUDADeviceAttr attributes to FD, /// depending on FD and the current compilation settings. void maybeAddCUDAHostDeviceAttrs(FunctionDecl *FD, const LookupResult &Previous); public: /// Check whether we're allowed to call Callee from the current context. /// /// - If the call is never allowed in a semantically-correct program /// (CFP_Never), emits an error and returns false. /// /// - If the call is allowed in semantically-correct programs, but only if /// it's never codegen'ed (CFP_WrongSide), creates a deferred diagnostic to /// be emitted if and when the caller is codegen'ed, and returns true. /// /// Will only create deferred diagnostics for a given SourceLocation once, /// so you can safely call this multiple times without generating duplicate /// deferred errors. /// /// - Otherwise, returns true without emitting any diagnostics. bool CheckCUDACall(SourceLocation Loc, FunctionDecl *Callee); /// Set __device__ or __host__ __device__ attributes on the given lambda /// operator() method. /// /// CUDA lambdas declared inside __device__ or __global__ functions inherit /// the __device__ attribute. Similarly, lambdas inside __host__ __device__ /// functions become __host__ __device__ themselves. void CUDASetLambdaAttrs(CXXMethodDecl *Method); /// Finds a function in \p Matches with highest calling priority /// from \p Caller context and erases all functions with lower /// calling priority. 
void EraseUnwantedCUDAMatches( const FunctionDecl *Caller, SmallVectorImpl<std::pair<DeclAccessPair, FunctionDecl *>> &Matches); /// Given a implicit special member, infer its CUDA target from the /// calls it needs to make to underlying base/field special members. /// \param ClassDecl the class for which the member is being created. /// \param CSM the kind of special member. /// \param MemberDecl the special member itself. /// \param ConstRHS true if this is a copy operation with a const object on /// its RHS. /// \param Diagnose true if this call should emit diagnostics. /// \return true if there was an error inferring. /// The result of this call is implicit CUDA target attribute(s) attached to /// the member declaration. bool inferCUDATargetForImplicitSpecialMember(CXXRecordDecl *ClassDecl, CXXSpecialMember CSM, CXXMethodDecl *MemberDecl, bool ConstRHS, bool Diagnose); /// \return true if \p CD can be considered empty according to CUDA /// (E.2.3.1 in CUDA 7.5 Programming guide). bool isEmptyCudaConstructor(SourceLocation Loc, CXXConstructorDecl *CD); bool isEmptyCudaDestructor(SourceLocation Loc, CXXDestructorDecl *CD); // \brief Checks that initializers of \p Var satisfy CUDA restrictions. In // case of error emits appropriate diagnostic and invalidates \p Var. // // \details CUDA allows only empty constructors as initializers for global // variables (see E.2.3.1, CUDA 7.5). The same restriction also applies to all // __shared__ variables whether they are local or not (they all are implicitly // static in CUDA). One exception is that CUDA allows constant initializers // for __constant__ and __device__ variables. void checkAllowedCUDAInitializer(VarDecl *VD); /// Check whether NewFD is a valid overload for CUDA. Emits /// diagnostics and invalidates NewFD if not. void checkCUDATargetOverload(FunctionDecl *NewFD, const LookupResult &Previous); /// Copies target attributes from the template TD to the function FD. 
void inheritCUDATargetAttrs(FunctionDecl *FD, const FunctionTemplateDecl &TD); /// Returns the name of the launch configuration function. This is the name /// of the function that will be called to configure kernel call, with the /// parameters specified via <<<>>>. std::string getCudaConfigureFuncName() const; /// \name Code completion //@{ /// Describes the context in which code completion occurs. enum ParserCompletionContext { /// Code completion occurs at top-level or namespace context. PCC_Namespace, /// Code completion occurs within a class, struct, or union. PCC_Class, /// Code completion occurs within an Objective-C interface, protocol, /// or category. PCC_ObjCInterface, /// Code completion occurs within an Objective-C implementation or /// category implementation PCC_ObjCImplementation, /// Code completion occurs within the list of instance variables /// in an Objective-C interface, protocol, category, or implementation. PCC_ObjCInstanceVariableList, /// Code completion occurs following one or more template /// headers. PCC_Template, /// Code completion occurs following one or more template /// headers within a class. PCC_MemberTemplate, /// Code completion occurs within an expression. PCC_Expression, /// Code completion occurs within a statement, which may /// also be an expression or a declaration. PCC_Statement, /// Code completion occurs at the beginning of the /// initialization statement (or expression) in a for loop. PCC_ForInit, /// Code completion occurs within the condition of an if, /// while, switch, or for statement. PCC_Condition, /// Code completion occurs within the body of a function on a /// recovery path, where we do not have a specific handle on our position /// in the grammar. PCC_RecoveryInFunction, /// Code completion occurs where only a type is permitted. PCC_Type, /// Code completion occurs in a parenthesized expression, which /// might also be a type cast. 
PCC_ParenthesizedExpression, /// Code completion occurs within a sequence of declaration /// specifiers within a function, method, or block. PCC_LocalDeclarationSpecifiers }; void CodeCompleteModuleImport(SourceLocation ImportLoc, ModuleIdPath Path); void CodeCompleteOrdinaryName(Scope *S, ParserCompletionContext CompletionContext); void CodeCompleteDeclSpec(Scope *S, DeclSpec &DS, bool AllowNonIdentifiers, bool AllowNestedNameSpecifiers); struct CodeCompleteExpressionData; void CodeCompleteExpression(Scope *S, const CodeCompleteExpressionData &Data); void CodeCompleteExpression(Scope *S, QualType PreferredType, bool IsParenthesized = false); void CodeCompleteMemberReferenceExpr(Scope *S, Expr *Base, Expr *OtherOpBase, SourceLocation OpLoc, bool IsArrow, bool IsBaseExprStatement, QualType PreferredType); void CodeCompletePostfixExpression(Scope *S, ExprResult LHS, QualType PreferredType); void CodeCompleteTag(Scope *S, unsigned TagSpec); void CodeCompleteTypeQualifiers(DeclSpec &DS); void CodeCompleteFunctionQualifiers(DeclSpec &DS, Declarator &D, const VirtSpecifiers *VS = nullptr); void CodeCompleteBracketDeclarator(Scope *S); void CodeCompleteCase(Scope *S); /// Reports signatures for a call to CodeCompleteConsumer and returns the /// preferred type for the current argument. Returned type can be null. 
QualType ProduceCallSignatureHelp(Scope *S, Expr *Fn, ArrayRef<Expr *> Args, SourceLocation OpenParLoc); QualType ProduceConstructorSignatureHelp(Scope *S, QualType Type, SourceLocation Loc, ArrayRef<Expr *> Args, SourceLocation OpenParLoc); QualType ProduceCtorInitMemberSignatureHelp(Scope *S, Decl *ConstructorDecl, CXXScopeSpec SS, ParsedType TemplateTypeTy, ArrayRef<Expr *> ArgExprs, IdentifierInfo *II, SourceLocation OpenParLoc); void CodeCompleteInitializer(Scope *S, Decl *D); void CodeCompleteAfterIf(Scope *S); void CodeCompleteQualifiedId(Scope *S, CXXScopeSpec &SS, bool EnteringContext, QualType BaseType, QualType PreferredType); void CodeCompleteUsing(Scope *S); void CodeCompleteUsingDirective(Scope *S); void CodeCompleteNamespaceDecl(Scope *S); void CodeCompleteNamespaceAliasDecl(Scope *S); void CodeCompleteOperatorName(Scope *S); void CodeCompleteConstructorInitializer( Decl *Constructor, ArrayRef<CXXCtorInitializer *> Initializers); void CodeCompleteLambdaIntroducer(Scope *S, LambdaIntroducer &Intro, bool AfterAmpersand); void CodeCompleteObjCAtDirective(Scope *S); void CodeCompleteObjCAtVisibility(Scope *S); void CodeCompleteObjCAtStatement(Scope *S); void CodeCompleteObjCAtExpression(Scope *S); void CodeCompleteObjCPropertyFlags(Scope *S, ObjCDeclSpec &ODS); void CodeCompleteObjCPropertyGetter(Scope *S); void CodeCompleteObjCPropertySetter(Scope *S); void CodeCompleteObjCPassingType(Scope *S, ObjCDeclSpec &DS, bool IsParameter); void CodeCompleteObjCMessageReceiver(Scope *S); void CodeCompleteObjCSuperMessage(Scope *S, SourceLocation SuperLoc, ArrayRef<IdentifierInfo *> SelIdents, bool AtArgumentExpression); void CodeCompleteObjCClassMessage(Scope *S, ParsedType Receiver, ArrayRef<IdentifierInfo *> SelIdents, bool AtArgumentExpression, bool IsSuper = false); void CodeCompleteObjCInstanceMessage(Scope *S, Expr *Receiver, ArrayRef<IdentifierInfo *> SelIdents, bool AtArgumentExpression, ObjCInterfaceDecl *Super = nullptr); void 
CodeCompleteObjCForCollection(Scope *S, DeclGroupPtrTy IterationVar); void CodeCompleteObjCSelector(Scope *S, ArrayRef<IdentifierInfo *> SelIdents); void CodeCompleteObjCProtocolReferences( ArrayRef<IdentifierLocPair> Protocols); void CodeCompleteObjCProtocolDecl(Scope *S); void CodeCompleteObjCInterfaceDecl(Scope *S); void CodeCompleteObjCSuperclass(Scope *S, IdentifierInfo *ClassName, SourceLocation ClassNameLoc); void CodeCompleteObjCImplementationDecl(Scope *S); void CodeCompleteObjCInterfaceCategory(Scope *S, IdentifierInfo *ClassName, SourceLocation ClassNameLoc); void CodeCompleteObjCImplementationCategory(Scope *S, IdentifierInfo *ClassName, SourceLocation ClassNameLoc); void CodeCompleteObjCPropertyDefinition(Scope *S); void CodeCompleteObjCPropertySynthesizeIvar(Scope *S, IdentifierInfo *PropertyName); void CodeCompleteObjCMethodDecl(Scope *S, Optional<bool> IsInstanceMethod, ParsedType ReturnType); void CodeCompleteObjCMethodDeclSelector(Scope *S, bool IsInstanceMethod, bool AtParameterName, ParsedType ReturnType, ArrayRef<IdentifierInfo *> SelIdents); void CodeCompleteObjCClassPropertyRefExpr(Scope *S, IdentifierInfo &ClassName, SourceLocation ClassNameLoc, bool IsBaseExprStatement); void CodeCompletePreprocessorDirective(bool InConditional); void CodeCompleteInPreprocessorConditionalExclusion(Scope *S); void CodeCompletePreprocessorMacroName(bool IsDefinition); void CodeCompletePreprocessorExpression(); void CodeCompletePreprocessorMacroArgument(Scope *S, IdentifierInfo *Macro, MacroInfo *MacroInfo, unsigned Argument); void CodeCompleteIncludedFile(llvm::StringRef Dir, bool IsAngled); void CodeCompleteNaturalLanguage(); void CodeCompleteAvailabilityPlatformName(); void GatherGlobalCodeCompletions(CodeCompletionAllocator &Allocator, CodeCompletionTUInfo &CCTUInfo, SmallVectorImpl<CodeCompletionResult> &Results); //@} //===--------------------------------------------------------------------===// // Extra semantic analysis beyond the C type system public: 
SourceLocation getLocationOfStringLiteralByte(const StringLiteral *SL, unsigned ByteNo) const; private: void CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr, const ArraySubscriptExpr *ASE=nullptr, bool AllowOnePastEnd=true, bool IndexNegated=false); void CheckArrayAccess(const Expr *E); // Used to grab the relevant information from a FormatAttr and a // FunctionDeclaration. struct FormatStringInfo { unsigned FormatIdx; unsigned FirstDataArg; bool HasVAListArg; }; static bool getFormatStringInfo(const FormatAttr *Format, bool IsCXXMember, FormatStringInfo *FSI); bool CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall, const FunctionProtoType *Proto); bool CheckObjCMethodCall(ObjCMethodDecl *Method, SourceLocation loc, ArrayRef<const Expr *> Args); bool CheckPointerCall(NamedDecl *NDecl, CallExpr *TheCall, const FunctionProtoType *Proto); bool CheckOtherCall(CallExpr *TheCall, const FunctionProtoType *Proto); void CheckConstructorCall(FunctionDecl *FDecl, ArrayRef<const Expr *> Args, const FunctionProtoType *Proto, SourceLocation Loc); void checkCall(NamedDecl *FDecl, const FunctionProtoType *Proto, const Expr *ThisArg, ArrayRef<const Expr *> Args, bool IsMemberFunction, SourceLocation Loc, SourceRange Range, VariadicCallType CallType); bool CheckObjCString(Expr *Arg); ExprResult CheckOSLogFormatStringArg(Expr *Arg); ExprResult CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID, CallExpr *TheCall); void checkFortifiedBuiltinMemoryFunction(FunctionDecl *FD, CallExpr *TheCall); bool CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall, unsigned MaxWidth); bool CheckNeonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckARMBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckAArch64BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckHexagonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckHexagonBuiltinCpu(unsigned BuiltinID, CallExpr *TheCall); bool 
CheckHexagonBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall); bool CheckMipsBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckSystemZBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckX86BuiltinRoundingOrSAE(unsigned BuiltinID, CallExpr *TheCall); bool CheckX86BuiltinGatherScatterScale(unsigned BuiltinID, CallExpr *TheCall); bool CheckX86BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckPPCBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool SemaBuiltinVAStart(unsigned BuiltinID, CallExpr *TheCall); bool SemaBuiltinVAStartARMMicrosoft(CallExpr *Call); bool SemaBuiltinUnorderedCompare(CallExpr *TheCall); bool SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs); bool SemaBuiltinVSX(CallExpr *TheCall); bool SemaBuiltinOSLogFormat(CallExpr *TheCall); public: // Used by C++ template instantiation. ExprResult SemaBuiltinShuffleVector(CallExpr *TheCall); ExprResult SemaConvertVectorExpr(Expr *E, TypeSourceInfo *TInfo, SourceLocation BuiltinLoc, SourceLocation RParenLoc); private: bool SemaBuiltinPrefetch(CallExpr *TheCall); bool SemaBuiltinAllocaWithAlign(CallExpr *TheCall); bool SemaBuiltinAssume(CallExpr *TheCall); bool SemaBuiltinAssumeAligned(CallExpr *TheCall); bool SemaBuiltinLongjmp(CallExpr *TheCall); bool SemaBuiltinSetjmp(CallExpr *TheCall); ExprResult SemaBuiltinAtomicOverloaded(ExprResult TheCallResult); ExprResult SemaBuiltinNontemporalOverloaded(ExprResult TheCallResult); ExprResult SemaAtomicOpsOverloaded(ExprResult TheCallResult, AtomicExpr::AtomicOp Op); ExprResult SemaBuiltinOperatorNewDeleteOverloaded(ExprResult TheCallResult, bool IsDelete); bool SemaBuiltinConstantArg(CallExpr *TheCall, int ArgNum, llvm::APSInt &Result); bool SemaBuiltinConstantArgRange(CallExpr *TheCall, int ArgNum, int Low, int High, bool RangeIsError = true); bool SemaBuiltinConstantArgMultiple(CallExpr *TheCall, int ArgNum, unsigned Multiple); bool SemaBuiltinARMSpecialReg(unsigned BuiltinID, 
CallExpr *TheCall, int ArgNum, unsigned ExpectedFieldNum, bool AllowName); bool SemaBuiltinARMMemoryTaggingCall(unsigned BuiltinID, CallExpr *TheCall); public: enum FormatStringType { FST_Scanf, FST_Printf, FST_NSString, FST_Strftime, FST_Strfmon, FST_Kprintf, FST_FreeBSDKPrintf, FST_OSTrace, FST_OSLog, FST_Unknown }; static FormatStringType GetFormatStringType(const FormatAttr *Format); bool FormatStringHasSArg(const StringLiteral *FExpr); static bool GetFormatNSStringIdx(const FormatAttr *Format, unsigned &Idx); private: bool CheckFormatArguments(const FormatAttr *Format, ArrayRef<const Expr *> Args, bool IsCXXMember, VariadicCallType CallType, SourceLocation Loc, SourceRange Range, llvm::SmallBitVector &CheckedVarArgs); bool CheckFormatArguments(ArrayRef<const Expr *> Args, bool HasVAListArg, unsigned format_idx, unsigned firstDataArg, FormatStringType Type, VariadicCallType CallType, SourceLocation Loc, SourceRange range, llvm::SmallBitVector &CheckedVarArgs); void CheckAbsoluteValueFunction(const CallExpr *Call, const FunctionDecl *FDecl); void CheckMaxUnsignedZero(const CallExpr *Call, const FunctionDecl *FDecl); void CheckMemaccessArguments(const CallExpr *Call, unsigned BId, IdentifierInfo *FnName); void CheckStrlcpycatArguments(const CallExpr *Call, IdentifierInfo *FnName); void CheckStrncatArguments(const CallExpr *Call, IdentifierInfo *FnName); void CheckReturnValExpr(Expr *RetValExp, QualType lhsType, SourceLocation ReturnLoc, bool isObjCMethod = false, const AttrVec *Attrs = nullptr, const FunctionDecl *FD = nullptr); public: void CheckFloatComparison(SourceLocation Loc, Expr *LHS, Expr *RHS); private: void CheckImplicitConversions(Expr *E, SourceLocation CC = SourceLocation()); void CheckBoolLikeConversion(Expr *E, SourceLocation CC); void CheckForIntOverflow(Expr *E); void CheckUnsequencedOperations(Expr *E); /// Perform semantic checks on a completed expression. This will either /// be a full-expression or a default argument expression. 
void CheckCompletedExpr(Expr *E, SourceLocation CheckLoc = SourceLocation(), bool IsConstexpr = false); void CheckBitFieldInitialization(SourceLocation InitLoc, FieldDecl *Field, Expr *Init); /// Check if there is a field shadowing. void CheckShadowInheritedFields(const SourceLocation &Loc, DeclarationName FieldName, const CXXRecordDecl *RD, bool DeclIsField = true); /// Check if the given expression contains 'break' or 'continue' /// statement that produces control flow different from GCC. void CheckBreakContinueBinding(Expr *E); /// Check whether receiver is mutable ObjC container which /// attempts to add itself into the container void CheckObjCCircularContainer(ObjCMessageExpr *Message); void AnalyzeDeleteExprMismatch(const CXXDeleteExpr *DE); void AnalyzeDeleteExprMismatch(FieldDecl *Field, SourceLocation DeleteLoc, bool DeleteWasArrayForm); public: /// Register a magic integral constant to be used as a type tag. void RegisterTypeTagForDatatype(const IdentifierInfo *ArgumentKind, uint64_t MagicValue, QualType Type, bool LayoutCompatible, bool MustBeNull); struct TypeTagData { TypeTagData() {} TypeTagData(QualType Type, bool LayoutCompatible, bool MustBeNull) : Type(Type), LayoutCompatible(LayoutCompatible), MustBeNull(MustBeNull) {} QualType Type; /// If true, \c Type should be compared with other expression's types for /// layout-compatibility. unsigned LayoutCompatible : 1; unsigned MustBeNull : 1; }; /// A pair of ArgumentKind identifier and magic value. This uniquely /// identifies the magic value. typedef std::pair<const IdentifierInfo *, uint64_t> TypeTagMagicValue; private: /// A map from magic value to type information. std::unique_ptr<llvm::DenseMap<TypeTagMagicValue, TypeTagData>> TypeTagForDatatypeMagicValues; /// Peform checks on a call of a function with argument_with_type_tag /// or pointer_with_type_tag attributes. 
void CheckArgumentWithTypeTag(const ArgumentWithTypeTagAttr *Attr, const ArrayRef<const Expr *> ExprArgs, SourceLocation CallSiteLoc); /// Check if we are taking the address of a packed field /// as this may be a problem if the pointer value is dereferenced. void CheckAddressOfPackedMember(Expr *rhs); /// The parser's current scope. /// /// The parser maintains this state here. Scope *CurScope; mutable IdentifierInfo *Ident_super; mutable IdentifierInfo *Ident___float128; /// Nullability type specifiers. IdentifierInfo *Ident__Nonnull = nullptr; IdentifierInfo *Ident__Nullable = nullptr; IdentifierInfo *Ident__Null_unspecified = nullptr; IdentifierInfo *Ident_NSError = nullptr; /// The handler for the FileChanged preprocessor events. /// /// Used for diagnostics that implement custom semantic analysis for #include /// directives, like -Wpragma-pack. sema::SemaPPCallbacks *SemaPPCallbackHandler; protected: friend class Parser; friend class InitializationSequence; friend class ASTReader; friend class ASTDeclReader; friend class ASTWriter; public: /// Retrieve the keyword associated IdentifierInfo *getNullabilityKeyword(NullabilityKind nullability); /// The struct behind the CFErrorRef pointer. RecordDecl *CFError = nullptr; /// Retrieve the identifier "NSError". IdentifierInfo *getNSErrorIdent(); /// Retrieve the parser's current scope. /// /// This routine must only be used when it is certain that semantic analysis /// and the parser are in precisely the same context, which is not the case /// when, e.g., we are performing any kind of template instantiation. /// Therefore, the only safe places to use this scope are in the parser /// itself and in routines directly invoked from the parser and *never* from /// template substitution or instantiation. 
Scope *getCurScope() const { return CurScope; } void incrementMSManglingNumber() const { return CurScope->incrementMSManglingNumber(); } IdentifierInfo *getSuperIdentifier() const; IdentifierInfo *getFloat128Identifier() const; Decl *getObjCDeclContext() const; DeclContext *getCurLexicalContext() const { return OriginalLexicalContext ? OriginalLexicalContext : CurContext; } const DeclContext *getCurObjCLexicalContext() const { const DeclContext *DC = getCurLexicalContext(); // A category implicitly has the attribute of the interface. if (const ObjCCategoryDecl *CatD = dyn_cast<ObjCCategoryDecl>(DC)) DC = CatD->getClassInterface(); return DC; } /// To be used for checking whether the arguments being passed to /// function exceeds the number of parameters expected for it. static bool TooManyArguments(size_t NumParams, size_t NumArgs, bool PartialOverloading = false) { // We check whether we're just after a comma in code-completion. if (NumArgs > 0 && PartialOverloading) return NumArgs + 1 > NumParams; // If so, we view as an extra argument. return NumArgs > NumParams; } // Emitting members of dllexported classes is delayed until the class // (including field initializers) is fully parsed. 
SmallVector<CXXRecordDecl*, 4> DelayedDllExportClasses; private: class SavePendingParsedClassStateRAII { public: SavePendingParsedClassStateRAII(Sema &S) : S(S) { swapSavedState(); } ~SavePendingParsedClassStateRAII() { assert(S.DelayedOverridingExceptionSpecChecks.empty() && "there shouldn't be any pending delayed exception spec checks"); assert(S.DelayedEquivalentExceptionSpecChecks.empty() && "there shouldn't be any pending delayed exception spec checks"); assert(S.DelayedDllExportClasses.empty() && "there shouldn't be any pending delayed DLL export classes"); swapSavedState(); } private: Sema &S; decltype(DelayedOverridingExceptionSpecChecks) SavedOverridingExceptionSpecChecks; decltype(DelayedEquivalentExceptionSpecChecks) SavedEquivalentExceptionSpecChecks; decltype(DelayedDllExportClasses) SavedDllExportClasses; void swapSavedState() { SavedOverridingExceptionSpecChecks.swap( S.DelayedOverridingExceptionSpecChecks); SavedEquivalentExceptionSpecChecks.swap( S.DelayedEquivalentExceptionSpecChecks); SavedDllExportClasses.swap(S.DelayedDllExportClasses); } }; /// Helper class that collects misaligned member designations and /// their location info for delayed diagnostics. struct MisalignedMember { Expr *E; RecordDecl *RD; ValueDecl *MD; CharUnits Alignment; MisalignedMember() : E(), RD(), MD(), Alignment() {} MisalignedMember(Expr *E, RecordDecl *RD, ValueDecl *MD, CharUnits Alignment) : E(E), RD(RD), MD(MD), Alignment(Alignment) {} explicit MisalignedMember(Expr *E) : MisalignedMember(E, nullptr, nullptr, CharUnits()) {} bool operator==(const MisalignedMember &m) { return this->E == m.E; } }; /// Small set of gathered accesses to potentially misaligned members /// due to the packed attribute. SmallVector<MisalignedMember, 4> MisalignedMembers; /// Adds an expression to the set of gathered misaligned members. 
  void AddPotentialMisalignedMembers(Expr *E, RecordDecl *RD, ValueDecl *MD,
                                     CharUnits Alignment);

public:
  /// Diagnoses the current set of gathered accesses. This typically
  /// happens at full expression level. The set is cleared after emitting the
  /// diagnostics.
  void DiagnoseMisalignedMembers();

  /// This function checks if the expression is in the set of potentially
  /// misaligned members and it is converted to some pointer type T with lower
  /// or equal alignment requirements. If so it removes it. This is used when
  /// we do not want to diagnose such misaligned access (e.g. in conversions to
  /// void*).
  void DiscardMisalignedMemberAddress(const Type *T, Expr *E);

  /// This function calls Action when it determines that E designates a
  /// misaligned member due to the packed attribute. This is used to emit
  /// local diagnostics like in reference binding.
  void RefersToMemberWithReducedAlignment(
      Expr *E,
      llvm::function_ref<void(Expr *, RecordDecl *, FieldDecl *, CharUnits)>
          Action);

  /// Describes the reason a calling convention specification was ignored, used
  /// for diagnostics.
  enum class CallingConventionIgnoredReason {
    ForThisTarget = 0,
    VariadicFunction,
    ConstructorDestructor,
    BuiltinFunction
  };
};

/// RAII object that enters a new expression evaluation context.
class EnterExpressionEvaluationContext {
  Sema &Actions;
  // True when a context was actually pushed, so the destructor must pop it.
  bool Entered = true;

public:
  // Push \p NewContext, unless \p ShouldEnter is false.
  EnterExpressionEvaluationContext(
      Sema &Actions, Sema::ExpressionEvaluationContext NewContext,
      Decl *LambdaContextDecl = nullptr,
      Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext =
          Sema::ExpressionEvaluationContextRecord::EK_Other,
      bool ShouldEnter = true)
      : Actions(Actions), Entered(ShouldEnter) {
    if (Entered)
      Actions.PushExpressionEvaluationContext(NewContext, LambdaContextDecl,
                                              ExprContext);
  }

  // Push \p NewContext, reusing the enclosing lambda context declaration.
  // Always enters (Entered keeps its default of true).
  EnterExpressionEvaluationContext(
      Sema &Actions, Sema::ExpressionEvaluationContext NewContext,
      Sema::ReuseLambdaContextDecl_t,
      Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext =
          Sema::ExpressionEvaluationContextRecord::EK_Other)
      : Actions(Actions) {
    Actions.PushExpressionEvaluationContext(
        NewContext, Sema::ReuseLambdaContextDecl, ExprContext);
  }

  enum InitListTag { InitList };
  // Entering a braced-init-list: only pushes an UnevaluatedList context when
  // currently inside an unevaluated operand in C++11 or later (see below).
  EnterExpressionEvaluationContext(Sema &Actions, InitListTag,
                                   bool ShouldEnter = true)
      : Actions(Actions), Entered(false) {
    // In C++11 onwards, narrowing checks are performed on the contents of
    // braced-init-lists, even when they occur within unevaluated operands.
    // Therefore we still need to instantiate constexpr functions used in such
    // a context.
    if (ShouldEnter && Actions.isUnevaluatedContext() &&
        Actions.getLangOpts().CPlusPlus11) {
      Actions.PushExpressionEvaluationContext(
          Sema::ExpressionEvaluationContext::UnevaluatedList);
      Entered = true;
    }
  }

  ~EnterExpressionEvaluationContext() {
    if (Entered)
      Actions.PopExpressionEvaluationContext();
  }
};

DeductionFailureInfo
MakeDeductionFailureInfo(ASTContext &Context, Sema::TemplateDeductionResult TDK,
                         sema::TemplateDeductionInfo &Info);

/// Contains a late templated function.
/// Will be parsed at the end of the translation unit, used by Sema & Parser.
struct LateParsedTemplate {
  CachedTokens Toks;

  /// The template function declaration to be late parsed.
  Decl *D;
};

} // end namespace clang

namespace llvm {

// Hash a FunctionDeclAndLoc by looking at both its FunctionDecl and its
// SourceLocation.
template <> struct DenseMapInfo<clang::Sema::FunctionDeclAndLoc> {
  using FunctionDeclAndLoc = clang::Sema::FunctionDeclAndLoc;
  using FDBaseInfo =
      DenseMapInfo<clang::CanonicalDeclPtr<clang::FunctionDecl>>;

  // The empty/tombstone sentinels reuse the underlying FunctionDecl-pointer
  // sentinels, paired with a default (invalid) SourceLocation.
  static FunctionDeclAndLoc getEmptyKey() {
    return {FDBaseInfo::getEmptyKey(), clang::SourceLocation()};
  }

  static FunctionDeclAndLoc getTombstoneKey() {
    return {FDBaseInfo::getTombstoneKey(), clang::SourceLocation()};
  }

  static unsigned getHashValue(const FunctionDeclAndLoc &FDL) {
    // Mix the canonical-decl hash with the raw location encoding so that the
    // same function at different locations hashes differently.
    return hash_combine(FDBaseInfo::getHashValue(FDL.FD),
                        FDL.Loc.getRawEncoding());
  }

  static bool isEqual(const FunctionDeclAndLoc &LHS,
                      const FunctionDeclAndLoc &RHS) {
    // Keys match only when both the declaration and the location agree.
    return LHS.FD == RHS.FD && LHS.Loc == RHS.Loc;
  }
};

} // namespace llvm

#endif
scattering.c
/****************************************************************************** * * * SCATTERING.C * * * * RELATIVISTIC SCATTERING KERNEL * * * ******************************************************************************/ #include "decs.h" #if RADIATION // RADIATION HOT CROSS SECTION #define NW 220 #define NT 90 #define MINW (1.e-16) // w = h nu/m_e c^2 // #define MAXW (1.e10) #define MINT (0.001) // Theta = k_b T / m c^2, m = mass of scatterer #define MAXT (1.e11) #define MAXGAMMA (12.) #define DMUE (0.05) #define DGAMMAE (0.05) #define N_HC_TABLES (1) #define TRUNCATE_HOTCROSS (1) #if RADIATION == RADTYPE_NEUTRINOS #if MULTISCATT_TEST const char *HOTCROSS[N_HC_TABLES] = {"hotcross_multiscatt.dat"}; #else const char *HOTCROSS[N_HC_TABLES] = {"hotcross_quad.dat"}; #endif #else const char *HOTCROSS[N_HC_TABLES] = {"hotcross.dat"}; #endif double table[N_HC_TABLES][NW + 1][NT + 1]; double MAXW, dlw, dlT, lminw, lmint; void sample_gas_particle(double Ktetrad[NDIM], double Pelectron[NDIM], const struct of_microphysics *m, int type, int interaction); void sample_beta(double Thetae, double *gamma_e, double *beta_e); double sample_y(double Thetae); double sample_mu(double beta_e); double get_total_cross_section(double k, const struct of_microphysics *m, int type, int interaction, int normalized); void sample_scattered_rad(double k[NDIM], double p[NDIM], double kp[NDIM], const struct of_microphysics *m, int type, int interaction); void boost(double v[NDIM], double u[NDIM], double vp[NDIM]); void sample_cross_section(double k, double *k0p, double *cth, const struct of_microphysics *m, int type, int interaction); double total_cross_num(hc_ftype f, double w, double Thetae); double dNdgammae(double thetae, double gammae); double boostcross(hc_ftype f, double w, double mue, double gammae); void init_hc_table( hc_ftype f, double table[NW + 1][NT + 1], const char *hc_name); double interpolate_hc_table(double w, double theta, hc_ftype f, double table[NW + 1][NT + 1], const char 
    *hc_name);

// rejection sampling
void rejection_sample(dsdom_ftype f, double xmax, double k, double *k0p,
    double *cth, const struct of_microphysics *m, int type, int interaction);

// compton scattering
double sample_klein_nishina(double k0);
double klein_nishina(double a, double ap);
double hc_klein_nishina(double we, double mue);

// Neutrino physics
double hc_quad(double we, double mue);
double hc_quad_max();
double nu_cross_factor(
    double sigma, int type, int interaction, const struct of_microphysics *m);
double total_cross_ions(double sigma_hc, double A, double Z);
double nu_cross_delta(int type, int interaction);
double nu_cross(double w, double mu, const struct of_microphysics *m, int type,
    int interaction);
double nu_cross_max(const struct of_microphysics *m, int type, int interaction);

// multiscatt test
#if MULTISCATT_TEST
double hc_flat(double we, double mue);
double ms_flat(double w, double mu, const struct of_microphysics *m, int type,
    int interaction);
double ms_flat_max(const struct of_microphysics *m, int type, int interaction);
#endif

// Scattering temperature too small
// Returns nonzero when the scatterer's dimensionless temperature
// (Theta = k_B T / m c^2) falls below 10*SMALL, in which case callers bail
// out of scattering. Always returns 0 in the MULTISCATT_TEST build so the
// test configuration scatters unconditionally.
int scatt_temp_too_small(const struct of_microphysics *m) {
#if MULTISCATT_TEST
  return 0;
#else // normal scattering
#if RADIATION == RADTYPE_LIGHT
  { return m->Thetae < 10. * SMALL; }
#elif RADIATION == RADTYPE_NEUTRINOS
  {
    return (KBOL * m->T) / (ME * CL * CL) < 10.
* SMALL; } #endif // neutrinos #endif // Not multiscatt test } int scatter_superphoton(grid_prim_type P, grid_eosvar_type extra, struct of_photon *ph, double X[NDIM], double Kcov[NDIM], double Kcon[NDIM], int interaction) { const int fail = 0; const int success = 1; double Pelectron[NDIM], gcov[NDIM][NDIM], gcon[NDIM][NDIM]; double Econ[NDIM][NDIM], Ecov[NDIM][NDIM]; double Ktetrad[NDIM], Ktetrad_scatt[NDIM]; int i, j, k; int bad_scatter = 0; Xtoijk(X, &i, &j, &k); set_gcov(X, gcov); gcon_func(gcov, gcon); normalize_null(gcov, Kcon); normalize_null_cov(gcon, Kcov); // Quality control if (ph->type == TYPE_TRACER) return fail; if (Kcon[0] < 0. || Kcov[0] > 0.) return fail; DLOOP1 { if (is_practically_nan(Kcon[mu]) || is_practically_nan(Kcov[mu])) { return fail; } } double Ucon[NDIM], Ucov[NDIM], Bcon[NDIM], Bcov[NDIM]; struct of_microphysics m; get_fluid_zone(i, j, k, P, extra, &m, Ucon, Ucov, Bcon, Bcov); if (scatt_temp_too_small(&m)) return fail; make_tetrad(i, j, k, Ucon, Bcon, gcov, Econ, Ecov); coord_to_tetrad(Ecov, Kcon, Ktetrad); DLOOP1 { if (is_practically_nan(Ktetrad[mu])) { bad_scatter = 1; } } if (bad_scatter) { fprintf(stderr, "Bad tetrad!\n" "\tUcon = [%e %e %e %e]\n" "\tUcov = [%e %e %e %e]\n" "\tBcon = [%e %e %e %e]\n" "\tBcov = [%e %e %e %e]\n" "\tKtetrad = [%e %e %e %e]\n" "\tKcoord = [%e %e %e %e]\n" "\n" "\tEcon = [%e %e %e %e]\n" "\t = [%e %e %e %e]\n" "\t = [%e %e %e %e]\n" "\t = [%e %e %e %e]\n" "\n" "\tEcov = [%e %e %e %e]\n" "\t = [%e %e %e %e]\n" "\t = [%e %e %e %e]\n" "\t = [%e %e %e %e]\n", Ucon[0], Ucon[1], Ucon[2], Ucon[3], Ucov[0], Ucov[1], Ucov[2], Ucov[3], Bcon[0], Bcon[1], Bcon[2], Bcon[3], Bcov[0], Bcov[1], Bcov[2], Bcov[3], Ktetrad[0], Ktetrad[1], Ktetrad[2], Ktetrad[3], Kcon[0], Kcon[1], Kcon[2], Kcon[3], Econ[0][0], Econ[0][1], Econ[0][2], Econ[0][3], Econ[1][0], Econ[1][1], Econ[1][2], Econ[1][3], Econ[2][0], Econ[2][1], Econ[2][2], Econ[2][3], Econ[3][0], Econ[3][1], Econ[3][2], Econ[3][3], Ecov[0][0], Ecov[0][1], Ecov[0][2], 
Ecov[0][3], Ecov[1][0], Ecov[1][1], Ecov[1][2], Ecov[1][3], Ecov[2][0], Ecov[2][1], Ecov[2][2], Ecov[2][3], Ecov[3][0], Ecov[3][1], Ecov[3][2], Ecov[3][3]); return fail; } sample_gas_particle(Ktetrad, Pelectron, &m, ph->type, interaction); DLOOP1 { if (is_practically_nan(Pelectron[mu])) { #if RADIATION == RADTYPE_LIGHT printf("m.Thetae = %e m.Ne = %e m.B = %e\n", m.Thetae, m.Ne, m.B); #endif printf("Pelectron[%i] = %e!\n", mu, Pelectron[mu]); printf( "K = %e %e %e %e\n", Ktetrad[0], Ktetrad[1], Ktetrad[2], Ktetrad[3]); printf("k.k = %e\n", -Ktetrad[0] * Ktetrad[0] + Ktetrad[1] * Ktetrad[1] + Ktetrad[2] * Ktetrad[2] + Ktetrad[3] * Ktetrad[3]); return fail; } } sample_scattered_rad( Ktetrad, Pelectron, Ktetrad_scatt, &m, ph->type, interaction); DLOOP1 { if (is_practically_nan(Ktetrad_scatt[mu])) { // printf("Ktetrad_scatt[%i] = %e!\n", mu, Ktetrad_scatt[mu]); bad_scatter = 1; } } // If scatter fails, return failure and complain if (bad_scatter) { fprintf(stderr, "Bad scatter after sample_scattered_rad:\n" "\tph->Kcon[2] is bad\n" "\tUcon = [%e %e %e %e]\n" "\tUcov = [%e %e %e %e]\n" "\tBcon = [%e %e %e %e]\n" "\tBcov = [%e %e %e %e]\n" "\tKtetrad = [%e %e %e %e]\n" "\tKcon = [%e %e %e %e]\n", Ucon[0], Ucon[1], Ucon[2], Ucon[3], Ucov[0], Ucov[1], Ucov[2], Ucov[3], Bcon[0], Bcon[1], Bcon[2], Bcon[3], Bcov[0], Bcov[1], Bcov[2], Bcov[3], Ktetrad[0], Ktetrad[1], Ktetrad[2], Ktetrad[3], ph->Kcon[2][0], ph->Kcon[2][1], ph->Kcon[2][2], ph->Kcon[2][3]); return fail; } // Check NAN after each of these tetrad_to_coord(Econ, Ktetrad_scatt, ph->Kcon[2]); DLOOP1 { if (is_practically_nan(ph->Kcon[2][mu])) { // fprintf(stderr,"ph->Kcon[2][%i] = %e!\n", mu, ph->Kcon[2][mu]); bad_scatter = 1; } } if (bad_scatter) { fprintf(stderr, "Bad scatter after tetrad_to_coord:\n" "\tph->Kcon[2] is bad\n" "\ttype = %d\n" "\tinteraction = %d\n" "\tUcon = [%e %e %e %e]\n" "\tUcov = [%e %e %e %e]\n" "\tBcon = [%e %e %e %e]\n" "\tBcov = [%e %e %e %e]\n" "\tKtetrad = [%e %e %e %e]\n" "\tKcon = [%e %e 
%e %e]\n", ph->type, interaction, Ucon[0], Ucon[1], Ucon[2], Ucon[3], Ucov[0], Ucov[1], Ucov[2], Ucov[3], Bcon[0], Bcon[1], Bcon[2], Bcon[3], Bcov[0], Bcov[1], Bcov[2], Bcov[3], Ktetrad[0], Ktetrad[1], Ktetrad[2], Ktetrad[3], ph->Kcon[2][0], ph->Kcon[2][1], ph->Kcon[2][2], ph->Kcon[2][3]); return fail; } // Ensure scattered superphoton is sane normalize_null(gcov, ph->Kcon[2]); DLOOP1 { if (is_practically_nan(ph->Kcon[2][mu])) { // fprintf(stderr,"after norm ph->Kcon[2][%i] = %e!\n", mu, // ph->Kcon[2][mu]); bad_scatter = 1; } } if (bad_scatter) { fprintf(stderr, "Bad scatter after normalize_Null:\n" "\tph-Kcon[2] is bad\n" "\ttype = %d\n" "\tinteraction = %d\n" "\tUcon = [%e %e %e %e]\n" "\tUcov = [%e %e %e %e]\n" "\tBcon = [%e %e %e %e]\n" "\tBcov = [%e %e %e %e]\n" "\tKtetrad = [%e %e %e %e]\n" "\tKcon = [%e %e %e %e]\n", ph->type, interaction, Ucon[0], Ucon[1], Ucon[2], Ucon[3], Ucov[0], Ucov[1], Ucov[2], Ucov[3], Bcon[0], Bcon[1], Bcon[2], Bcon[3], Bcov[0], Bcov[1], Bcov[2], Bcov[3], Ktetrad[0], Ktetrad[1], Ktetrad[2], Ktetrad[3], ph->Kcon[2][0], ph->Kcon[2][1], ph->Kcon[2][2], ph->Kcon[2][3]); return fail; } lower(ph->Kcon[2], gcov, ph->Kcov[2]); DLOOP1 { if (is_practically_nan(ph->Kcov[2][mu])) { // fprintf(stderr,"after lower ph->Kcov[2][%i] = %e!\n", mu, // ph->Kcov[2][mu]); bad_scatter = 1; } } if (bad_scatter) { fprintf(stderr, "Bad scatter after lower Kcon:\n" "\tph-Kcov[2] is bad\n" "\ttype = %d\n" "\tinteraction = %d\n" "\tUcon = [%e %e %e %e]\n" "\tUcov = [%e %e %e %e]\n" "\tBcon = [%e %e %e %e]\n" "\tBcov = [%e %e %e %e]\n" "\tKcon = [%e %e %e %e]\n" "\tKtetrad = [%e %e %e %e]\n" "\tKcov = [%e %e %e %e]\n", ph->type, interaction, Ucon[0], Ucon[1], Ucon[2], Ucon[3], Ucov[0], Ucov[1], Ucov[2], Ucov[3], Bcon[0], Bcon[1], Bcon[2], Bcon[3], Bcov[0], Bcov[1], Bcov[2], Bcov[3], Ktetrad[0], Ktetrad[1], Ktetrad[2], Ktetrad[3], ph->Kcon[2][0], ph->Kcon[2][1], ph->Kcon[2][2], ph->Kcon[2][3], ph->Kcov[2][0], ph->Kcov[2][1], ph->Kcov[2][2], ph->Kcov[2][3]); 
return fail; } // Ensure scattered superphoton is sane // normalize_null(gcov, ph->Kcon[2]); // normalize_null_cov(gcon, ph->Kcov[2]); /*if (ph->Kcov[2][0] > 0.) { printf("Kcov[0] > 0 after scattering!\n"); printf("ph->X[] = %e %e %e %e\n", ph->X[2][0], ph->X[2][1], ph->X[2][2], ph->X[2][3]); printf("ph->Kcon[] = %e %e %e %e\n", ph->Kcon[2][0], ph->Kcon[2][1], ph->Kcon[2][2], ph->Kcon[2][3]); printf("ph->Kcov[] = %e %e %e %e\n", ph->Kcov[2][0], ph->Kcov[2][1], ph->Kcov[2][2], ph->Kcov[2][3]); }*/ return success; } // Procedure from Canfield et al. 1987 void sample_gas_particle(double k[NDIM], double p[NDIM], const struct of_microphysics *m, int type, int interaction) { double beta_e, mu, phi, cphi, sphi, gamma_e, sigma; double K, sth, cth, x1, n0dotv0, v0, v1; double n0x, n0y, n0z; double v0x, v0y, v0z; double v1x, v1y, v1z; double v2x, v2y, v2z; int sample_cnt = 0; double Thetae = scatterer_dimensionless_temp(type, interaction, m); double factor = 1.0; do { sample_beta(Thetae, &gamma_e, &beta_e); mu = sample_mu(beta_e); // Sometimes |mu| > 1 from roundoff error. Fix it if (mu > 1.) mu = 1.; else if (mu < -1.) mu = -1; // Frequency in electron rest frame K = gamma_e * (1. - beta_e * mu) * k[0]; sigma = get_total_cross_section(K, m, type, interaction, 1); x1 = factor * get_rand(); sample_cnt++; if (sample_cnt > 1000000) { fprintf(stderr, "in sample_gas_particle:\n" "\t type, int, mu, gamma_e, K, sigma, x1, factor:\n" "\t%d %d %g %g %g %g %g %g %g\n", type, interaction, Thetae, mu, gamma_e, K, sigma, x1, factor); // Kluge to prevent stalling for large values of \Theta_e // TODO: does this work? 
// Thetae *= 0.5 ; factor *= 0.5; sample_cnt = 0; } } while (x1 >= sigma); // First unit vector for coordinate system v0x = k[1]; v0y = k[2]; v0z = k[3]; v0 = sqrt(v0x * v0x + v0y * v0y + v0z * v0z); v0x /= v0; v0y /= v0; v0z /= v0; // Pick zero-angle for coordinate system get_ran_dir_3d(&n0x, &n0y, &n0z); n0dotv0 = v0x * n0x + v0y * n0y + v0z * n0z; // Second unit vector v1x = n0x - (n0dotv0)*v0x; v1y = n0y - (n0dotv0)*v0y; v1z = n0z - (n0dotv0)*v0z; // Normalize v1 = sqrt(v1x * v1x + v1y * v1y + v1z * v1z); v1x /= v1; v1y /= v1; v1z /= v1; // Find one more unit vector using cross product; automatically normalized v2x = v0y * v1z - v0z * v1y; v2y = v0z * v1x - v0x * v1z; v2z = v0x * v1y - v0y * v1x; // Resolve new momentum vector along unit vectors and create a four-vector p phi = get_rand() * 2. * M_PI; // uniform orientation sphi = sin(phi); cphi = cos(phi); cth = mu; sth = sqrt(1. - mu * mu); p[0] = gamma_e; p[1] = gamma_e * beta_e * (cth * v0x + sth * (cphi * v1x + sphi * v2x)); p[2] = gamma_e * beta_e * (cth * v0y + sth * (cphi * v1y + sphi * v2y)); p[3] = gamma_e * beta_e * (cth * v0z + sth * (cphi * v1z + sphi * v2z)); if (beta_e < 0) { fprintf(stderr, "betae error: %g %g %g %g\n", p[0], p[1], p[2], p[3]); } } void sample_beta(double Thetae, double *gamma_e, double *beta_e) { double y = sample_y(Thetae); *gamma_e = y * y * Thetae + 1.; *beta_e = sqrt(1. - 1. / ((*gamma_e) * (*gamma_e))); *beta_e += SMALL; // to prevent numerical problems for zero temperature } double sample_y(double Thetae) { double S_3, pi_3, pi_4, pi_5, pi_6, y, x1, x2, x, prob, num, den; pi_3 = sqrt(M_PI) / 4.; pi_4 = sqrt(0.5 * Thetae) / 2.; pi_5 = 3. 
* sqrt(M_PI) * Thetae / 8.; pi_6 = Thetae * sqrt(0.5 * Thetae); S_3 = pi_3 + pi_4 + pi_5 + pi_6; pi_3 /= S_3; pi_4 /= S_3; pi_5 /= S_3; pi_6 /= S_3; int max_samp = 100000; int n = 0; do { n++; x1 = get_rand(); if (x1 < pi_3) { x = get_chisq(3); } else if (x1 < pi_3 + pi_4) { x = get_chisq(4); } else if (x1 < pi_3 + pi_4 + pi_5) { x = get_chisq(5); } else { x = get_chisq(6); } // Translate between Canfield et al. and standard chisq distribution y = sqrt(x / 2); x2 = get_rand(); num = sqrt(1. + 0.5 * Thetae * y * y); den = 1. + y * sqrt(0.5 * Thetae); prob = num / den; } while (x2 >= prob && n < max_samp); if (n >= max_samp) { fprintf(stderr, "FAILED TO SAMPLE Y! Thetae = %e\n", Thetae); exit(-1); } return y; } double sample_mu(double beta_e) { double mu, x1; x1 = get_rand(); mu = (1. - sqrt(1. + 2. * beta_e + beta_e * beta_e - 4. * beta_e * x1)) / beta_e; return mu; } /* * True total cross section * in rest frame of the scattering particle * Distinct from hot cross section, * which is looked up in total_cross_lkup */ double get_total_cross_section(double k, const struct of_microphysics *m, int type, int interaction, int normalized) { #if RADIATION == RADTYPE_LIGHT { double sigma = hc_klein_nishina(k, 0); if (!normalized) sigma *= THOMSON; return sigma; } #else // RADIATION == RADTYPE_NEUTRINOS { #if MULTISCATT_TEST { double sigma = hc_flat(k, 0); if (!normalized) { int i = interaction; sigma *= (2 * 2 * i + 1) * NUSIGMA0 * pow(ms_theta_nu0, 2.0) / (4); } return sigma; } #else // Normal neutrino scattering { // TODO: doesn't work with electrons double sigma = hc_quad(k, 0); if (normalized) { return sigma / hc_quad_max(); } return nu_cross_factor(sigma, type, interaction, m); } #endif // multiscatt test? 
} #endif // RADTYPE_NEUTRINOS } void sample_scattered_rad(double k[NDIM], double p[NDIM], double kp[NDIM], const struct of_microphysics *m, int type, int interaction) { double ke[4], kpe[4]; double k0p; double n0x, n0y, n0z, n0dotv0, v0x, v0y, v0z, v1x, v1y, v1z, v2x, v2y, v2z, v1, dir1, dir2, dir3; double cth, sth, phi, cphi, sphi; boost(k, p, ke); sample_cross_section(ke[0], &k0p, &cth, m, type, interaction); sth = sqrt(fabs(1. - cth * cth)); // Unit vector 1 for scattering coordinate system is oriented along initial // photon wavevector // Explicitly compute kemag instead of using ke[0] to ensure that photon is // created normalized and doesn't inherit the light cone errors from the // original photon double kemag = sqrt(ke[1] * ke[1] + ke[2] * ke[2] + ke[3] * ke[3]); v0x = ke[1] / kemag; v0y = ke[2] / kemag; v0z = ke[3] / kemag; // Randomly pick zero-angle for scattering coordinate system. get_ran_dir_3d(&n0x, &n0y, &n0z); n0dotv0 = v0x * n0x + v0y * n0y + v0z * n0z; // Unit vector 2 v1x = n0x - (n0dotv0)*v0x; v1y = n0y - (n0dotv0)*v0y; v1z = n0z - (n0dotv0)*v0z; v1 = sqrt(v1x * v1x + v1y * v1y + v1z * v1z); v1x /= v1; v1y /= v1; v1z /= v1; // Find one more unit vector using cross product; automatically normalized v2x = v0y * v1z - v0z * v1y; v2y = v0z * v1x - v0x * v1z; v2z = v0x * v1y - v0y * v1x; // Resolve new momentum vector along unit vectors // Create a four-vector p // Solve for orientation of scattered photon // Find phi for new photon phi = 2. 
* M_PI * get_rand(); sphi = sin(phi); cphi = cos(phi); p[1] *= -1.; p[2] *= -1.; p[3] *= -1.; dir1 = cth * v0x + sth * (cphi * v1x + sphi * v2x); dir2 = cth * v0y + sth * (cphi * v1y + sphi * v2y); dir3 = cth * v0z + sth * (cphi * v1z + sphi * v2z); kpe[0] = k0p; kpe[1] = k0p * dir1; kpe[2] = k0p * dir2; kpe[3] = k0p * dir3; // Transform k back to lab frame boost(kpe, p, kp); if (kp[0] < 0 || isnan(kp[0])) { fprintf(stderr, "in sample_scattered_photon:\n"); fprintf(stderr, "kp[0], kpe[0]: %g %g\n", kp[0], kpe[0]); fprintf(stderr, "kpe: %g %g %g %g\n", kpe[0], kpe[1], kpe[2], kpe[3]); fprintf(stderr, "k: %g %g %g %g\n", k[0], k[1], k[2], k[3]); fprintf(stderr, "p: %g %g %g %g\n", p[0], p[1], p[2], p[3]); fprintf(stderr, "kp: %g %g %g %g\n", kp[0], kp[1], kp[2], kp[3]); } } void boost(double v[NDIM], double u[NDIM], double vp[NDIM]) { double g, V, n1, n2, n3, gm1; g = u[0]; V = sqrt(fabs(1. - 1. / (g * g))); n1 = u[1] / (g * V + SMALL); n2 = u[2] / (g * V + SMALL); n3 = u[3] / (g * V + SMALL); gm1 = g - 1.; // Lorentz boost into frame u from lab frame vp[0] = u[0] * v[0] - (u[1]) * v[1] - (u[2]) * v[2] - (u[3]) * v[3]; vp[1] = -u[1] * v[0] + (1. + n1 * n1 * gm1) * v[1] + (n1 * n2 * gm1) * v[2] + (n1 * n3 * gm1) * v[3]; vp[2] = -u[2] * v[0] + (n2 * n1 * gm1) * v[1] + (1. + n2 * n2 * gm1) * v[2] + (n2 * n3 * gm1) * v[3]; vp[3] = -u[3] * v[0] + (n3 * n1 * gm1) * v[1] + (n3 * n2 * gm1) * v[2] + (1. + n3 * n3 * gm1) * v[3]; } void sample_cross_section(double k, double *k0p, double *cth, const struct of_microphysics *m, int type, int interaction) { #if RADIATION == RADTYPE_LIGHT { *k0p = sample_klein_nishina(k); *cth = 1. - 1. / (*k0p) + 1. 
/ k; } #elif RADIATION == RADTYPE_NEUTRINOS { #if MULTISCATT_TEST { double xmax = ms_flat_max(m, type, interaction); rejection_sample(ms_flat, xmax, k, k0p, cth, m, type, interaction); } #else // Normal neutrino scattering { // TODO: doesn't work for electrons double xmax = nu_cross_max(m, type, interaction); rejection_sample(nu_cross, xmax, k, k0p, cth, m, type, interaction); } #endif // neutrino scattering } #endif // radiation type } double sample_klein_nishina(double k0) { double k0pmin, k0pmax, k0p_tent, x1; int n = 0; // A low efficiency sampling algorithm, particularly for large k0. Limiting // efficiency is log(2 k0)/(2 k0) k0pmin = k0 / (1. + 2. * k0); // at theta = Pi k0pmax = k0; // at theta = 0 do { // Tentative value k0p_tent = k0pmin + (k0pmax - k0pmin) * get_rand(); // Rejection sample in box of height = kn(kmin) x1 = 2. * (1. + 2. * k0 + 2. * k0 * k0) / (k0 * k0 * (1. + 2. * k0)); x1 *= get_rand(); n++; } while (x1 >= klein_nishina(k0, k0p_tent)); return k0p_tent; } double klein_nishina(double a, double ap) { double ch = 1. + 1. / a - 1. / ap; double kn = (a / ap + ap / a - 1. + ch * ch) / (a * a); return kn; } void init_all_hotcross() { #if RADIATION == RADTYPE_LIGHT { init_hc_table(hc_klein_nishina, table[0], HOTCROSS[0]); } #elif RADIATION == RADTYPE_NEUTRINOS { #if MULTISCATT_TEST { init_hc_table(hc_flat, table[0], HOTCROSS[0]); } #else // Normal neutrino scattering { init_hc_table(hc_quad, table[0], HOTCROSS[0]); } #endif // neutrino scattering type } #endif // RADIATION TYPE } void init_hc_table( hc_ftype f, double table[NW + 1][NT + 1], const char *hc_name) { int nread; double lw, lT; FILE * fp; MAXW = 100. 
* (HPL * numax) / (ME * CL * CL); dlw = log10(MAXW / MINW) / NW; dlT = log10(MAXT / MINT) / NT; lminw = log10(MINW); lmint = log10(MINT); // Create file if needed using IO proc if (mpi_io_proc()) { fp = fopen(hc_name, "r"); if (fp == NULL) { fprintf(stdout, "Making lookup table for %s cross section...\n", hc_name); #pragma omp parallel for collapse(2) for (int i = 0; i <= NW; i++) { for (int j = 0; j <= NT; j++) { lw = lminw + i * dlw; lT = lmint + j * dlT; table[i][j] = log10(total_cross_num(f, pow(10., lw), pow(10., lT))); if (isnan(table[i][j])) { fprintf(stderr, "NAN for %s cross section: %d %d %g %g\n", hc_name, i, j, lw, lT); exit(0); } } } fprintf(stdout, "Lookup table created.\n\n"); fprintf(stdout, "Writing lookup table to file...\n"); fp = fopen(hc_name, "w"); if (fp == NULL) { fprintf(stderr, "Couldn't write to file %s\n", hc_name); exit(0); } for (int i = 0; i <= NW; i++) { for (int j = 0; j <= NT; j++) { lw = lminw + i * dlw; lT = lmint + j * dlT; fprintf(fp, "%d %d %g %g %15.10g\n", i, j, lw, lT, table[i][j]); } } fprintf(stderr, "Lookup table written.\n\n"); } // fp == NULL fclose(fp); } // mpi_io_proc() mpi_barrier(); // Read lookup table with every MPI processor fp = fopen(hc_name, "r"); if (fp == NULL) { fprintf(stderr, "rank %i: file %s not found.\n", mpi_myrank(), hc_name); exit(-1); } for (int i = 0; i <= NW; i++) { for (int j = 0; j <= NT; j++) { nread = fscanf(fp, "%*d %*d %*f %*f %lf\n", &table[i][j]); if (isnan(table[i][j]) || nread != 1) { fprintf(stderr, "Error on table %s read: %d %d\n", hc_name, i, j); exit(0); } } } fclose(fp); } double interpolate_hc_table(double w, double thetae, hc_ftype f, double table[NW + 1][NT + 1], const char *hc_name) { int i, j; double lw, lT, di, dj, lcross; // DEBUG // double cross_section = RAD_SCATT_TYPES*NUSIGMA0; // return 0.1/(Rout_rad*L_unit*cross_section*Ne_unit); // if hotcross takes too long! 
#if TRUNCATE_HOTCROSS if (w <= MINW) w = MINW + fabs(0.01 * MINW); if (w >= MAXW) w = MAXW - fabs(0.01 * MAXW); if (thetae <= MINT) thetae = MINT + fabs(0.01 * MINT); if (thetae >= MAXT) thetae = MAXT - fabs(0.01 * MAXT); #endif // In-bounds for table if ((w > MINW && w < MAXW) && (thetae > MINT && thetae < MAXT)) { lw = log10(w); lT = log10(thetae); i = (int)((lw - lminw) / dlw); j = (int)((lT - lmint) / dlT); di = (lw - lminw) / dlw - i; dj = (lT - lmint) / dlT - j; lcross = (1. - di) * (1. - dj) * table[i][j] + di * (1. - dj) * table[i + 1][j] + (1. - di) * dj * table[i][j + 1] + di * dj * table[i + 1][j + 1]; if (isnan(lcross)) { fprintf(stderr, "NAN cross section %s: %g %g %d %d %g %g\n", hc_name, lw, lT, i, j, di, dj); } return pow(10., lcross); } fprintf(stderr, "Cross section %s out of bounds: %g [%g,%g] %g [%g,%g]\n", hc_name, w, MINW, MAXW, thetae, MINT, MAXT); return total_cross_num(f, w, thetae); } double total_cross_lkup( double w, int type, int interaction, const struct of_microphysics *m) { double thetae = scatterer_dimensionless_temp(type, interaction, m); #if RADIATION == RADTYPE_LIGHT { // Cold/low-energy: Use Thomson cross section if (w * thetae < 1.e-6) return (THOMSON); // Cold, but possible high-energy photon: use Klein-Nishina if (thetae < MINT) return (hc_klein_nishina(w, 0) * THOMSON); double sigma = interpolate_hc_table( w, thetae, hc_klein_nishina, table[0], HOTCROSS[0]); return THOMSON * sigma; } #elif RADIATION == RADTYPE_NEUTRINOS { if (thetae < MINT) { // easy zero-temperature limit return get_total_cross_section(w, m, type, interaction, 0); } #if MULTISCATT_TEST { double sigma = interpolate_hc_table(w, thetae, hc_flat, table[0], HOTCROSS[0]); return 4 * M_PI * sigma * ms_flat(w, 0, m, type, interaction) / hc_flat(w, 0); } #else // Normal neutrino scattering { if (type == NU_HEAVY && interaction == RSCATT_TYPE_E) { return 0.0; // heavy cannot scatter off of electrons } double sigma = interpolate_hc_table(w, thetae, hc_quad, table[0], 
HOTCROSS[0]); return nu_cross_factor(sigma, type, interaction, m); } #endif // neutrino scattering type } #endif // radiation type } double total_cross_num(hc_ftype f, double w, double thetae) { double dmue, dgammae, mue, gammae, maxwell, cross; if (isnan(w)) { fprintf(stderr, "NAN cross section: %g %g\n", w, thetae); return 0.; } // Check for easy limits #if RADIATION == RADTYPE_LIGHT { if (thetae < MINT && w < MINW) return 1.; if (thetae < MINT) return hc_klein_nishina(w, 0); } #endif dmue = DMUE; dgammae = thetae * DGAMMAE; // Integrate over mu_e and gamma_e, where mu_e is the cosine of the angle // between K and U_e, and the angle k is assumed to lie, wlog, along the z // z axis cross = 0.; for (mue = -1. + 0.5 * dmue; mue < 1.; mue += dmue) for (gammae = 1. + 0.5 * dgammae; gammae < 1. + MAXGAMMA * thetae; gammae += dgammae) { maxwell = 0.5 * dNdgammae(thetae, gammae); cross += dmue * dgammae * boostcross(f, w, mue, gammae) * maxwell; if (isnan(cross)) { fprintf(stderr, "NAN cross section: %g %g %g %g %g %g\n", w, thetae, mue, gammae, dNdgammae(thetae, gammae), boostcross(f, w, mue, gammae)); } } return cross; } // Normalized (per unit proper electron number density) electron distribution double dNdgammae(double thetae, double gammae) { double K2f; if (thetae > 1.e-2) { K2f = gsl_sf_bessel_Kn(2, 1. / thetae) * exp(1. / thetae); } else { K2f = sqrt(M_PI * thetae / 2.) + 15. / 8. * sqrt(M_PI / 2.) * pow(thetae, 1.5) + 105. / 128. * sqrt(M_PI / 2.) * pow(thetae, 2.5) - 315. / 1024. * sqrt(M_PI / 2.) * pow(thetae, 3.5); } return (gammae * sqrt(gammae * gammae - 1.) / (thetae * K2f)) * exp(-(gammae - 1.) / thetae); } double boostcross(hc_ftype f, double w, double mue, double gammae) { double we, boostcross, v; // Energy in electron rest frame v = sqrt(gammae * gammae - 1.) / gammae; we = w * gammae * (1. - mue * v); boostcross = f(we, mue) * (1. 
- mue * v); #if RADIATION == RADTYPE_LIGHT if (boostcross > 2) { fprintf(stderr, "w, mue, gammae: %g %g %g\n" "v, we, boostcross: %g %g %g\n" "kn: %g %g %g\n", w, mue, gammae, v, we, boostcross, v, we, boostcross); exit(1); } #endif if (isnan(boostcross)) { fprintf(stderr, "isnan: %g %g %g\n", w, mue, gammae); return 0.; } return boostcross; } /* * TODO: for elastic scattering, we could * inverse-transform sample for significant speedup */ void rejection_sample(dsdom_ftype f, double xmax, double k, double *k0p, double *cth, const struct of_microphysics *m, int type, int interaction) { double mu, x, sigma; do { mu = 2 * get_rand() - 1.; x = get_rand() * xmax; sigma = f(k, mu, m, type, interaction); } while (x >= sigma); *k0p = k; // because scattering is elastic *cth = mu; } double hc_klein_nishina(double we, double mue) { double sigma; if (we < 1.e-3) { sigma = 1. - 2. * we + 5.2 * we * we - 13.3 * we * we * we + 1144 * we * we * we * we / 35.; } else { sigma = (3. / 4.) * (2. / (we * we) + (1. / (2. * we) - (1. + we) / (we * we * we)) * log(1. + 2. * we) + (1. + we) / ((1. + 2. * we) * (1. + 2. 
* we))); } return sigma; } #if RADIATION == RADTYPE_NEUTRINOS #if MULTISCATT_TEST double hc_flat(double we, double mue) { return 1.0; } double ms_flat(double w, double mu, const struct of_microphysics *m, int type, int interaction) { int i = interaction; double sigma = hc_flat(w, 0); sigma *= (2 * 2 * i + 1) * NUSIGMA0 * pow(ms_theta_nu0, 2.0) / (16 * M_PI); sigma *= (1 + ms_delta0 * pow(mu, 2 * i + 1)); return sigma; } double ms_flat_max(const struct of_microphysics *m, int type, int interaction) { if (ms_delta0 > 0) return ms_flat(1, 1, m, type, interaction); if (ms_delta0 < 0) return ms_flat(1, -1, m, type, interaction); return ms_flat(1, 0, m, type, interaction); } #else // not multiscatt test double nu_cross(double w, double mu, const struct of_microphysics *m, int type, int interaction) { // TODO: doesn't work for electrons double sigma = hc_quad(w, mu); double delta = nu_cross_delta(type, interaction); sigma *= nu_cross_factor(sigma, type, interaction, m); sigma /= 4. * M_PI; sigma *= 1 + delta * mu; return sigma; } double nu_cross_max( const struct of_microphysics *m, int type, int interaction) { // TODO: doesn't work for electrons double sigma = hc_quad_max(); double delta = nu_cross_delta(type, interaction); sigma = nu_cross_factor(sigma, type, interaction, m); sigma /= 4. * M_PI; sigma *= 1 + fabs(delta); return sigma; } // Burrows, Reddy, Thomson, arXiv:astro-ph/0404432 double total_cross_ions(double sigma_hc, double A, double Z) { const double CFF = 1.; // form factor const double CLOS = 1.; // electron polarization const double Sion = 1.; // ion screening double W = 1. - 2. * (Z / A) * (1. - 2. * S2THW); double out = ((1. / 16.) 
* NUSIGMA0 * sigma_hc * A * A * (W * CFF + CLOS) * (W * CFF + CLOS) * Sion); return out; } // Burrows, Reddy, Thomson, arXiv:astro-ph/0404432 double nu_cross_delta(int type, int interaction) { // heavy cannot scatter off of electrons if (type == NU_HEAVY && interaction == RSCATT_TYPE_E) return 0.0; if (interaction == RSCATT_TYPE_P) { double Cpv = 0.5 + 2.0 * S2THW; double Cpa = 0.5; double num = (Cpv - 1) * (Cpv - 1) - GA2 * (Cpa - 1) * (Cpa - 1); double den = (Cpv - 1) * (Cpv - 1) + 3 * GA2 * (Cpa - 1) * (Cpa - 1); return num / den; } else if (interaction == RSCATT_TYPE_N) { return (1 - GA2) / (1 + 3 * GA2); } else if (interaction == RSCATT_TYPE_A || interaction == RSCATT_TYPE_ALPHA) { return 1.0; } else { fprintf(stderr, "total_cross_lkup: cross not implemented\n"); exit(1); } } // Burrows, Reddy, Thomson, arXiv:astro-ph/0404432 double nu_cross_factor( double sigma, int type, int interaction, const struct of_microphysics *m) { // heavy cannot scatter off of electrons if (type == NU_HEAVY && interaction == RSCATT_TYPE_E) return 0.0; if (interaction == RSCATT_TYPE_P) { return 0.25 * NUSIGMA0 * sigma * (4. * S4THW - 2. * S2THW + 0.25 * (1 + 3. * GA2)); } else if (interaction == RSCATT_TYPE_N) { return (NUSIGMA0 / 4.) * sigma * (1 + 3 * GA2) / 4.; } else if (interaction == RSCATT_TYPE_A) { return total_cross_ions(sigma, m->Abar, m->Zbar); } else if (interaction == RSCATT_TYPE_ALPHA) { return total_cross_ions(sigma, 4.0, 2.0); } else { // TODO: add neutrino-electron scattering fprintf(stderr, "total_cross_lkup: cross not implemented\n"); exit(1); } } double hc_quad(double we, double mue) { // we in units of hnu/Mec^2 double nu_maxw = (HPL * numax) / (ME * CL * CL); if (we >= nu_maxw) we = nu_maxw; return we * we; } double hc_quad_max() { double nu_maxw = (HPL * numax) / (ME * CL * CL); return hc_quad(nu_maxw, 0); } #endif // MULTISCATT_TEST #endif // Neutrinos #endif // RADIATION
mandelbrot-parallel.c
//
// mandelbrot-parallel.c
//
// The Mandelbrot calculation iterates the equation z = z*z + c, where z
// and c are complex numbers, z is initially zero, and c is the coordinate
// of the point being tested.  If the magnitude of z stays below 2 forever,
// the point c is in the Mandelbrot set.  For every pixel we record the
// number of iterations taken before |z| exceeds 2 (capped at
// maxIterations) and map it to a brightness, writing the result as a
// binary PPM (P6) image to "sortida.ppm".
//
// Usage: mandelbrot-parallel <width> <height> <maxIterations>
//
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <omp.h>

int main(int argc, char *argv[])
{
  // Validate the command line before touching argv[1..3]; the original
  // crashed with a segfault when arguments were missing.
  if (argc < 4) {
    fprintf(stderr, "usage: %s width height maxIterations\n", argv[0]);
    return 1;
  }
  const int w = atoi(argv[1]);            // image width in pixels
  const int h = atoi(argv[2]);            // image height in pixels
  const int maxIterations = atoi(argv[3]); // escape-time iteration cap
  if (w <= 0 || h <= 0 || maxIterations <= 0) {
    fprintf(stderr, "width, height and maxIterations must be positive\n");
    return 1;
  }

  // View parameters: change these to zoom or pan the rendered region.
  const double zoom = 1, moveX = -0.5, moveY = 0;

  typedef unsigned char pixelType[3]; // one RGB triple per pixel

  pixelType *pixels = (pixelType *)malloc(sizeof(pixelType) * (size_t)h * (size_t)w);
  if (pixels == NULL) {
    fprintf(stderr, "out of memory allocating %dx%d pixel buffer\n", w, h);
    return 1;
  }

  // Wall-clock timing only: clock() sums CPU time over all OpenMP threads
  // and was dead code in the original (time_spent was never printed).
  const double timeBegin = omp_get_wtime();

  // Each pixel is independent, so parallelize over rows.  All per-pixel
  // state is declared inside the loop body and is therefore thread-private.
  #pragma omp parallel for schedule(static)
  for (int y = 0; y < h; y++) {
    for (int x = 0; x < w; x++) {
      // Initial c = (pr, pi) from the pixel location, zoom and pan.
      const double pr = 1.5 * (x - w / 2) / (0.5 * zoom * w) + moveX;
      const double pi = (y - h / 2) / (0.5 * zoom * h) + moveY;

      double newRe = 0, newIm = 0; // z starts at the origin
      int i;                       // iterations completed before escape
      for (i = 0; i < maxIterations; i++) {
        const double oldRe = newRe;
        const double oldIm = newIm;
        // z = z*z + c, split into real and imaginary parts.
        newRe = oldRe * oldRe - oldIm * oldIm + pr;
        newIm = 2 * oldRe * oldIm + pi;
        // Stop once z leaves the circle of radius 2 (it will diverge).
        if ((newRe * newRe + newIm * newIm) > 4)
          break;
      }

      pixelType *px = &pixels[(size_t)y * w + x];
      if (i == maxIterations) {
        // Interior points are painted black.
        (*px)[0] = 0;
        (*px)[1] = 0;
        (*px)[2] = 0;
      } else {
        // Smooth coloring from the normalized escape count.
        const double z = sqrt(newRe * newRe + newIm * newIm);
        const int brightness =
            256 * log2(1.75 + i - log2(log2(z))) / log2((double)maxIterations);
        (*px)[0] = (unsigned char)brightness;
        (*px)[1] = (unsigned char)brightness;
        (*px)[2] = (unsigned char)255;
      }
    }
  }

  fprintf(stderr, "Elapsed time: %.2lf seconds.\n", omp_get_wtime() - timeBegin);

  // Write the image.  NOTE(review): the original also printed a PPM header
  // to stdout — a leftover from a removed per-pixel streaming path — which
  // produced a header-only, dataless PPM on stdout; that vestige is dropped.
  FILE *sortida = fopen("sortida.ppm", "wb");
  if (sortida == NULL) {
    fprintf(stderr, "cannot open sortida.ppm for writing\n");
    free(pixels);
    return 1;
  }
  fprintf(sortida, "P6\n# CREATOR: Eric R. Weeks / mandel program\n");
  fprintf(sortida, "%d %d\n255\n", w, h);
  // The buffer is contiguous row-major RGB, so one bulk write suffices.
  fwrite(pixels, sizeof(pixelType), (size_t)h * (size_t)w, sortida);
  fclose(sortida);
  free(pixels);
  return 0;
}
GB_binop__bset_int32.c
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

// NOTE(review): every GrB_Info function below is a thin shell: the numeric
// kernel lives in the #include'd *_template.c files, specialized by the GB_*
// macros defined at the top of this file.  Do not hand-edit; regenerate from
// Generator/* instead.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__bset_int32)
// A.*B function (eWiseMult):       GB (_AemultB_08__bset_int32)
// A.*B function (eWiseMult):       GB (_AemultB_02__bset_int32)
// A.*B function (eWiseMult):       GB (_AemultB_04__bset_int32)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__bset_int32)
// A*D function (colscale):         GB ((none))
// D*A function (rowscale):         GB ((none))
// C+=B function (dense accum):     GB (_Cdense_accumB__bset_int32)
// C+=b function (dense accum):     GB (_Cdense_accumb__bset_int32)
// C+=A+B function (dense ewise3):  GB ((none))
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__bset_int32)
// C=scalar+B                       GB (_bind1st__bset_int32)
// C=scalar+B'                      GB (_bind1st_tran__bset_int32)
// C=A+scalar                       GB (_bind2nd__bset_int32)
// C=A'+scalar                      GB (_bind2nd_tran__bset_int32)

// C type:   int32_t
// A type:   int32_t
// B,b type: int32_t
// BinaryOp: cij = GB_BITSET (aij, bij, int32_t, 32)

#define GB_ATYPE \
    int32_t

#define GB_BTYPE \
    int32_t

#define GB_CTYPE \
    int32_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    int32_t aij = GBX (Ax, pA, A_iso)

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    int32_t bij = GBX (Bx, pB, B_iso)

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    int32_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = GB_BITSET (x, y, int32_t, 32) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    1

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
// (the GxB_NO_* symbols come from GB_control.h; when GB_DISABLE is true every
// function below compiles to "return (GrB_NO_VALUE)" and the generic fallback
// kernel is used instead)
#define GB_DISABLE \
    (GxB_NO_BSET || GxB_NO_INT32 || GxB_NO_BSET_INT32)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// (BSET is none of these, so this variant is compiled out for this operator.)

void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_ewise3_noaccum__bset_int32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__bset_int32)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing,    // task partition of B's entries/vectors
    const int B_ntasks,
    const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__bset_int32)
(
    GrB_Matrix C,
    const GB_void *p_bwork,         // scalar b, passed as untyped bytes
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int32_t
        int32_t bwork = (*((int32_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): unreachable — the block above always returns.  Harmless
    // artifact of the code generator; kept as generated.
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

#if 0
// (not generated for BSET; kept compiled-out by the generator)

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t *restrict Cx = (int32_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

#if 0
// (not generated for BSET; kept compiled-out by the generator)

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t *restrict Cx = (int32_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__bset_int32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // workspaces declared here are allocated/freed by the template
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__bset_int32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__bset_int32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__bset_int32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__bset_int32)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__bset_int32)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,  // bitmap of B, or NULL if B is full
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t *Cx = (int32_t *) Cx_output ;
    int32_t   x = (*((int32_t *) x_input)) ;
    int32_t *Bx = (int32_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip entries not present in the bitmap
        if (!GBB (Bb, p)) continue ;
        int32_t bij = GBX (Bx, p, false) ;
        Cx [p] = GB_BITSET (x, bij, int32_t, 32) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__bset_int32)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,  // bitmap of A, or NULL if A is full
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    int32_t *Cx = (int32_t *) Cx_output ;
    int32_t *Ax = (int32_t *) Ax_input ;
    int32_t   y = (*((int32_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap
        if (!GBB (Ab, p)) continue ;
        int32_t aij = GBX (Ax, p, false) ;
        Cx [p] = GB_BITSET (aij, y, int32_t, 32) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    int32_t aij = GBX (Ax, pA, false) ;         \
    Cx [pC] = GB_BITSET (x, aij, int32_t, 32) ; \
}

GrB_Info GB (_bind1st_tran__bset_int32)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    // (redefined here, then restored below, so later code sees the original)
    #undef  GB_ATYPE
    #define GB_ATYPE \
        int32_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t x = (*((const int32_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    #undef  GB_ATYPE
    #define GB_ATYPE \
        int32_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    int32_t aij = GBX (Ax, pA, false) ;         \
    Cx [pC] = GB_BITSET (aij, y, int32_t, 32) ; \
}

GrB_Info GB (_bind2nd_tran__bset_int32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t y = (*((const int32_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
api_test.c
// Unit tests for the public SPLATT C API (ctest framework).
// Each CTEST2(api, ...) case runs with the fixture declared in CTEST_DATA(api):
// every dataset listed in `datasets` (from splatt_test.h) is loaded into an
// sptensor_t before each test and freed afterwards.

#include "ctest/ctest.h"

#include "splatt_test.h"
#include "../src/sptensor.h"

/* API includes */
#include "../include/splatt.h"

#ifdef _OPENMP
#include <omp.h>
#endif

// Fixture state shared by all tests in this suite.
CTEST_DATA(api)
{
  splatt_idx_t ntensors;            // number of loaded test tensors
  sptensor_t * tensors[MAX_DSETS];  // one tensor per entry in `datasets`
};

// Load every test dataset before each test.
CTEST_SETUP(api)
{
  data->ntensors = sizeof(datasets) / sizeof(datasets[0]);
  for(idx_t i=0; i < data->ntensors; ++i) {
    data->tensors[i] = tt_read(datasets[i]);
  }
}

// Release all tensors after each test.
CTEST_TEARDOWN(api)
{
  for(idx_t i=0; i < data->ntensors; ++i) {
    tt_free(data->tensors[i]);
  }
}

// splatt_default_opts() must return a non-NULL array whose NTHREADS default
// matches the OpenMP thread count (or 1 in a serial build).
CTEST2(api, opts_alloc)
{
  double * opts = splatt_default_opts();
  ASSERT_NOT_NULL(opts);

  /* test defaults */
#ifdef _OPENMP
  ASSERT_EQUAL(omp_get_max_threads(), (int) opts[SPLATT_OPTION_NTHREADS]);
#else
  ASSERT_EQUAL(1, (int) opts[SPLATT_OPTION_NTHREADS]);
#endif

  splatt_free_opts(opts);
}

// When called from inside a parallel region, the NTHREADS default must be 1
// (each caller is already a single thread).
CTEST2(api, par_opts_alloc)
{
  #pragma omp parallel num_threads(5)
  {
    double * opts = splatt_default_opts();
    ASSERT_EQUAL(1, (int) opts[SPLATT_OPTION_NTHREADS]);
    splatt_free_opts(opts);
  }
}

// Placeholder for CSF (de)serialization round-trip.
// NOTE(review): body is compiled out (#if 0) — splatt_csf_load is not yet
// exercised; `loaded` is intentionally unused until the test is enabled.
CTEST2(api, csf_load)
{
  splatt_csf loaded;

  for(idx_t i=0; i < data->ntensors; ++i) {
#if 0
    int ret = splatt_csf_load(datasets[i], &nmodes, &loaded, opts);
#endif
  }
}

// The runtime-reported version numbers must match the compile-time macros.
CTEST2(api, version_major)
{
  ASSERT_EQUAL(SPLATT_VER_MAJOR, splatt_version_major());
}
CTEST2(api, version_minor)
{
  ASSERT_EQUAL(SPLATT_VER_MINOR, splatt_version_minor());
}
CTEST2(api, version_subminor)
{
  ASSERT_EQUAL(SPLATT_VER_SUBMINOR, splatt_version_subminor());
}
3d7pt.lbpar.c
#include <omp.h> #include <math.h> #define ceild(n,d) ceil(((double)(n))/((double)(d))) #define floord(n,d) floor(((double)(n))/((double)(d))) #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) /* * Order-1, 3D 7 point stencil * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+2; Ny = atoi(argv[2])+2; Nz = atoi(argv[3])+2; } if (argc > 4) Nt = atoi(argv[4]); double ****A = (double ****) malloc(sizeof(double***)*2); A[0] = (double ***) malloc(sizeof(double**)*Nz); A[1] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[0][i] = (double**) malloc(sizeof(double*)*Ny); A[1][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[0][i][j] = (double*) malloc(sizeof(double)*Nx); A[1][i][j] = (double*) malloc(sizeof(double)*Nx); } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 16; tile_size[1] = 16; tile_size[2] = 16; tile_size[3] = 2048; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; const double alpha = 0.0876; const double beta = 0.0765; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 /* Copyright (C) 1991-2014 Free Software Foundation, Inc. This file is part of the GNU C Library. 
The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http://www.gnu.org/licenses/>. */ /* This header is separate from features.h so that the compiler can include it implicitly at the start of every compilation. It must not itself include <features.h> or any other header that includes <features.h> because the implicit include comes before any feature test macros that may be defined in a source file before it first explicitly includes a system header. GCC knows the name of this header in order to preinclude it. */ /* glibc's intent is to support the IEC 559 math functionality, real and complex. If the GCC (4.9 and later) predefined macros specifying compiler intent are available, use them to determine whether the overall intent is to support these features; otherwise, presume an older compiler has intent to support these features and define these macros by default. */ /* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) / Unicode 6.0. */ /* We do not support C11 <threads.h>. 
*/ int t1, t2, t3, t4, t5, t6, t7, t8; int lb, ub, lbp, ubp, lb2, ub2; register int lbv, ubv; /* Start of CLooG code */ if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) { for (t1=-1;t1<=floord(Nt-2,8);t1++) { lbp=max(ceild(t1,2),ceild(16*t1-Nt+3,16)); ubp=min(floord(Nt+Nz-4,16),floord(8*t1+Nz+5,16)); #pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8) for (t2=lbp;t2<=ubp;t2++) { for (t3=max(max(0,ceild(t1-1,2)),ceild(16*t2-Nz-12,16));t3<=min(min(min(floord(Nt+Ny-4,16),floord(8*t1+Ny+13,16)),floord(16*t2+Ny+12,16)),floord(16*t1-16*t2+Nz+Ny+11,16));t3++) { for (t4=max(max(max(0,ceild(t1-255,256)),ceild(16*t2-Nz-2044,2048)),ceild(16*t3-Ny-2044,2048));t4<=min(min(min(min(floord(Nt+Nx-4,2048),floord(8*t1+Nx+13,2048)),floord(16*t2+Nx+12,2048)),floord(16*t3+Nx+12,2048)),floord(16*t1-16*t2+Nz+Nx+11,2048));t4++) { for (t5=max(max(max(max(max(0,8*t1),16*t1-16*t2+1),16*t2-Nz+2),16*t3-Ny+2),2048*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,8*t1+15),16*t2+14),16*t3+14),2048*t4+2046),16*t1-16*t2+Nz+13);t5++) { for (t6=max(max(16*t2,t5+1),-16*t1+16*t2+2*t5-15);t6<=min(min(16*t2+15,-16*t1+16*t2+2*t5),t5+Nz-2);t6++) { for (t7=max(16*t3,t5+1);t7<=min(16*t3+15,t5+Ny-2);t7++) { lbv=max(2048*t4,t5+1); ubv=min(2048*t4+2047,t5+Nx-2); #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = ((alpha * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (beta * (((((A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)] + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1]) + A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) + 1])));; } } } } } } } } } /* End of CLooG code */ gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } 
PRINT_RESULTS(1, "constant") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays (Causing performance degradation /* for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); */ return 0; }
GB_binop__le_bool.c
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

// NOTE(review): each GrB_Info function below is a shell around an #include'd
// template kernel, specialized by the GB_* macros defined at the top of this
// file.  Do not hand-edit; regenerate from Generator/* instead.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB_AaddB__le_bool
// A.*B function (eWiseMult):       GB_AemultB__le_bool
// A*D function (colscale):         GB_AxD__le_bool
// D*A function (rowscale):         GB_DxB__le_bool
// C+=B function (dense accum):     GB_Cdense_accumB__le_bool
// C+=b function (dense accum):     GB_Cdense_accumb__le_bool
// C+=A+B function (dense ewise3):  (none)
// C=A+B function (dense ewise3):   GB_Cdense_ewise3_noaccum__le_bool
// C=scalar+B                       GB_bind1st__le_bool
// C=scalar+B'                      GB_bind1st_tran__le_bool
// C=A+scalar                       GB_bind2nd__le_bool
// C=A'+scalar                      GB_bind2nd_tran__le_bool

// C type:   bool
// A type:   bool
// B,b type: bool
// BinaryOp: cij = (aij <= bij)

#define GB_ATYPE \
    bool

#define GB_BTYPE \
    bool

#define GB_CTYPE \
    bool

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    bool aij = Ax [pA]

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
    bool bij = Bx [pB]

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    bool t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
    cij = Ax [pA]

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
    cij = Bx [pB]

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z, x, y, i, j) \
    z = (x <= y) ;

// op is second
#define GB_OP_IS_SECOND \
    0

// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
    0

// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
    0

// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
    (none)

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
// (the GxB_NO_* symbols come from GB_control.h; when true, every function
// below compiles to "return (GrB_NO_VALUE)" and the generic kernel is used)
#define GB_DISABLE \
    (GxB_NO_LE || GxB_NO_BOOL || GxB_NO_LE_BOOL)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// (LE is none of these, so this variant is compiled out for this operator.)

void (none)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_ewise3_noaccum__le_bool
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_accumB__le_bool
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_accumb__le_bool
(
    GrB_Matrix C,
    const GB_void *p_bwork,     // scalar b, passed as untyped bytes
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type bool
        bool bwork = (*((bool *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): unreachable — the block above always returns.  Harmless
    // artifact of the code generator; kept as generated.
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB_AxD__le_bool
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *GB_RESTRICT Cx = (bool *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB_DxB__le_bool
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *GB_RESTRICT Cx = (bool *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

// release all ek_slice workspaces (used by the templates on early exit too)
#undef  GB_FREE_ALL
#define GB_FREE_ALL                                                         \
{                                                                           \
    GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ;      \
    GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ;      \
    GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ;      \
}

GrB_Info GB_AaddB__le_bool
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // workspaces allocated by the template, freed by GB_FREE_ALL
    int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
    int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
    int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
    #include "GB_add_template.c"
    GB_FREE_ALL ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

GrB_Info GB_AemultB__le_bool
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // workspaces allocated by the template, freed by GB_FREE_ALL
    int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
    int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
    int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
    #include "GB_emult_template.c"
    GB_FREE_ALL ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB_bind1st__le_bool
(
    GB_void *Cx_output,             // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *GB_RESTRICT Bb,   // bitmap of B, or NULL if B is full
    int64_t anz,                    // number of entries in B
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *Cx = (bool *) Cx_output ;
    bool   x = (*((bool *) x_input)) ;
    bool *Bx = (bool *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap
        if (!GBB (Bb, p)) continue ;
        bool bij = Bx [p] ;
        Cx [p] = (x <= bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB_bind2nd__le_bool
(
    GB_void *Cx_output,             // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *GB_RESTRICT Ab,   // bitmap of A, or NULL if A is full
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    bool *Cx = (bool *) Cx_output ;
    bool *Ax = (bool *) Ax_input ;
    bool   y = (*((bool *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap
        if (!GBB (Ab, p)) continue ;
        bool aij = Ax [p] ;
        Cx [p] = (aij <= y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)           \
{                                   \
    bool aij = Ax [pA] ;            \
    Cx [pC] = (x <= aij) ;          \
}

GrB_Info GB_bind1st_tran__le_bool
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    // (redefined here, then restored below, so later code sees the original)
    #undef  GB_ATYPE
    #define GB_ATYPE \
        bool
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool x = (*((const bool *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    #undef  GB_ATYPE
    #define GB_ATYPE \
        bool
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)           \
{                                   \
    bool aij = Ax [pA] ;            \
    Cx [pC] = (aij <= y) ;          \
}

GrB_Info GB_bind2nd_tran__le_bool
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool y = (*((const bool *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
GB_binop__bxnor_int32.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__bxnor_int32) // A.*B function (eWiseMult): GB (_AemultB_08__bxnor_int32) // A.*B function (eWiseMult): GB (_AemultB_02__bxnor_int32) // A.*B function (eWiseMult): GB (_AemultB_04__bxnor_int32) // A.*B function (eWiseMult): GB (_AemultB_bitmap__bxnor_int32) // A*D function (colscale): GB ((none)) // D*A function (rowscale): GB ((none)) // C+=B function (dense accum): GB (_Cdense_accumB__bxnor_int32) // C+=b function (dense accum): GB (_Cdense_accumb__bxnor_int32) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__bxnor_int32) // C=scalar+B GB (_bind1st__bxnor_int32) // C=scalar+B' GB (_bind1st_tran__bxnor_int32) // C=A+scalar GB (_bind2nd__bxnor_int32) // C=A'+scalar GB (_bind2nd_tran__bxnor_int32) // C type: int32_t // A type: int32_t // B,b type: int32_t // BinaryOp: cij = ~((aij) ^ (bij)) #define GB_ATYPE \ int32_t #define GB_BTYPE \ int32_t #define GB_CTYPE \ int32_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical 
#define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ int32_t aij = GBX (Ax, pA, A_iso) // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ int32_t bij = GBX (Bx, pB, B_iso) // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int32_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = ~((x) ^ (y)) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_BXNOR || GxB_NO_INT32 || GxB_NO_BXNOR_INT32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_ewise3_noaccum__bxnor_int32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__bxnor_int32)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing,
    const int B_ntasks,
    const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__bxnor_int32)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int32_t
        int32_t bwork = (*((int32_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): this second return is unreachable (the braced scope above
    // always returns); it is kept as emitted by the code generator.
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t *restrict Cx = (int32_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t *restrict Cx = (int32_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__bxnor_int32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // workspaces are allocated inside GB_add_template.c and released by
    // GB_FREE_WORK below
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__bxnor_int32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__bxnor_int32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        // (GB_BINOP_FLIP is 0 for bxnor, which is commutative.)
        #undef GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__bxnor_int32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__bxnor_int32)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__bxnor_int32)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t *Cx = (int32_t *) Cx_output ;
    int32_t   x = (*((int32_t *) x_input)) ;
    int32_t *Bx = (int32_t *) Bx_input ;
    int64_t p ;
    // entries absent from the bitmap Bb are skipped
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        if (!GBB (Bb, p)) continue ;
        int32_t bij = GBX (Bx, p, false) ;
        Cx [p] = ~((x) ^ (bij)) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__bxnor_int32)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    int32_t *Cx = (int32_t *) Cx_output ;
    int32_t *Ax = (int32_t *) Ax_input ;
    int32_t   y = (*((int32_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        int32_t aij = GBX (Ax, p, false) ;
        Cx [p] = ~((aij) ^ (y)) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    int32_t aij = GBX (Ax, pA, false) ;         \
    Cx [pC] = ~((x) ^ (aij)) ;                  \
}

GrB_Info GB (_bind1st_tran__bxnor_int32)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef GB_ATYPE
    #define GB_ATYPE \
        int32_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t x = (*((const int32_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for subsequent kernels (same type here, by generation)
    #undef GB_ATYPE
    #define GB_ATYPE \
        int32_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    int32_t aij = GBX (Ax, pA, false) ;         \
    Cx [pC] = ~((aij) ^ (y)) ;                  \
}

GrB_Info GB (_bind2nd_tran__bxnor_int32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t y = (*((const int32_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
boundary_mask_mex.c
#include <inttypes.h> #include <omp.h> #include "mex.h" #include "boundary_mask_mex.h" void boundary_mask(uint8_t *B, const uint8_t *G, const size_t *sz); #ifdef BOUNDARY_MASK_MEX void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) { if ((nrhs != 2) || (nlhs > 1)) { mexErrMsgTxt("Usage: boundary_mask_mex(B, G);"); } uint8_t *B = (uint8_t *)mxGetData(prhs[0]); const uint8_t *G = (const uint8_t *)mxGetData(prhs[1]); const size_t *sz = (const size_t *)mxGetDimensions(prhs[0]); boundary_mask(B, G, sz); if (nlhs == 1) { plhs[0] = mxCreateDoubleScalar(1.0); } return; } #endif void mx_boundary_mask(mxArray *mxB, const mxArray *mxG) { uint8_t *B = (uint8_t *)mxGetData(mxB); const uint8_t *G = (const uint8_t *)mxGetData(mxG); const size_t *sz = (const size_t *)mxGetDimensions(mxB); boundary_mask(B, G, sz); return; } void boundary_mask(uint8_t *B, const uint8_t *G, const size_t *sz) { size_t i, j, k; size_t l; const size_t nx = sz[0]; const size_t ny = sz[1]; const size_t nz = sz[2]; const size_t nxny = nx*ny; const size_t NX = nx-2; const size_t NY = nx*(ny-2); const size_t NZ = nxny*(nz-2); const size_t nx2 = 2*nx; const size_t nxny2 = 2*nxny; /* offset indices */ const size_t o110 = 1 + nx + 0; const size_t o101 = 1 + 0 + nxny; const size_t o011 = 0 + nx + nxny; const size_t o111 = 1 + nx + nxny; uint8_t *b = (uint8_t *)calloc(nx*ny*nz, sizeof(*G)); /* boundary of grid */ #pragma omp parallel for private(i,j,k,l) schedule(static) \ if (nxny*nz > 32*32*32) for(k = nxny; k <= NZ; k += nxny) { for(j = nx; j <= NY; j += nx) { l = 1 + j + k; for(i = 1; i <= NX; ++i, ++l) { if ((i == 1) || (j == nx) || (k == nxny) || (i == NX) || (j == NY) || (k == NZ)) { b[l] = G[l]; } } } } /* interior */ #pragma omp parallel for private(i,j,k,l) schedule(static) \ if (nxny*nz > 32*32*32) for(k = nxny; k <= NZ; k += nxny2) { for(j = nx; j <= NY; j += nx2) { l = 1 + j + k; for(i = 1; i <= NX; i += 2, l += 2) { if (!(G[l] && G[l+1] && G[l+nx] && G[l+nxny] && G[l+o110] 
&& G[l+o101] && G[l+o011] && G[l+o111])) { b[l] = G[l]; b[l+1] = G[l+1]; b[l+nx] = G[l+nx]; b[l+nxny] = G[l+nxny]; b[l+o110] = G[l+o110]; b[l+o101] = G[l+o101]; b[l+o011] = G[l+o011]; b[l+o111] = G[l+o111]; } } } } /* grow boundary band */ #pragma omp parallel for private(i,j,k,l) schedule(static) \ if (nxny*nz > 32*32*32) for(k = nxny; k <= NZ; k += nxny) { for(j = nx; j <= NY; j += nx) { l = 1 + j + k; for(i = 1; i <= NX; ++i, ++l) { if (G[l]) { B[l] = b[l-nxny] || b[l-nx] || b[l-1] || b[l] || b[l+1] || b[l+nx] || b[l+nxny]; } } } } if (NULL != b) { free(b); b = NULL; } return; }
truedepsingleelement-orig-yes.c
/* Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https://github.com/LLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/

// race condition due to a[i]= .. --> .. a[0]

#include <stdlib.h>
#include <stdio.h>

// DataRaceBench kernel.  The data race below is INTENTIONAL: this file is a
// "race: yes" test case used to evaluate data-race detection tools, so the
// race must NOT be "fixed".
int main (int argc, char* argv[])
{
  int len=1000;
  int i;
  int a[1000];

  a[0] = 2;
  // Race: every iteration reads a[0] while the i==0 iteration writes a[0],
  // and the parallel loop imposes no ordering between iterations.
#pragma omp parallel for
  for (i=0;i<len;i++)
    a[i]=a[i]+a[0];

  printf("a[500]=%d\n", a[500]);
  return 0;
}
Mrpt.h
#ifndef CPP_MRPT_H_
#define CPP_MRPT_H_

#include <algorithm>
#include <cmath>
#include <functional>
#include <map>
#include <numeric>
#include <random>
#include <set>
#include <stdexcept>
#include <string>
#include <utility>
#include <vector>

#include <Eigen/Dense>
#include <Eigen/SparseCore>

/** Parameter set describing one (possibly autotuned) Mrpt index configuration. */
struct Mrpt_Parameters {
  int n_trees = 0; /**< Number of trees in the index. */
  int depth = 0; /**< Depth of the trees in the index. */
  int k = 0; /**< Number of nearest neighbors searched for (if the index is autotuned; otherwise 0). */
  int votes = 0; /**< Optimal vote threshold (if the index is autotuned and the target recall is set; otherwise 0). */
  double estimated_qtime = 0.0; /**< Estimated query time (if the index is autotuned and the target recall is set; otherwise 0.0). */
  double estimated_recall = 0.0; /**< Estimated recall (if the index is autotuned and the target recall is set; otherwise 0.0). */
};

class Mrpt {
 public:
  /** @name Constructors
  * The constructor does not actually build the index. The building is done
  * by the function grow() which has to be called before queries can be made.
  * There are two different versions of the constructor which differ only
  * by the type of the input data. The first version takes the data set
  * as `Ref` to `MatrixXf`, which means that the argument
  * can be either `MatrixXf` or `Map<MatrixXf>` (also certain blocks of `MatrixXf`
  * may be accepted, see [Eigen::Ref](https://eigen.tuxfamily.org/dox/TopicFunctionTakingEigenTypes.html)
  * for more information). The second version takes a float
  * pointer to an array containing the data set, and the dimension and
  * the sample size of the data. There are also corresponding versions
  * of all the member functions which take input data. In all cases the data
  * is assumed to be stored in column-major order such that each data point
  * is stored contiguously in memory. In all cases no copies are made of
  * the original data matrix.
  */

  /**
  * @param X_ Eigen ref to the data set, stored as one data point per column
  */
  Mrpt(const Eigen::Ref<const Eigen::MatrixXf> &X_) :
      // X is a non-owning Map: the caller's matrix must outlive this index
      X(Eigen::Map<const Eigen::MatrixXf>(X_.data(), X_.rows(), X_.cols())),
      n_samples(X_.cols()),
      dim(X_.rows()) {}

  /**
  * @param X_ a float array containing the data set with each data point
  * stored contiguously in memory
  * @param dim_ dimension of the data
  * @param n_samples_ number of data points
  */
  Mrpt(const float *X_, int dim_, int n_samples_) :
      X(Eigen::Map<const Eigen::MatrixXf>(X_, dim_, n_samples_)),
      n_samples(n_samples_),
      dim(dim_) {}

  /**@}*/

  /** @name Normal index building.
  * Build a normal (not autotuned) index.
  */

  /**
  * Build a normal index.
  *
  * @param n_trees_ number of trees to be grown
  * @param depth_ depth of the trees; in the set
  * \f$\{1,2, \dots ,\lfloor \log_2 (n) \rfloor \}\f$, where \f$n \f$ is the number
  * of data points
  * @param density_ expected proportion of non-zero components in the
  * random vectors; on the interval \f$(0,1]\f$; default value sets density to
  * \f$ 1 / \sqrt{d} \f$, where \f$d\f$ is the dimension of the data
  * @param seed seed given to a rng when generating random vectors;
  * a default value 0 initializes the rng randomly with std::random_device
  */
  void grow(int n_trees_, int depth_, float density_ = -1.0, int seed = 0) {
    if(!empty()) {
      throw std::logic_error("The index has already been grown.");
    }

    if (n_trees_ <= 0) {
      throw std::out_of_range("The number of trees must be positive.");
    }

    if (depth_ <= 0 || depth_ > std::log2(n_samples)) {
      throw std::out_of_range("The depth must belong to the set {1, ... , log2(n)}.");
    }

    // -1 is the "use default" sentinel; any other negative value (with a
    // small float tolerance) is rejected.
    if (density_ < -1.0001 || density_ > 1.0001 || (density_ > -0.9999 && density_ < -0.0001)) {
      throw std::out_of_range("The density must be on the interval (0,1].");
    }

    n_trees = n_trees_;
    depth = depth_;
    n_pool = n_trees_ * depth_;
    n_array = 1 << (depth_ + 1);  // node count of a complete binary tree of this depth

    if (density_ < 0) {
      density = 1.0 / std::sqrt(dim);
    } else {
      density = density_;
    }

    // Intentional use of ?: for its side effects: exactly one of the two
    // random-projection-matrix builders runs, depending on the density.
    density < 1 ?
    build_sparse_random_matrix(sparse_random_matrix, n_pool, dim, density, seed) :
    build_dense_random_matrix(dense_random_matrix, n_pool, dim, seed);

    split_points = Eigen::MatrixXf(n_array, n_trees);
    tree_leaves = std::vector<std::vector<int>>(n_trees);

    // count_first_leaf_indices_all / grow_subtree are private helpers of this
    // class (declared outside this chunk).
    count_first_leaf_indices_all(leaf_first_indices_all, n_samples, depth);
    leaf_first_indices = leaf_first_indices_all[depth];

    // Trees are independent, so they are grown in parallel.
    #pragma omp parallel for
    for (int n_tree = 0; n_tree < n_trees; ++n_tree) {
      Eigen::MatrixXf tree_projections;

      if (density < 1)
        tree_projections.noalias() = sparse_random_matrix.middleRows(n_tree * depth, depth) * X;
      else
        tree_projections.noalias() = dense_random_matrix.middleRows(n_tree * depth, depth) * X;

      tree_leaves[n_tree] = std::vector<int>(n_samples);
      std::vector<int> &indices = tree_leaves[n_tree];
      std::iota(indices.begin(), indices.end(), 0);

      grow_subtree(indices.begin(), indices.end(), 0, 0, n_tree, tree_projections);
    }
  }

  /**@}*/

  /** @name Autotuned index building
  * Builds an index by autotuning such that the parameters giving the fastest
  * query time at the target recall level are found. If the target recall level
  * is not reached at all, then an index giving the highest recall level
  * is built. The parameters() function can be used to retrieve these optimal
  * parameter values and the estimated query time and the estimated recall.
  */

  /**
  * Build an autotuned index.
  *
  * @param target_recall target recall level; on the range [0,1]
  * @param Q Eigen ref to the the test queries (col = data point, row = dimension).
  * @param k_ number of nearest neighbors searched for
  * @param trees_max number of trees grown; default value -1 sets this to
  * \f$ \mathrm{min}(\sqrt{n}, 1000)\f$, where \f$n\f$ is the number of data points.
  * @param depth_max maximum depth of trees considered when searching for
  * optimal parameters; in the set
  * \f$\{1,2, \dots ,\lfloor \log_2 (n) \rfloor \}\f$, where \f$n \f$
  * is the number of data points; default value -1 sets this to
  * \f$ \log_2(n) - 4 \f$, where \f$n\f$ is the number of data points
  * @param depth_min_ minimum depth of trees considered when searching for
  * optimal parameters; in the set
  * \f$\{1,2, \dots ,\lfloor \log_2 (n) \rfloor \}\f$; a default value -1
  * sets this to 5
  * @param votes_max_ maximum number of votes considered when searching for
  * optimal parameters; a default value -1 sets this to
  * \f$ \mathrm{max}(\lfloor \mathrm{trees\_max} / 10 \rfloor, 10) \f$
  * @param density expected proportion of non-zero components in the random vectors;
  * default value -1.0 sets this to \f$ 1 / \sqrt{d} \f$, where \f$ d\f$ is
  * the dimension of data
  * @param seed seed given to a rng when generating random vectors;
  * a default value 0 initializes the rng randomly with std::random_device
  */
  void grow(double target_recall, const Eigen::Ref<const Eigen::MatrixXf> &Q, int k_,
            int trees_max = -1, int depth_max = -1, int depth_min_ = -1,
            int votes_max_ = -1, float density = -1.0, int seed = 0) {
    // Only the recall target is validated here; the remaining arguments are
    // checked by the recall-free grow() overload that this one delegates to.
    // epsilon is a class-level float tolerance (defined outside this chunk).
    if (target_recall < 0.0 - epsilon || target_recall > 1.0 + epsilon) {
      throw std::out_of_range("Target recall must be on the interval [0,1].");
    }

    // Build the full-size autotuned index, then prune it down to the fastest
    // configuration reaching the target recall.
    grow(Q, k_, trees_max, depth_max, depth_min_, votes_max_, density, seed);
    prune(target_recall);
  }

  /** Build an autotuned index.
  *
  * @param target_recall target recall level; on the range [0,1]
  * @param Q float array containing the test queries
  * @param n_test number of test queries
  * @param k_ number of nearest neighbors searched for
  * @param trees_max number of trees grown; default value -1 sets this to
  * \f$ \mathrm{min}(\sqrt{n}, 1000)\f$, where \f$n\f$ is the number of data points.
  * @param depth_max maximum depth of trees considered when searching for
  * optimal parameters; in the set
  * \f$\{1,2, \dots ,\lfloor \log_2 (n) \rfloor \}\f$, where \f$n \f$
  * is the number of data points; default value -1 sets this to
  * \f$ \log_2(n) - 4 \f$, where \f$n\f$ is the number of data points
  * @param depth_min_ minimum depth of trees considered when searching for
  * optimal parameters; in the set
  * \f$\{1,2, \dots ,\lfloor \log_2 (n) \rfloor \}\f$; a default value -1
  * sets this to 5
  * @param votes_max_ maximum number of votes considered when searching for
  * optimal parameters; a default value -1 sets this to
  * \f$ \mathrm{max}(\lfloor \mathrm{trees\_max} / 10 \rfloor, 10) \f$
  * @param density expected proportion of non-zero components in the random vectors;
  * default value -1.0 sets this to \f$ 1 / \sqrt{d} \f$, where \f$ d\f$ is
  * the dimension of data
  * @param seed seed given to a rng when generating random vectors;
  * a default value 0 initializes the rng randomly with std::random_device
  */
  void grow(double target_recall, const float *Q, int n_test, int k_,
            int trees_max = -1, int depth_max = -1, int depth_min_ = -1,
            int votes_max_ = -1, float density = -1.0, int seed = 0) {
    // Raw-pointer variant of the overload above; same validate / grow / prune
    // sequence.
    if (target_recall < 0.0 - epsilon || target_recall > 1.0 + epsilon) {
      throw std::out_of_range("Target recall must be on the interval [0,1].");
    }

    grow(Q, n_test, k_, trees_max, depth_max, depth_min_, votes_max_, density, seed);
    prune(target_recall);
  }

  /**
  * Get the optimal parameters and the estimated recall and query time found
  * by autotuning. If the index is autotuned without preset recall level,
  * `estimated_recall`, `estimated_qtime` and `votes` are set to their
  * default value 0, and `n_trees` and `depth` are set to `trees_max` and
  * `depth_max`, respectively. If the index is not autotuned,
  * `estimated_recall`, `estimated_qtime`, `votes` and `k` are all set to
  * their default value 0.
  *
  * @return parameters of the index
  */
  Mrpt_Parameters parameters() const {
    // For a normal or not-yet-pruned autotuned index only the structural
    // parameters (and k when autotuned) are meaningful; the stored `par`
    // holds the full set only after pruning/subsetting.
    if (index_type == normal || index_type == autotuned_unpruned) {
      Mrpt_Parameters p;
      p.n_trees = n_trees;
      p.depth = depth;
      p.k = par.k;
      return p;
    }

    return par;
  }

  /**@}*/

  /** @name Autotuned index building without preset recall level
  * Build an autotuned index. This version does not require prespecifying
  * a target recall level, but an index generated by this function can be used
  * to subset different indices with different recall levels. This is done by
  * subset(). The function optimal_parameters() can be used to retrieve a
  * pareto frontier of optimal parameters.
  */

  /**@{*/

  /** Build an autotuned index without prespecifying a recall level.
  *
  * @param data a float array containing the test queries.
  * @param n_test number of test queries
  * @param k_ number of nearest neighbors searched for
  * @param trees_max number of trees grown; default value -1 sets this to
  * \f$ \mathrm{min}(\sqrt{n}, 1000)\f$, where \f$n\f$ is the number of data points.
  * @param depth_max maximum depth of trees considered when searching for
  * optimal parameters; in the set
  * \f$\{1,2, \dots ,\lfloor \log_2 (n) \rfloor \}\f$, where \f$n \f$
  * is the number of data points; default value -1 sets this to
  * \f$ \log_2(n) - 4 \f$, where \f$n\f$ is the number of data points
  * @param depth_min_ minimum depth of trees considered when searching for
  * optimal parameters; in the set
  * \f$\{1,2, \dots ,\lfloor \log_2 (n) \rfloor \}\f$; a default value -1
  * sets this to 5
  * @param votes_max_ maximum number of votes considered when searching for
  * optimal parameters; a default value -1 sets this to
  * \f$ \mathrm{max}(\lfloor \mathrm{trees\_max} / 10 \rfloor, 10) \f$
  * @param density_ expected proportion of non-zero components in the random vectors;
  * default value -1.0 sets this to \f$ 1 / \sqrt{d} \f$, where \f$ d\f$ is
  * the dimension of data
  * @param seed seed given to a rng when generating random vectors;
  * a default value 0 initializes the rng randomly with std::random_device
  **/
  void grow(const float *data, int n_test, int k_, int trees_max = -1, int depth_max = -1,
            int depth_min_ = -1, int votes_max_ = -1, float density_ = -1.0, int seed = 0) {
    // --- phase 1: argument validation (-1 means "use the default") ---
    if(!empty()) {
      throw std::logic_error("The index has already been grown.");
    }

    if (k_ <= 0 || k_ > n_samples) {
      throw std::out_of_range("k_ must belong to the set {1, ..., n}.");
    }

    if (trees_max < -1 || trees_max == 0) {
      throw std::out_of_range("trees_max must be positive.");
    }

    if (depth_max < -1 || depth_max == 0 || depth_max > std::log2(n_samples)) {
      throw std::out_of_range("depth_max must belong to the set {1, ... , log2(n)}.");
    }

    if (depth_min_ < -1 || depth_min_ == 0 || depth_min_ > depth_max) {
      throw std::out_of_range("depth_min_ must belong to the set {1, ... , depth_max}");
    }

    if (votes_max_ < -1 || votes_max_ == 0 || votes_max_ > trees_max) {
      throw std::out_of_range("votes_max_ must belong to the set {1, ... , trees_max}.");
    }

    if (density_ < -1.0001 || density_ > 1.0001 || (density_ > -0.9999 && density_ < -0.0001)) {
      throw std::out_of_range("The density must be on the interval (0,1].");
    }

    // --- phase 2: resolve defaults ---
    if (trees_max == - 1) {
      trees_max = std::min(std::sqrt(n_samples), 1000.0);
    }

    if (depth_min_ == -1) {
      depth_min = std::min(static_cast<int>(std::log2(n_samples)), 5);
    } else {
      depth_min = depth_min_;
    }

    if (depth_max == -1) {
      depth_max = std::max(static_cast<int>(std::log2(n_samples) - 4), depth_min);
    }

    if (votes_max_ == -1) {
      votes_max = std::max(trees_max / 10, std::min(trees_max, 10));
    } else {
      votes_max = votes_max_;
    }

    if (density_ < 0) {
      density = 1.0 / std::sqrt(dim);
    } else {
      density = density_;
    }

    k = k_;

    const Eigen::Map<const Eigen::MatrixXf> Q(data, dim, n_test);

    // --- phase 3: build the maximal index and exact ground truth ---
    grow(trees_max, depth_max, density, seed);
    Eigen::MatrixXi exact(k, n_test);
    compute_exact(Q, exact);

    // --- phase 4: accumulate recall / candidate-set-size statistics over the
    // test queries, for every (depth, votes, trees) combination ---
    std::vector<Eigen::MatrixXd> recalls(depth_max - depth_min + 1);
    cs_sizes = std::vector<Eigen::MatrixXd>(depth_max - depth_min + 1);

    for (int d = depth_min; d <= depth_max; ++d) {
      recalls[d - depth_min] = Eigen::MatrixXd::Zero(votes_max, trees_max);
      cs_sizes[d - depth_min] = Eigen::MatrixXd::Zero(votes_max, trees_max);
    }

    for (int i = 0; i < n_test; ++i) {
      std::vector<Eigen::MatrixXd> recall_tmp(depth_max - depth_min + 1);
      std::vector<Eigen::MatrixXd> cs_size_tmp(depth_max - depth_min + 1);

      count_elected(Q.col(i), Eigen::Map<Eigen::VectorXi>(exact.data() + i * k, k),
                    votes_max, recall_tmp, cs_size_tmp);

      for (int d = depth_min; d <= depth_max; ++d) {
        recalls[d - depth_min] += recall_tmp[d - depth_min];
        cs_sizes[d - depth_min] += cs_size_tmp[d - depth_min];
      }
    }

    // normalize sums into mean recall and mean candidate-set size
    for (int d = depth_min; d <= depth_max; ++d) {
      recalls[d - depth_min] /= (k * n_test);
      cs_sizes[d - depth_min] /= n_test;
    }

    // --- phase 5: fit query-time models and keep the pareto frontier of
    // (recall, estimated query time) parameter combinations ---
    fit_times(Q);
    std::set<Mrpt_Parameters,decltype(is_faster)*> pars = list_parameters(recalls);
    opt_pars = pareto_frontier(pars);

    index_type = autotuned_unpruned;
    par.k = k_;
  }

  /** Build an autotuned index without prespecifying a recall level.
  *
  * @param Q Eigen ref to the test queries.
  * @param k_ number of nearest neighbors searched for
  * @param trees_max number of trees grown; default value -1 sets this to
  * \f$ \mathrm{min}(\sqrt{n}, 1000)\f$, where \f$n\f$ is the number of data points.
  * @param depth_max depth of trees grown; ; on the set
  * \f$\{1,2, \dots ,\lfloor \log_2 (n) \rfloor \}\f$, where \f$n \f$
  * is the number of data points; default value -1 sets this to
  * \f$ \log_2(n) - 4 \f$, where \f$n\f$ is the number of data points
  * @param depth_min_ minimum depth of trees considered when searching for
  * optimal parameters on the set
  * \f$\{1,2, \dots ,\lfloor \log_2 (n) \rfloor \}\f$; a default value -1
  * sets this to 5
  * @param votes_max_ maximum number of votes considered when searching for
  * optimal parameters; a default value -1 sets this to
  * \f$ \mathrm{max}(\lfloor \mathrm{trees\_max} / 10 \rfloor, 10) \f$
  * @param density expected proportion of non-zero components of random vectors;
  * default value -1.0 sets this to \f$ 1 / \sqrt{d} \f$, where \f$ d\f$ is
  * the dimension of data
  * @param seed seed given to a rng when generating random vectors;
  * a default value 0 initializes the rng randomly with std::random_device
  */
  void grow(const Eigen::Ref<const Eigen::MatrixXf> &Q, int k_, int trees_max = -1,
            int depth_max = -1, int depth_min_ = -1, int votes_max_ = -1,
            float density_ = -1.0, int seed = 0) {
    // Thin wrapper: check the query dimension, then delegate to the
    // raw-pointer overload above (which validates everything else).
    if (Q.rows() != dim) {
      throw std::invalid_argument("Dimensions of the data and the validation set do not match.");
    }
    grow(Q.data(), Q.cols(), k_, trees_max, depth_max, depth_min_, votes_max_, density_, seed);
  }

  /** Create a new index by copying trees from an autotuned index grown
  * without a prespecified recall level. The index is created so that
  * it gives a fastest query time at the recall level given as the parameter.
  * If this recall level is not met, then it creates an index with a
  * highest possible recall level.
  *
  * @param target_recall target recall level; on the range [0,1]
  * @return an autotuned Mrpt index with a recall level at least as
  * high as target_recall
  */
  Mrpt subset(double target_recall) const {
    if (target_recall < 0.0 - epsilon || target_recall > 1.0 + epsilon) {
      throw std::out_of_range("Target recall must be on the interval [0,1].");
    }

    // The new index shares the (non-owning) data map X with this index.
    Mrpt index2(X);
    // parameters(target_recall) is a private overload (declared outside this
    // chunk) that picks the cheapest configuration reaching target_recall.
    index2.par = parameters(target_recall);

    // depth of the full (maximal) index, used as the row stride when copying
    // per-tree slices of the random matrices below
    int depth_max = depth;

    index2.n_trees = index2.par.n_trees;
    index2.depth = index2.par.depth;
    index2.votes = index2.par.votes;
    index2.n_pool = index2.depth * index2.n_trees;
    index2.n_array = 1 << (index2.depth + 1);
    // keep only the first n_trees trees of the full index
    index2.tree_leaves.assign(tree_leaves.begin(), tree_leaves.begin() + index2.n_trees);
    index2.leaf_first_indices_all = leaf_first_indices_all;
    index2.density = density;
    index2.k = k;

    // shallower trees need only the top-left corner of the split-point table
    index2.split_points = split_points.topLeftCorner(index2.n_array, index2.n_trees);
    index2.leaf_first_indices = leaf_first_indices_all[index2.depth];

    // Copy the first `index2.depth` projection rows of each retained tree
    // from the full index (whose trees are depth_max rows apart).
    if (index2.density < 1) {
      index2.sparse_random_matrix = Eigen::SparseMatrix<float, Eigen::RowMajor>(index2.n_pool, index2.dim);
      for (int n_tree = 0; n_tree < index2.n_trees; ++n_tree)
        index2.sparse_random_matrix.middleRows(n_tree * index2.depth, index2.depth) =
            sparse_random_matrix.middleRows(n_tree * depth_max, index2.depth);
    } else {
      index2.dense_random_matrix = Eigen::Matrix<float, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor>(index2.n_pool, index2.dim);
      for (int n_tree = 0; n_tree < index2.n_trees; ++n_tree)
        index2.dense_random_matrix.middleRows(n_tree * index2.depth, index2.depth) =
            dense_random_matrix.middleRows(n_tree * depth_max, index2.depth);
    }

    index2.index_type = autotuned;
    return index2;
  }

  /**
  * Return the pareto frontier of optimal parameters for an index which
  * is autotuned without setting a recall level. This means that each
  * parameter combination in a returned vector is optimal in a sense
  * that it is a fastest (measured by query time) parameter combination
  * to obtain as least as high recall level that it has.
  *
  * @return vector of optimal parameters
  */
  std::vector<Mrpt_Parameters> optimal_parameters() const {
    if (index_type == normal) {
      throw std::logic_error("The list of optimal parameters cannot be retrieved for the non-autotuned index.");
    }
    if (index_type == autotuned) {
      throw std::logic_error("The list of optimal parameters cannot be retrieved for the index which has already been subsetted or deleted to the target recall level.");
    }

    // copy the internal set (ordered by the is_faster comparator) into a
    // plain vector for the caller
    std::vector<Mrpt_Parameters> new_pars;
    std::copy(opt_pars.begin(), opt_pars.end(), std::back_inserter(new_pars));
    return new_pars;
  }

  /**@}*/

  /** @name Approximate k-nn search
  * A query using a non-autotuned index. Finds k approximate nearest neighbors
  * from a data set X for a query point q. Because the index is not autotuned,
  * k and vote threshold are set manually. The indices of k nearest neighbors
  * are written to a buffer out, which has to be preallocated to have at least
  * length k. Optionally also Euclidean distances to these k nearest points
  * are written to a buffer out_distances. If there are less than k points in
  * the candidate set, -1 is written to the remaining locations of the
  * output buffers.
  */

  /**
  * Approximate k-nn search using a normal index.
 *
 * @param data pointer to an array containing the query point
 * @param k number of nearest neighbors searched for
 * @param vote_threshold - number of votes required for a query point to be included in the candidate set
 * @param out output buffer (size = k) for the indices of k approximate nearest neighbors
 * @param out_distances optional output buffer (size = k) for distances to k approximate nearest neighbors
 * @param out_n_elected optional output parameter (size = 1) for the candidate set size
 */
void query(const float *data, int k, int vote_threshold, int *out,
           float *out_distances = nullptr, int *out_n_elected = nullptr) const {
  if (k <= 0 || k > n_samples) {
    throw std::out_of_range("k must belong to the set {1, ..., n}.");
  }
  if (vote_threshold <= 0 || vote_threshold > n_trees) {
    throw std::out_of_range("vote_threshold must belong to the set {1, ... , n_trees}.");
  }
  if (empty()) {
    throw std::logic_error("The index must be built before making queries.");
  }

  const Eigen::Map<const Eigen::VectorXf> q(data, dim);

  // Project the query onto every random vector of every tree in one product.
  Eigen::VectorXf projected_query(n_pool);
  if (density < 1)
    projected_query.noalias() = sparse_random_matrix * q;
  else
    projected_query.noalias() = dense_random_matrix * q;

  std::vector<int> found_leaves(n_trees);

  /*
   * The following loops over all trees, and routes the query to exactly one
   * leaf in each.
   */
  #pragma omp parallel for
  for (int n_tree = 0; n_tree < n_trees; ++n_tree) {
    int idx_tree = 0; // root of the implicit binary tree stored as an array
    for (int d = 0; d < depth; ++d) {
      const int j = n_tree * depth + d;
      const int idx_left = 2 * idx_tree + 1;
      const int idx_right = idx_left + 1;
      const float split_point = split_points(idx_tree, n_tree);
      if (projected_query(j) <= split_point) {
        idx_tree = idx_left;
      } else {
        idx_tree = idx_right;
      }
    }
    // Convert array position of the reached node into a leaf ordinal.
    found_leaves[n_tree] = idx_tree - (1 << depth) + 1;
  }

  int n_elected = 0, max_leaf_size = n_samples / (1 << depth) + 1;
  Eigen::VectorXi elected(n_trees * max_leaf_size);
  Eigen::VectorXi votes = Eigen::VectorXi::Zero(n_samples);

  // count votes
  for (int n_tree = 0; n_tree < n_trees; ++n_tree) {
    int leaf_begin = leaf_first_indices[found_leaves[n_tree]];
    int leaf_end = leaf_first_indices[found_leaves[n_tree] + 1];
    const std::vector<int> &indices = tree_leaves[n_tree];
    for (int i = leaf_begin; i < leaf_end; ++i) {
      int idx = indices[i];
      // A point enters the candidate set exactly once, when its vote count
      // first reaches the threshold.
      if (++votes(idx) == vote_threshold)
        elected(n_elected++) = idx;
    }
  }

  if (out_n_elected) {
    *out_n_elected = n_elected;
  }

  exact_knn(q, k, elected, n_elected, out, out_distances);
}

/**
 * Approximate k-nn search using a normal index.
 *
 * @param q Eigen ref to the query point
 * @param k number of nearest neighbors searched for
 * @param vote_threshold number of votes required for a query point to be included in the candidate set
 * @param out output buffer (size = k) for the indices of k approximate nearest neighbors
 * @param out_distances optional output buffer (size = k) for distances to k approximate nearest neighbors
 * @param out_n_elected optional output parameter (size = 1) for the candidate set size
 */
void query(const Eigen::Ref<const Eigen::VectorXf> &q, int k, int vote_threshold, int *out,
           float *out_distances = nullptr, int *out_n_elected = nullptr) const {
  query(q.data(), k, vote_threshold, out, out_distances, out_n_elected);
}

/**@}*/

/** @name Approximate k-nn search using autotuned index
 * Approximate k-nn search using an autotuned index.
Finds k approximate
 * nearest neighbors from a data set X for a query point q. Because the index
 * is autotuned, no parameters other than a query point and an output are
 * required: k is preset, and the optimal vote count is used automatically.
 * The indices of k nearest neighbors are written to a buffer out, which has
 * to be preallocated to have at least length k. Optionally also the Euclidean
 * distances to these k nearest points are written to a buffer
 * out_distances. If there are less than k points in the candidate set,
 * -1 is written to the remaining locations of the output buffers.
 */

/**
 * Approximate k-nn search using an autotuned index.
 *
 * @param q pointer to an array containing the query point
 * @param out output buffer (size = k) for the indices of k approximate nearest neighbors
 * @param out_distances optional output buffer (size = k) for distances to k approximate nearest neighbors
 * @param out_n_elected optional output parameter (size = 1) for the candidate set size
 */
void query(const float *q, int *out, float *out_distances = nullptr, int *out_n_elected = nullptr) const {
  if (index_type == normal) {
    throw std::logic_error("The index is not autotuned: k and vote threshold has to be specified.");
  }
  if (index_type == autotuned_unpruned) {
    throw std::logic_error("The target recall level has to be set before making queries.");
  }
  // k and votes were fixed by autotuning; delegate to the manual overload.
  query(q, k, votes, out, out_distances, out_n_elected);
}

/**
 * Approximate k-nn search using an autotuned index.
 *
 * @param q Eigen ref to the query point
 * @param out output buffer (size = k) for the indices of k approximate nearest neighbors
 * @param out_distances optional output buffer (size = k) for distances to k approximate nearest neighbors
 * @param out_n_elected optional output parameter (size = 1) for the candidate set size
 */
void query(const Eigen::Ref<const Eigen::VectorXf> &q, int *out,
           float *out_distances = nullptr, int *out_n_elected = nullptr) const {
  query(q.data(), out, out_distances, out_n_elected);
}

/**@}*/

/** @name Exact k-nn search
 * Functions for fast exact k-nn search: find k nearest neighbors for a
 * query point q from a data set X_. The indices of k nearest neighbors are
 * written to a buffer out, which has to be preallocated to have at least
 * length k. Optionally also the Euclidean distances to these k nearest points
 * are written to a buffer out_distances. There are both static and member
 * versions.
 */

/**
 * @param q_data pointer to an array containing the query point
 * @param X_data pointer to an array containing the data set
 * @param dim dimension of data
 * @param n_samples number of points in a data set
 * @param k number of neighbors searched for
 * @param out output buffer (size = k) for the indices of k nearest neighbors
 * @param out_distances optional output buffer (size = k) for the distances to k nearest neighbors
 */
static void exact_knn(const float *q_data, const float *X_data, int dim, int n_samples, int k,
                      int *out, float *out_distances = nullptr) {
  const Eigen::Map<const Eigen::MatrixXf> X(X_data, dim, n_samples);
  const Eigen::Map<const Eigen::VectorXf> q(q_data, dim);

  if (k < 1 || k > n_samples) {
    throw std::out_of_range("k must be positive and no greater than the sample size of data X.");
  }

  // Squared distances are sufficient for ranking; sqrt only on output.
  Eigen::VectorXf distances(n_samples);

  #pragma omp parallel for
  for (int i = 0; i < n_samples; ++i)
    distances(i) = (X.col(i) - q).squaredNorm();

  if (k == 1) {
    Eigen::MatrixXf::Index index;
    distances.minCoeff(&index);
    out[0] = index;

    if (out_distances)
      out_distances[0] = std::sqrt(distances(index));

    return;
  }

  // Only the k smallest need ordering.
  Eigen::VectorXi idx(n_samples);
  std::iota(idx.data(), idx.data() + n_samples, 0);
  std::partial_sort(idx.data(), idx.data() + k, idx.data() + n_samples,
                    [&distances](int i1, int i2) { return distances(i1) < distances(i2); });

  for (int i = 0; i < k; ++i)
    out[i] = idx(i);

  if (out_distances) {
    for (int i = 0; i < k; ++i)
      out_distances[i] = std::sqrt(distances(idx(i)));
  }
}

/**
 * @param q Eigen ref to a query point
 * @param X Eigen ref to a data set
 * @param k number of neighbors searched for
 * @param out output buffer (size = k) for the indices of k nearest neighbors
 * @param out_distances optional output buffer (size = k) for the distances to k nearest neighbors
 */
static void exact_knn(const Eigen::Ref<const Eigen::VectorXf> &q,
                      const Eigen::Ref<const Eigen::MatrixXf> &X,
                      int k, int *out, float *out_distances = nullptr) {
  Mrpt::exact_knn(q.data(), X.data(), X.rows(), X.cols(), k, out, out_distances);
}

/**
 * @param q pointer to an array containing the query point
 * @param k number of neighbors searched for
 * @param out output buffer (size = k) for the indices of k nearest neighbors
 * @param out_distances optional output buffer (size = k) for the distances to k nearest neighbors
 */
void exact_knn(const float *q, int k, int *out, float *out_distances = nullptr) const {
  Mrpt::exact_knn(q, X.data(), dim, n_samples, k, out, out_distances);
}

/**
 * Member version searching the data set held by this index.
 *
 * @param q Eigen ref to the query point
 * @param k number of neighbors searched for
 * @param out output buffer (size = k) for the indices of k nearest neighbors
 * @param out_distances optional output buffer (size = k) for the distances to k nearest neighbors
 */
void exact_knn(const Eigen::Ref<const Eigen::VectorXf> &q, int k, int *out,
               float *out_distances = nullptr) const {
  Mrpt::exact_knn(q.data(), X.data(), dim, n_samples, k, out, out_distances);
}

/**@}*/

/** @name Utility functions
 *
Saving and loading an index and checking if it is already constructed. * Saving and loading work for both autotuned and non-autotuned indices, and * load() retrieves also the optimal parameters found by autotuning. * The same data set used to build a saved index has to be used to * construct the index into which it is loaded. */ /** * Saves the index to a file. * * @param path - filepath to the output file. * @return true if saving succeeded, false otherwise. */ bool save(const char *path) const { FILE *fd; if ((fd = fopen(path, "wb")) == NULL) return false; int i = index_type; fwrite(&i, sizeof(int), 1, fd); if (index_type == 2) { write_parameter_list(opt_pars, fd); } write_parameters(&par, fd); fwrite(&n_trees, sizeof(int), 1, fd); fwrite(&depth, sizeof(int), 1, fd); fwrite(&density, sizeof(float), 1, fd); fwrite(split_points.data(), sizeof(float), n_array * n_trees, fd); // save tree leaves for (int i = 0; i < n_trees; ++i) { int sz = tree_leaves[i].size(); fwrite(&sz, sizeof(int), 1, fd); fwrite(&tree_leaves[i][0], sizeof(int), sz, fd); } // save random matrix if (density < 1) { int non_zeros = sparse_random_matrix.nonZeros(); fwrite(&non_zeros, sizeof(int), 1, fd); for (int k = 0; k < sparse_random_matrix.outerSize(); ++k) { for (Eigen::SparseMatrix<float, Eigen::RowMajor>::InnerIterator it(sparse_random_matrix, k); it; ++it) { float val = it.value(); int row = it.row(), col = it.col(); fwrite(&row, sizeof(int), 1, fd); fwrite(&col, sizeof(int), 1, fd); fwrite(&val, sizeof(float), 1, fd); } } } else { fwrite(dense_random_matrix.data(), sizeof(float), n_pool * dim, fd); } fclose(fd); return true; } /** * Loads an index from a file. * * @param path filepath to the index file. * @return true if loading succeeded, false otherwise. 
*/ bool load(const char *path) { FILE *fd; if ((fd = fopen(path, "rb")) == NULL) return false; int i; fread(&i, sizeof(int), 1, fd); index_type = static_cast<itype>(i); if (index_type == autotuned_unpruned) { read_parameter_list(fd); } read_parameters(&par, fd); fread(&n_trees, sizeof(int), 1, fd); fread(&depth, sizeof(int), 1, fd); fread(&density, sizeof(float), 1, fd); n_pool = n_trees * depth; n_array = 1 << (depth + 1); count_first_leaf_indices_all(leaf_first_indices_all, n_samples, depth); leaf_first_indices = leaf_first_indices_all[depth]; split_points = Eigen::MatrixXf(n_array, n_trees); fread(split_points.data(), sizeof(float), n_array * n_trees, fd); // load tree leaves tree_leaves = std::vector<std::vector<int>>(n_trees); for (int i = 0; i < n_trees; ++i) { int sz; fread(&sz, sizeof(int), 1, fd); std::vector<int> leaves(sz); fread(&leaves[0], sizeof(int), sz, fd); tree_leaves[i] = leaves; } // load random matrix if (density < 1) { int non_zeros; fread(&non_zeros, sizeof(int), 1, fd); sparse_random_matrix = Eigen::SparseMatrix<float>(n_pool, dim); std::vector<Eigen::Triplet<float>> triplets; for (int k = 0; k < non_zeros; ++k) { int row, col; float val; fread(&row, sizeof(int), 1, fd); fread(&col, sizeof(int), 1, fd); fread(&val, sizeof(float), 1, fd); triplets.push_back(Eigen::Triplet<float>(row, col, val)); } sparse_random_matrix.setFromTriplets(triplets.begin(), triplets.end()); sparse_random_matrix.makeCompressed(); } else { dense_random_matrix = Eigen::Matrix<float, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor>(n_pool, dim); fread(dense_random_matrix.data(), sizeof(float), n_pool * dim, fd); } fclose(fd); return true; } /** * Is the index is already constructed or not? * * @return - is the index empty? */ bool empty() const { return n_trees == 0; } /**@}*/ /** @name * Friend declarations for test fixtures. Tests are located at * https://github.com/vioshyvo/RP-test. 
 */
friend class MrptTest;
friend class UtilityTest;

/**@}*/

private:

/**
 * Builds a single random projection tree. The tree is constructed by recursively
 * projecting the data on a random vector and splitting into two by the median.
 */
void grow_subtree(std::vector<int>::iterator begin, std::vector<int>::iterator end,
                  int tree_level, int i, int n_tree, const Eigen::MatrixXf &tree_projections) {
  int n = end - begin;
  int idx_left = 2 * i + 1;   // children of node i in the implicit array tree
  int idx_right = idx_left + 1;

  if (tree_level == depth) return;

  // Median split on the projection for this level.
  std::nth_element(begin, begin + n / 2, end,
                   [&tree_projections, tree_level] (int i1, int i2) {
                     return tree_projections(tree_level, i1) < tree_projections(tree_level, i2);
                   });
  auto mid = end - n / 2;

  if (n % 2) {
    // Odd count: split point is the median itself.
    split_points(i, n_tree) = tree_projections(tree_level, *(mid - 1));
  } else {
    // Even count: split halfway between the two middle projections.
    auto left_it = std::max_element(begin, mid,
                                    [&tree_projections, tree_level] (int i1, int i2) {
                                      return tree_projections(tree_level, i1) < tree_projections(tree_level, i2);
                                    });
    split_points(i, n_tree) = (tree_projections(tree_level, *mid) + tree_projections(tree_level, *left_it)) / 2.0;
  }

  grow_subtree(begin, mid, tree_level + 1, idx_left, n_tree, tree_projections);
  grow_subtree(mid, end, tree_level + 1, idx_right, n_tree, tree_projections);
}

/**
 * Find k nearest neighbors from data for the query point
 */
void exact_knn(const Eigen::Map<const Eigen::VectorXf> &q, int k, const Eigen::VectorXi &indices,
               int n_elected, int *out, float *out_distances = nullptr) const {
  // Empty candidate set: fill the output buffers with the -1 sentinel.
  if (!n_elected) {
    for (int i = 0; i < k; ++i)
      out[i] = -1;
    if (out_distances) {
      for (int i = 0; i < k; ++i)
        out_distances[i] = -1;
    }
    return;
  }

  Eigen::VectorXf distances(n_elected);

  #pragma omp parallel for
  for (int i = 0; i < n_elected; ++i)
    distances(i) = (X.col(indices(i)) - q).squaredNorm();

  if (k == 1) {
    Eigen::MatrixXf::Index index;
    distances.minCoeff(&index);
    // NOTE(review): the `n_elected ?` ternaries below are dead — n_elected != 0
    // is guaranteed by the early return above.
    out[0] = n_elected ? indices(index) : -1;

    if (out_distances)
      out_distances[0] = n_elected ? std::sqrt(distances(index)) : -1;

    return;
  }

  int n_to_sort = n_elected > k ? k : n_elected;
  Eigen::VectorXi idx(n_elected);
  std::iota(idx.data(), idx.data() + n_elected, 0);
  std::partial_sort(idx.data(), idx.data() + n_to_sort, idx.data() + n_elected,
                    [&distances](int i1, int i2) { return distances(i1) < distances(i2); });

  // Pad with -1 when the candidate set has fewer than k points.
  for (int i = 0; i < k; ++i)
    out[i] = i < n_elected ? indices(idx(i)) : -1;

  if (out_distances) {
    for (int i = 0; i < k; ++i)
      out_distances[i] = i < n_elected ? std::sqrt(distances(idx(i))) : -1;
  }
}

// Shrinks this autotuned index in place to the parameter combination that
// meets target_recall fastest (in-place analogue of subset()).
void prune(double target_recall) {
  if (target_recall < 0.0 - epsilon || target_recall > 1.0 + epsilon) {
    throw std::out_of_range("Target recall must be on the interval [0,1].");
  }

  par = parameters(target_recall);
  if (!par.n_trees) {
    return;
  }

  int depth_max = depth;

  n_trees = par.n_trees;
  depth = par.depth;
  votes = par.votes;
  n_pool = depth * n_trees;
  n_array = 1 << (depth + 1);
  tree_leaves.resize(n_trees);
  tree_leaves.shrink_to_fit();
  split_points.conservativeResize(n_array, n_trees);
  leaf_first_indices = leaf_first_indices_all[depth];

  // Repack the projection matrix: keep the first `depth` rows of each of the
  // first n_trees blocks (old blocks have depth_max rows each).
  if (density < 1) {
    Eigen::SparseMatrix<float, Eigen::RowMajor> srm_new(n_pool, dim);
    for (int n_tree = 0; n_tree < n_trees; ++n_tree)
      srm_new.middleRows(n_tree * depth, depth) = sparse_random_matrix.middleRows(n_tree * depth_max, depth);
    sparse_random_matrix = srm_new;
  } else {
    Eigen::Matrix<float, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor> drm_new(n_pool, dim);
    for (int n_tree = 0; n_tree < n_trees; ++n_tree)
      drm_new.middleRows(n_tree * depth, depth) = dense_random_matrix.middleRows(n_tree * depth_max, depth);
    dense_random_matrix = drm_new;
  }

  index_type = autotuned;
}

// For one query point, accumulates recall and candidate-set-size statistics
// for every (depth, vote count, tree count) combination in a single pass;
// `exact` holds the true k-nn of the query for recall computation.
void count_elected(const Eigen::VectorXf &q, const Eigen::Map<Eigen::VectorXi> &exact, int votes_max,
                   std::vector<Eigen::MatrixXd> &recalls, std::vector<Eigen::MatrixXd> &cs_sizes) const {
  Eigen::VectorXf projected_query(n_pool);
  if (density < 1)
    projected_query.noalias() = sparse_random_matrix * q;
  else
    projected_query.noalias() = dense_random_matrix * q;

  int depth_min = depth - recalls.size() + 1;
  std::vector<std::vector<int>> start_indices(n_trees);

  #pragma omp parallel for
  for (int n_tree = 0; n_tree < n_trees; ++n_tree) {
    start_indices[n_tree] = std::vector<int>(depth - depth_min + 1);
    int idx_tree = 0;
    for (int d = 0; d < depth; ++d) {
      const int j = n_tree * depth + d;
      const int idx_left = 2 * idx_tree + 1;
      const int idx_right = idx_left + 1;
      const float split_point = split_points(idx_tree, n_tree);
      if (projected_query(j) <= split_point) {
        idx_tree = idx_left;
      } else {
        idx_tree = idx_right;
      }
      // Record the leaf reached at every candidate depth, not just the final one.
      if (d >= depth_min - 1)
        start_indices[n_tree][d - depth_min + 1] = idx_tree - (1 << (d + 1)) + 1;
    }
  }

  const int *exact_begin = exact.data();
  const int *exact_end = exact.data() + exact.size();

  for (int depth_crnt = depth_min; depth_crnt <= depth; ++depth_crnt) {
    Eigen::VectorXi votes = Eigen::VectorXi::Zero(n_samples);
    const std::vector<int> &leaf_first_indices = leaf_first_indices_all[depth_crnt];

    Eigen::MatrixXd recall(votes_max, n_trees);
    Eigen::MatrixXd candidate_set_size(votes_max, n_trees);
    recall.col(0) = Eigen::VectorXd::Zero(votes_max);
    candidate_set_size.col(0) = Eigen::VectorXd::Zero(votes_max);

    // count votes
    for (int n_tree = 0; n_tree < n_trees; ++n_tree) {
      std::vector<int> &found_leaves = start_indices[n_tree];
      // Statistics for t trees build cumulatively on those for t-1 trees.
      if (n_tree) {
        recall.col(n_tree) = recall.col(n_tree - 1);
        candidate_set_size.col(n_tree) = candidate_set_size.col(n_tree - 1);
      }

      int leaf_begin = leaf_first_indices[found_leaves[depth_crnt - depth_min]];
      int leaf_end = leaf_first_indices[found_leaves[depth_crnt - depth_min] + 1];

      const std::vector<int> &indices = tree_leaves[n_tree];
      for (int i = leaf_begin; i < leaf_end; ++i) {
        int idx = indices[i];
        int v = ++votes(idx);
        if (v <= votes_max) {
          candidate_set_size(v - 1, n_tree)++;
          if (std::find(exact_begin, exact_end, idx) != exact_end)
            recall(v - 1, n_tree)++;
        }
      }
    }

    recalls[depth_crnt - depth_min] = recall;
    cs_sizes[depth_crnt - depth_min] = candidate_set_size;
  }
}

/**
 * Builds a random sparse
matrix for use in random projection. The components of
 * the matrix are drawn from the distribution
 *
 * 0 w.p. 1 - a
 * N(0, 1) w.p. a
 *
 * where a = density.
 */
static void build_sparse_random_matrix(Eigen::SparseMatrix<float, Eigen::RowMajor> &sparse_random_matrix,
                                       int n_row, int n_col, float density, int seed = 0) {
  sparse_random_matrix = Eigen::SparseMatrix<float, Eigen::RowMajor>(n_row, n_col);

  std::random_device rd;
  int s = seed ? seed : rd();   // seed == 0 means "seed randomly"
  std::mt19937 gen(s);
  std::uniform_real_distribution<float> uni_dist(0, 1);
  std::normal_distribution<float> norm_dist(0, 1);

  std::vector<Eigen::Triplet<float>> triplets;
  for (int j = 0; j < n_row; ++j) {
    for (int i = 0; i < n_col; ++i) {
      // Keep each component with probability `density`.
      if (uni_dist(gen) > density) continue;
      triplets.push_back(Eigen::Triplet<float>(j, i, norm_dist(gen)));
    }
  }

  sparse_random_matrix.setFromTriplets(triplets.begin(), triplets.end());
  sparse_random_matrix.makeCompressed();
}

/*
 * Builds a random dense matrix for use in random projection. The components of
 * the matrix are drawn from the standard normal distribution.
 */
static void build_dense_random_matrix(Eigen::Matrix<float, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor> &dense_random_matrix,
                                      int n_row, int n_col, int seed = 0) {
  dense_random_matrix = Eigen::Matrix<float, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor>(n_row, n_col);

  std::random_device rd;
  int s = seed ? seed : rd();
  std::mt19937 gen(s);
  std::normal_distribution<float> normal_dist(0, 1);

  std::generate(dense_random_matrix.data(), dense_random_matrix.data() + n_row * n_col,
                [&normal_dist, &gen] { return normal_dist(gen); });
}

// Computes the true k nearest neighbors (sorted ascending by index) for every
// column of Q; used as ground truth when measuring recall during autotuning.
void compute_exact(const Eigen::Map<const Eigen::MatrixXf> &Q, Eigen::MatrixXi &out_exact) const {
  int n_test = Q.cols();
  Eigen::VectorXi idx(n_samples);
  std::iota(idx.data(), idx.data() + n_samples, 0);
  for (int i = 0; i < n_test; ++i) {
    exact_knn(Eigen::Map<const Eigen::VectorXf>(Q.data() + i * dim, dim), k, idx, n_samples, out_exact.data() + i * k);
    std::sort(out_exact.data() + i * k, out_exact.data() + i * k + k);
  }
}

// Ordering predicate for parameter sets: sorted by estimated query time.
static bool is_faster(const Mrpt_Parameters &par1, const Mrpt_Parameters &par2) {
  return par1.estimated_qtime < par2.estimated_qtime;
}

// Routes projected_query down the first n_trees trees truncated to depth_crnt
// and collects into `elected` all points whose vote count reaches vote_threshold.
void vote(const Eigen::VectorXf &projected_query, int vote_threshold, Eigen::VectorXi &elected,
          int &n_elected, int n_trees, int depth_crnt) {
  std::vector<int> found_leaves(n_trees);
  const std::vector<int> &leaf_first_indices = leaf_first_indices_all[depth_crnt];

  #pragma omp parallel for
  for (int n_tree = 0; n_tree < n_trees; ++n_tree) {
    int idx_tree = 0;
    for (int d = 0; d < depth_crnt; ++d) {
      // Rows of the projection are laid out with the full `depth` stride.
      const int j = n_tree * depth + d;
      const int idx_left = 2 * idx_tree + 1;
      const int idx_right = idx_left + 1;
      const float split_point = split_points(idx_tree, n_tree);
      if (projected_query(j) <= split_point) {
        idx_tree = idx_left;
      } else {
        idx_tree = idx_right;
      }
    }
    found_leaves[n_tree] = idx_tree - (1 << depth_crnt) + 1;
  }

  int max_leaf_size = n_samples / (1 << depth_crnt) + 1;
  elected = Eigen::VectorXi(n_trees * max_leaf_size);
  Eigen::VectorXi votes = Eigen::VectorXi::Zero(n_samples);

  // count votes
  for (int n_tree = 0; n_tree < n_trees; ++n_tree) {
    int leaf_begin = leaf_first_indices[found_leaves[n_tree]];
    int leaf_end = leaf_first_indices[found_leaves[n_tree] + 1];
    const std::vector<int> &indices = tree_leaves[n_tree];
    for (int i = leaf_begin; i < leaf_end; ++i) {
      int idx = indices[i];
      if (++votes(idx) == vote_threshold)
        elected(n_elected++) = idx;
    }
  }
}

// Times query projection for a grid of (trees, depth) sizes and fits a linear
// model (Theil-Sen); also collects observed candidate-set sizes into exact_x.
std::pair<double,double> fit_projection_times(const Eigen::Map<const Eigen::MatrixXf> &Q,
                                              std::vector<int> &exact_x) {
  std::vector<double> projection_times, projection_x;
  long double idx_sum = 0;

  std::vector<int> tested_trees {1,2,3,4,5,7,10,15,20,25,30,40,50};
  generate_x(tested_trees, n_trees, 10, n_trees);

  for (int d = depth_min; d <= depth; ++d) {
    for (int i = 0; i < (int) tested_trees.size(); ++i) {
      int t = tested_trees[i];
      int n_random_vectors = t * d;
      projection_x.push_back(n_random_vectors);
      Eigen::SparseMatrix<float, Eigen::RowMajor> sparse_mat;
      Eigen::Matrix<float, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor> dense_mat;
      if (density < 1) {
        build_sparse_random_matrix(sparse_mat, n_random_vectors, dim, density);
      } else {
        build_dense_random_matrix(dense_mat, n_random_vectors, dim);
      }

      double start_proj = omp_get_wtime();
      Eigen::VectorXf projected_query(n_random_vectors);
      if (density < 1) {
        projected_query.noalias() = sparse_mat * Q.col(0);
      } else {
        projected_query.noalias() = dense_mat * Q.col(0);
      }
      double end_proj = omp_get_wtime();
      projection_times.push_back(end_proj - start_proj);
      idx_sum += projected_query.norm();

      int votes_index = votes_max < t ? votes_max : t;
      for (int v = 1; v <= votes_index; ++v) {
        int cs_size = get_candidate_set_size(t, d, v);
        if (cs_size > 0) exact_x.push_back(cs_size);
      }
    }
  }

  // use results to ensure that the compiler does not optimize away the timed code.
  projection_x[0] += idx_sum > 1.0 ? 0.0000 : 0.0001;
  return fit_theil_sen(projection_x, projection_times);
}

// Times the voting stage for grids of tree counts and vote thresholds at each
// depth, fitting one Theil-Sen model per (depth, threshold) pair.
std::vector<std::map<int,std::pair<double,double>>> fit_voting_times(const Eigen::Map<const Eigen::MatrixXf> &Q) {
  int n_test = Q.cols();
  std::random_device rd;
  std::mt19937 rng(rd());
  std::uniform_int_distribution<int> uni(0, n_test - 1);

  std::vector<int> tested_trees {1,2,3,4,5,7,10,15,20,25,30,40,50};
  generate_x(tested_trees, n_trees, 10, n_trees);
  std::vector<int> vote_thresholds_x {1,2,3,4,5,6,7,8,9,10,11,12,13,14,15};
  generate_x(vote_thresholds_x, votes_max, 10, votes_max);

  beta_voting = std::vector<std::map<int,std::pair<double,double>>>();

  for (int d = depth_min; d <= depth; ++d) {
    std::map<int,std::pair<double,double>> beta;
    for (const auto &v : vote_thresholds_x) {
      long double idx_sum = 0;
      std::vector<double> voting_times, voting_x;
      for (int i = 0; i < (int) tested_trees.size(); ++i) {
        int t = tested_trees[i];
        int n_el = 0;
        Eigen::VectorXi elected;
        auto ri = uni(rng);   // random validation query per measurement

        Eigen::VectorXf projected_query(n_trees * depth);
        if (density < 1) {
          projected_query.noalias() = sparse_random_matrix * Q.col(ri);
        } else {
          projected_query.noalias() = dense_random_matrix * Q.col(ri);
        }

        double start_voting = omp_get_wtime();
        vote(projected_query, v, elected, n_el, t, d);
        double end_voting = omp_get_wtime();

        voting_times.push_back(end_voting - start_voting);
        voting_x.push_back(t);
        for (int i = 0; i < n_el; ++i) idx_sum += elected(i);
      }
      // Use results so the compiler cannot optimize away the timed code.
      voting_x[0] += idx_sum > 1.0 ? 0.0 : 0.00001;
      beta[v] = fit_theil_sen(voting_x, voting_times);
    }
    beta_voting.push_back(beta);
  }

  return beta_voting;
}

// Extends x with up to n_tested evenly spaced grid points <= max_generated,
// then drops values above max_val.
static void generate_x(std::vector<int> &x, int max_generated, int n_tested, int max_val) {
  // NOTE(review): when max_generated <= n_tested this sets n_tested = max_val
  // rather than max_generated; if max_val >> max_generated the increment
  // becomes 0 and a single 0 is appended — verify this is intended.
  n_tested = max_generated > n_tested ? n_tested : max_val;
  int increment = max_generated / n_tested;
  for (int i = 1; i <= n_tested; ++i) {
    if (std::find(x.begin(), x.end(), i * increment) == x.end() && i * increment <= max_generated) {
      x.push_back(i * increment);
    }
  }
  auto end = std::remove_if(x.begin(), x.end(), [max_val](int t) { return t > max_val; });
  x.erase(end, x.end());
}

// Times exact search on random candidate sets of varying size and fits a
// linear model (Theil-Sen) of time vs. candidate-set size.
std::pair<double,double> fit_exact_times(const Eigen::Map<const Eigen::MatrixXf> &Q) {
  std::vector<int> s_tested {1,2,5,10,20,35,50,75,100,150,200,300,400,500};
  generate_x(s_tested, n_samples / 20, 20, n_samples);

  int n_test = Q.cols();
  std::vector<double> exact_times;
  long double idx_sum = 0;

  std::random_device rd;
  std::mt19937 rng(rd());
  std::uniform_int_distribution<int> uni(0, n_test - 1);
  std::uniform_int_distribution<int> uni2(0, n_samples - 1);
  std::vector<double> ex;
  int n_sim = 100;   // measurements averaged per candidate-set size
  for (int i = 0; i < (int) s_tested.size(); ++i) {
    double mean_exact_time = 0;
    int s_size = s_tested[i];
    ex.push_back(s_size);
    for (int m = 0; m < n_sim; ++m) {
      auto ri = uni(rng);
      Eigen::VectorXi elected(s_size);
      for (int j = 0; j < elected.size(); ++j)
        elected(j) = uni2(rng);

      double start_exact = omp_get_wtime();
      std::vector<int> res(k);
      exact_knn(Eigen::Map<const Eigen::VectorXf>(Q.data() + ri * dim, dim), k, elected, s_size, &res[0]);
      double end_exact = omp_get_wtime();
      mean_exact_time += (end_exact - start_exact);

      for (int l = 0; l < k; ++l) idx_sum += res[l];
    }
    mean_exact_time /= n_sim;
    exact_times.push_back(mean_exact_time);
  }

  // Use results so the compiler cannot optimize away the timed code.
  ex[0] += idx_sum > 1.0 ? 0.0 : 0.00001;
  return fit_theil_sen(ex, exact_times);
}

// Enumerates all (trees, depth, votes) combinations with their estimated
// query time and measured recall, ordered by estimated query time.
std::set<Mrpt_Parameters,decltype(is_faster)*> list_parameters(const std::vector<Eigen::MatrixXd> &recalls) {
  std::set<Mrpt_Parameters,decltype(is_faster)*> pars(is_faster);
  std::vector<Eigen::MatrixXd> query_times(depth - depth_min + 1);
  for (int d = depth_min; d <= depth; ++d) {
    Eigen::MatrixXd query_time = Eigen::MatrixXd::Zero(votes_max, n_trees);
    for (int t = 1; t <= n_trees; ++t) {
      int votes_index = votes_max < t ? votes_max : t;
      for (int v = 1; v <= votes_index; ++v) {
        double qt = get_query_time(t, d, v);
        query_time(v - 1, t - 1) = qt;
        Mrpt_Parameters p;
        p.n_trees = t;
        p.depth = d;
        p.votes = v;
        p.k = k;
        p.estimated_qtime = qt;
        p.estimated_recall = recalls[d - depth_min](v - 1, t - 1);
        pars.insert(p);
      }
    }
    query_times[d - depth_min] = query_time;
  }
  return pars;
}

// Keeps only parameter combinations that strictly improve recall as query
// time increases (input is ordered by estimated query time).
std::set<Mrpt_Parameters,decltype(is_faster)*> pareto_frontier(const std::set<Mrpt_Parameters,decltype(is_faster)*> &pars) {
  opt_pars = std::set<Mrpt_Parameters,decltype(is_faster)*>(is_faster);
  double best_recall = -1.0;
  for (const auto &p : pars) { // compute pareto frontier for query times and recalls
    if (p.estimated_recall > best_recall) {
      opt_pars.insert(p);
      best_recall = p.estimated_recall;
    }
  }
  return opt_pars;
}

// Fits all three timing models used to estimate total query time.
void fit_times(const Eigen::Map<const Eigen::MatrixXf> &Q) {
  std::vector<int> exact_x;
  beta_projection = fit_projection_times(Q, exact_x);
  beta_voting = fit_voting_times(Q);
  beta_exact = fit_exact_times(Q);
}

// Robust line fit: slope is the median pairwise slope, intercept the median
// residual. Returns (intercept, slope).
static std::pair<double,double> fit_theil_sen(const std::vector<double> &x, const std::vector<double> &y) {
  int n = x.size();
  std::vector<double> slopes;
  for (int i = 0; i < n; ++i) {
    for (int j = 0; j < n; ++j) {
      if (i != j) slopes.push_back((y[j] - y[i]) / (x[j] - x[i]));
    }
  }
  int n_slopes = slopes.size();
  std::nth_element(slopes.begin(), slopes.begin() + n_slopes / 2, slopes.end());
  double slope = *(slopes.begin() + n_slopes / 2);

  std::vector<double> residuals(n);
  for (int i = 0; i < n; ++i)
    residuals[i] = y[i] - slope * x[i];
  std::nth_element(residuals.begin(), residuals.begin() + n / 2, residuals.end());
  double intercept = *(residuals.begin() + n / 2);

  return std::make_pair(intercept, slope);
}

// Serializes one parameter record; field order must match read_parameters().
void write_parameters(const Mrpt_Parameters *p, FILE *fd) const {
  if (!fd) {
    return;
  }
  fwrite(&p->n_trees, sizeof(int), 1, fd);
  fwrite(&p->depth, sizeof(int), 1, fd);
  fwrite(&p->votes, sizeof(int), 1, fd);
  fwrite(&p->k, sizeof(int), 1, fd);
  fwrite(&p->estimated_qtime, sizeof(double), 1, fd);
  fwrite(&p->estimated_recall, sizeof(double), 1, fd);
}

// Deserializes one parameter record; field order must match write_parameters().
void read_parameters(Mrpt_Parameters *p, FILE *fd) {
  fread(&p->n_trees, sizeof(int), 1, fd);
  fread(&p->depth, sizeof(int), 1, fd);
  fread(&p->votes, sizeof(int), 1, fd);
  fread(&p->k, sizeof(int), 1, fd);
  fread(&p->estimated_qtime, sizeof(double), 1, fd);
  fread(&p->estimated_recall, sizeof(double), 1, fd);
}

// Serializes the optimal-parameter list: count followed by the records.
void write_parameter_list(const std::set<Mrpt_Parameters,decltype(is_faster)*> &pars, FILE *fd) const {
  if (!fd) {
    return;
  }
  int par_sz = pars.size();
  fwrite(&par_sz, sizeof(int), 1, fd);

  // NOTE(review): `const auto p` copies each element; `const auto &p` would
  // avoid it (the address taken below is of the copy, which is still correct).
  for (const auto p : pars)
    write_parameters(&p, fd);
}

// Deserializes the optimal-parameter list written by write_parameter_list().
void read_parameter_list(FILE *fd) {
  if (!fd) {
    return;
  }
  opt_pars = std::set<Mrpt_Parameters,decltype(is_faster)*>(is_faster);
  int par_sz = 0;
  fread(&par_sz, sizeof(int), 1, fd);

  for (int i = 0; i < par_sz; ++i) {
    Mrpt_Parameters p;
    read_parameters(&p, fd);
    opt_pars.insert(p);
  }
}

// Returns the fastest parameter combination whose estimated recall exceeds
// target_recall (within epsilon); falls back to the highest-recall combination,
// or a default-constructed record if the frontier is empty.
Mrpt_Parameters parameters(double target_recall) const {
  double tr = target_recall - epsilon;
  for (const auto &p : opt_pars) {
    if (p.estimated_recall > tr) {
      return p;
    }
  }
  if (!opt_pars.empty()) {
    return *(opt_pars.rbegin());
  }
  return Mrpt_Parameters();
}

/**
 * Computes the leaf sizes of a tree assuming a median split and that
 * when the number points is odd, the extra point is always assigned to
 * the left branch.
 */
static void count_leaf_sizes(int n, int level, int tree_depth, std::vector<int> &out_leaf_sizes) {
  if (level == tree_depth) {
    out_leaf_sizes.push_back(n);
    return;
  }

  count_leaf_sizes(n - n / 2, level + 1, tree_depth, out_leaf_sizes);
  count_leaf_sizes(n / 2, level + 1, tree_depth, out_leaf_sizes);
}

/**
 * Computes indices of the first elements of leaves in a vector containing
 * all the leaves of a tree concatenated. Assumes that median split is used
 * and when the number points is odd, the extra point is always assigned to
 * the left branch.
*/ static void count_first_leaf_indices(std::vector<int> &indices, int n, int depth) { std::vector<int> leaf_sizes; count_leaf_sizes(n, 0, depth, leaf_sizes); indices = std::vector<int>(leaf_sizes.size() + 1); indices[0] = 0; for (int i = 0; i < (int) leaf_sizes.size(); ++i) indices[i + 1] = indices[i] + leaf_sizes[i]; } static void count_first_leaf_indices_all(std::vector<std::vector<int>> &indices, int n, int depth_max) { for (int d = 0; d <= depth_max; ++d) { std::vector<int> idx; count_first_leaf_indices(idx, n, d); indices.push_back(idx); } } static double predict_theil_sen(double x, std::pair<double,double> beta) { return beta.first + beta.second * x; } double get_candidate_set_size(int tree, int depth, int v) const { return cs_sizes[depth - depth_min](v - 1, tree - 1); } double get_projection_time(int n_trees, int depth, int v) const { return predict_theil_sen(n_trees * depth, beta_projection); } double get_voting_time(int n_trees, int depth, int v) const { const std::map<int,std::pair<double,double>> &beta = beta_voting[depth - depth_min]; if (v <= 0 || beta.empty()) { return 0.0; } for (const auto &b : beta) { if (v <= b.first) { return predict_theil_sen(n_trees, b.second); } } return predict_theil_sen(n_trees, beta.rbegin()->second); } double get_exact_time(int n_trees, int depth, int v) const { return predict_theil_sen(get_candidate_set_size(n_trees, depth, v), beta_exact); } double get_query_time(int tree, int depth, int v) const { return get_projection_time(tree, depth, v) + get_voting_time(tree, depth, v) + get_exact_time(tree, depth, v); } const Eigen::Map<const Eigen::MatrixXf> X; // the data matrix Eigen::MatrixXf split_points; // all split points in all trees std::vector<std::vector<int>> tree_leaves; // contains all leaves of all trees Eigen::Matrix<float, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor> dense_random_matrix; // random vectors needed for all the RP-trees Eigen::SparseMatrix<float, Eigen::RowMajor> sparse_random_matrix; // random 
vectors needed for all the RP-trees std::vector<std::vector<int>> leaf_first_indices_all; // first indices for each level std::vector<int> leaf_first_indices; // first indices of each leaf of tree in tree_leaves const int n_samples; // sample size of data const int dim; // dimension of data Mrpt_Parameters par; int n_trees = 0; // number of RP-trees int depth = 0; // depth of an RP-tree with median split float density = -1.0; // expected ratio of non-zero components in a projection matrix int n_pool = 0; // amount of random vectors needed for all the RP-trees int n_array = 0; // length of the one RP-tree as array int votes = 0; // optimal number of votes to use int k = 0; enum itype {normal, autotuned, autotuned_unpruned}; itype index_type = normal; // Member variables used in autotuning: int depth_min = 0; int votes_max = 0; const double epsilon = 0.0001; // error bound for comparisons of recall levels std::vector<Eigen::MatrixXd> cs_sizes; std::pair<double,double> beta_projection, beta_exact; std::vector<std::map<int,std::pair<double,double>>> beta_voting; std::set<Mrpt_Parameters,decltype(is_faster)*> opt_pars; }; #endif // CPP_MRPT_H_
tabu_search.h
/*****************************************************************************/ // Copyright (c) 2020-2021 Yuji KOGUMA // Released under the MIT license // https://opensource.org/licenses/mit-license.php /*****************************************************************************/ #ifndef PRINTEMPS_SOLVER_TABU_SEARCH_TABU_SEARCH_H__ #define PRINTEMPS_SOLVER_TABU_SEARCH_TABU_SEARCH_H__ #include "../memory.h" #include "tabu_search_move_score.h" #include "tabu_search_print.h" #include "tabu_search_termination_status.h" #include "tabu_search_result.h" namespace printemps { namespace solver { namespace tabu_search { /*****************************************************************************/ template <class T_Variable, class T_Expression> TabuSearchResult<T_Variable, T_Expression> solve( model::Model<T_Variable, T_Expression>* a_model_ptr, // const option::Option& a_OPTION, // const std::vector<multi_array::ValueProxy<T_Variable>>& // a_INITIAL_VARIABLE_VALUE_PROXIES, // const solution::IncumbentHolder<T_Variable, T_Expression>& // a_INCUMBENT_HOLDER, // const Memory a_MEMORY) { /** * Define type aliases. */ using Model_T = model::Model<T_Variable, T_Expression>; using Result_T = TabuSearchResult<T_Variable, T_Expression>; using IncumbentHolder_T = solution::IncumbentHolder<T_Variable, T_Expression>; using Move_T = neighborhood::Move<T_Variable, T_Expression>; using MoveScore = TabuSearchMoveScore; /** * Start to measure computational time. */ utility::TimeKeeper time_keeper; /** * Copy arguments as local variables. */ Model_T* model_ptr = a_model_ptr; option::Option option = a_OPTION; Memory memory = a_MEMORY; IncumbentHolder_T incumbent_holder = a_INCUMBENT_HOLDER; /** * Reset the local augmented incumbent. */ incumbent_holder.reset_local_augmented_incumbent(); /** * Prepare a random generator, which is used for shuffling moves. */ std::mt19937 get_rand_mt(option.tabu_search.seed); /** * Initialize the solution and update the model. 
*/ model_ptr->import_variable_values(a_INITIAL_VARIABLE_VALUE_PROXIES); model_ptr->update(); solution::SolutionScore current_solution_score = model_ptr->evaluate({}); solution::SolutionScore previous_solution_score = current_solution_score; int update_status = incumbent_holder.try_update_incumbent( model_ptr, current_solution_score); int total_update_status = solution::IncumbentHolderConstant::STATUS_NO_UPDATED; /** * Reset the last update iterations. */ memory.reset_last_update_iterations(); /** * Set up the tabu tenure and related parameters. */ int original_tabu_tenure = std::min(option.tabu_search.initial_tabu_tenure, model_ptr->number_of_mutable_variables()); int tabu_tenure = original_tabu_tenure; double intensity_previous = 0.0; double intensity_current = 0.0; int intensity_increase_count = 0; int intensity_decrease_count = 0; int last_tabu_tenure_updated_iteration = 0; /** * Prepare feasible solutions holder. */ std::vector<solution::SparseSolution<T_Variable, T_Expression>> feasible_solutions; /** * Reset the variable improvability. */ model_ptr->reset_variable_objective_improvability(); model_ptr->reset_variable_feasibility_improvability(); /** * Prepare other local variables. 
*/ int number_of_all_neighborhoods = 0; int number_of_feasible_neighborhoods = 0; int number_of_permissible_neighborhoods = 0; int number_of_improvable_neighborhoods = 0; std::vector<solution::SolutionScore> trial_solution_scores; std::vector<MoveScore> trial_move_scores; std::vector<double> total_scores; std::vector<double> infeasible_local_penalties; int last_local_augmented_incumbent_update_iteration = -1; int last_global_augmented_incumbent_update_iteration = -1; int last_feasible_incumbent_update_iteration = -1; int local_augmented_incumbent_update_count = 0; TabuSearchTerminationStatus termination_status = TabuSearchTerminationStatus::ITERATION_OVER; neighborhood::Move<T_Variable, T_Expression> previous_move; neighborhood::Move<T_Variable, T_Expression> current_move; bool is_few_permissible_neighborhood = false; bool is_found_new_feasible_solution = false; double min_objective = current_solution_score.objective; double max_objective = current_solution_score.objective; double min_local_penalty = HUGE_VALF; if (!current_solution_score.is_feasible) { min_local_penalty = current_solution_score.local_penalty; } /** * Print the header of optimization progress table and print the initial * solution status. */ utility::print_single_line(option.verbose >= option::verbose::Full); utility::print_message("Tabu Search starts.", option.verbose >= option::verbose::Full); print_table_header(option.verbose >= option::verbose::Full); print_table_initial(model_ptr, // current_solution_score, // incumbent_holder, // option.verbose >= option::verbose::Full); /** * Iterations start. */ int iteration = 0; while (true) { /** * Check the terminating condition. 
*/ double elapsed_time = time_keeper.clock(); if (elapsed_time > option.tabu_search.time_max) { termination_status = TabuSearchTerminationStatus::TIME_OVER; break; } if (elapsed_time + option.tabu_search.time_offset > option.time_max) { termination_status = TabuSearchTerminationStatus::TIME_OVER; break; } if (iteration >= option.tabu_search.iteration_max) { termination_status = TabuSearchTerminationStatus::ITERATION_OVER; break; } if (incumbent_holder.feasible_incumbent_objective() <= option.target_objective_value) { termination_status = TabuSearchTerminationStatus::REACH_TARGET; break; } if (local_augmented_incumbent_update_count > option.tabu_search.pruning_rate_threshold * option.tabu_search.iteration_max) { termination_status = TabuSearchTerminationStatus::EARLY_STOP; break; } /** * Update the moves. */ bool is_enabled_improvability_screening = (option.improvability_screening_mode != option::improvability_screening_mode::Off); bool accept_all = true; bool accept_objective_improvable = true; bool accept_feasibility_improvable = true; if (model_ptr->is_linear() && is_enabled_improvability_screening) { /** * If the option improvability_screening_mode is not None, * only improvable moves will be generated. 
*/ auto changed_variable_ptrs = utility::to_vector( neighborhood::related_variable_ptrs(current_move)); if (iteration == 0) { model_ptr->update_variable_objective_improvability(); } else { model_ptr->update_variable_objective_improvability( changed_variable_ptrs); } switch (option.improvability_screening_mode) { case option::improvability_screening_mode::Soft: { if (model_ptr->is_feasible()) { accept_all = false; accept_objective_improvable = true; accept_feasibility_improvable = false; } else { model_ptr->reset_variable_feasibility_improvability(); model_ptr->update_variable_feasibility_improvability(); accept_all = false; accept_objective_improvable = true; accept_feasibility_improvable = true; } break; } case option::improvability_screening_mode::Aggressive: { if (model_ptr->is_feasible()) { accept_all = false; accept_objective_improvable = true; accept_feasibility_improvable = false; } else { model_ptr->reset_variable_feasibility_improvability(); model_ptr->update_variable_feasibility_improvability(); accept_all = false; accept_objective_improvable = false; accept_feasibility_improvable = true; } break; } case option::improvability_screening_mode::Intensive: { if (model_ptr->is_feasible()) { accept_all = false; accept_objective_improvable = true; accept_feasibility_improvable = false; } else { auto changed_constraint_ptrs = utility::to_vector( current_move.related_constraint_ptrs); if (iteration == 0) { model_ptr ->reset_variable_feasibility_improvability(); model_ptr ->update_variable_feasibility_improvability(); } else { model_ptr->reset_variable_feasibility_improvability( changed_constraint_ptrs); model_ptr ->update_variable_feasibility_improvability( changed_constraint_ptrs); } accept_all = false; accept_objective_improvable = false; accept_feasibility_improvable = true; } break; } default: { throw std::logic_error(utility::format_error_location( __FILE__, __LINE__, __func__, "The specified improvability screening mode is " "invalid.")); } } } 
model_ptr->neighborhood().update_moves( accept_all, // accept_objective_improvable, // accept_feasibility_improvable, // option.is_enabled_parallel_neighborhood_update); if (option.tabu_search.is_enabled_shuffle) { model_ptr->neighborhood().shuffle_moves(&get_rand_mt); } const auto& trial_move_ptrs = model_ptr->neighborhood().move_ptrs(); int number_of_moves = trial_move_ptrs.size(); if (option.tabu_search.is_enabled_move_curtail) { number_of_moves = static_cast<int>( floor(option.tabu_search.move_preserve_rate * number_of_moves)); } /** * If the number of the moves is zero, the tabu search iterations will * be terminated. */ if (number_of_moves == 0) { if (model_ptr->is_linear() && model_ptr->is_feasible()) { /** * If the current solution is feasible and there is no * improvable solution, the solution should be an optimum. * It can happen for decomp2 instance in MIPLIB 2017. */ termination_status = TabuSearchTerminationStatus::OPTIMAL; for (const auto& variable_ptr : model_ptr->variable_reference().variable_ptrs) { if (variable_ptr->is_objective_improvable()) { termination_status = TabuSearchTerminationStatus::NO_MOVE; break; } } break; } else { termination_status = TabuSearchTerminationStatus::NO_MOVE; break; } } /** * Reserve elements for vectors by the number of the moves. This step is * required for each iteration because the number of the moves can be * changed. */ trial_solution_scores.resize(number_of_moves); trial_move_scores.resize(number_of_moves); total_scores.resize(number_of_moves); #ifdef _OPENMP #pragma omp parallel for if (option.is_enabled_parallel_evaluation) \ schedule(static) #endif for (auto i = 0; i < number_of_moves; i++) { /** * The neighborhood solutions will be evaluated in parallel by fast * or ordinary(slow) evaluation methods. 
*/ #ifndef _MPS_SOLVER if (model_ptr->is_enabled_fast_evaluation()) { #endif model_ptr->evaluate(&trial_solution_scores[i], // *trial_move_ptrs[i], // current_solution_score); #ifndef _MPS_SOLVER } else { model_ptr->evaluate(&trial_solution_scores[i], // *trial_move_ptrs[i]); } #endif evaluate_move(&trial_move_scores[i], // *trial_move_ptrs[i], // iteration, // memory, // option, // tabu_tenure); total_scores[i] = trial_solution_scores[i].local_augmented_objective + trial_move_scores[i].frequency_penalty; /** * If the move is "tabu", it will be set lower priorities in * selecting a move for the next solution. */ if (!trial_move_scores[i].is_permissible) { total_scores[i] += constant::LARGE_VALUE_50; } /** * If the move is special neighborhood moves, it must improves * objective or feasibility. */ if (trial_move_ptrs[i]->is_special_neighborhood_move && !(trial_solution_scores[i].is_objective_improvable || trial_solution_scores[i].is_feasibility_improvable)) { total_scores[i] += constant::LARGE_VALUE_100; } } /** * Select moves for the next solution. */ int argmin_global_augmented_objective = std::distance( trial_solution_scores.begin(), min_element(trial_solution_scores.begin(), trial_solution_scores.end(), [](const auto& a_FIRST, const auto& a_SECOND) { return a_FIRST.global_augmented_objective < a_SECOND.global_augmented_objective; })); int argmin_total_score = utility::argmin(total_scores); int selected_index = 0; bool is_aspirated = false; if (iteration < option.tabu_search.number_of_initial_modification) { /** * For diversification, the move for next solution will be randomly * selected for initial several iteration. */ selected_index = get_rand_mt() % number_of_moves; } else { /** * The move for next solution will be determined by evaluations of * solutions and moves after the inital modifications. */ selected_index = argmin_total_score; /** * A move which improves the augmented incumbent solution can be * accepted (optional). 
*/ if (option.tabu_search.ignore_tabu_if_global_incumbent) { if (trial_solution_scores[argmin_global_augmented_objective] .global_augmented_objective + constant::EPSILON < incumbent_holder.global_augmented_incumbent_objective()) { selected_index = argmin_global_augmented_objective; if (!trial_move_scores[selected_index].is_permissible) { is_aspirated = true; } } } } /** * Backup the previous solution score and move. */ previous_solution_score = current_solution_score; previous_move = current_move; /** * Update the model by the selected move. */ Move_T* move_ptr = trial_move_ptrs[selected_index]; model_ptr->update(*move_ptr); /** * Update the current solution score and move. */ current_solution_score = trial_solution_scores[selected_index]; current_move = *move_ptr; min_objective = std::min(min_objective, current_solution_score.objective); max_objective = std::min(max_objective, current_solution_score.objective); if (!current_solution_score.is_feasible) { min_local_penalty = std::min(min_local_penalty, current_solution_score.local_penalty); } /** * Update the status. */ update_status = incumbent_holder.try_update_incumbent( model_ptr, current_solution_score); total_update_status = update_status | total_update_status; if (current_solution_score.is_feasible) { is_found_new_feasible_solution = true; } /** * Store the current feasible solution. */ if (option.is_enabled_store_feasible_solutions && current_solution_score.is_feasible) { feasible_solutions.push_back(model_ptr->export_plain_solution()); } /** * Update the memory. */ int random_width = static_cast<int>( option.tabu_search.tabu_tenure_randomize_rate * tabu_tenure); memory.update(*move_ptr, // iteration, // random_width, // &get_rand_mt); /** * To avoid cycling, each special neighborhood can be used only once in * one tabu search loop. */ if (move_ptr->is_special_neighborhood_move) { move_ptr->is_available = false; } /** * Calculate various statistics for logging. 
*/ if (update_status & solution::IncumbentHolderConstant:: STATUS_LOCAL_AUGMENTED_INCUMBENT_UPDATE) { last_local_augmented_incumbent_update_iteration = iteration; } if (update_status & solution::IncumbentHolderConstant:: STATUS_GLOBAL_AUGMENTED_INCUMBENT_UPDATE) { last_global_augmented_incumbent_update_iteration = iteration; } if (update_status & solution::IncumbentHolderConstant:: STATUS_FEASIBLE_INCUMBENT_UPDATE) { last_feasible_incumbent_update_iteration = iteration; } /** * For pruning, count updating of the local augmented incumbent without * global augmented incumbent improvement. */ if (update_status == solution::IncumbentHolderConstant:: STATUS_LOCAL_AUGMENTED_INCUMBENT_UPDATE) { local_augmented_incumbent_update_count++; } else if (update_status & solution::IncumbentHolderConstant:: STATUS_GLOBAL_AUGMENTED_INCUMBENT_UPDATE) { local_augmented_incumbent_update_count = 0; } /** * Calculate the number of moves for each type. */ number_of_all_neighborhoods = number_of_moves; if (iteration % std::max(option.tabu_search.log_interval, 1) == 0 || update_status > 0) { number_of_feasible_neighborhoods = 0; number_of_permissible_neighborhoods = 0; number_of_improvable_neighborhoods = 0; for (const auto& score : trial_solution_scores) { if (score.is_feasible) { number_of_feasible_neighborhoods++; } if (score.is_objective_improvable || score.is_feasibility_improvable) { number_of_improvable_neighborhoods++; } } for (const auto& score : trial_move_scores) { if (score.is_permissible) { number_of_permissible_neighborhoods++; } } if (number_of_permissible_neighborhoods == 0) { is_few_permissible_neighborhood = true; } } else { bool is_few_permissible_neighborhood_temp = true; for (const auto& score : trial_move_scores) { if (score.is_permissible) { is_few_permissible_neighborhood_temp = false; break; } } if (is_few_permissible_neighborhood_temp) { is_few_permissible_neighborhood = true; } } /** * Register a chain move. 
*/ if (iteration > 0 && option.is_enabled_chain_move) { if ((previous_move.sense == neighborhood::MoveSense::Binary && current_move.sense == neighborhood::MoveSense::Binary && previous_move.alterations.front().second != current_move.alterations.front().second) || (previous_move.sense == neighborhood::MoveSense::Chain && current_move.sense == neighborhood::MoveSense::Chain)) { Move_T chain_move; if (previous_move.alterations.front().first < current_move.alterations.front().first) chain_move = previous_move + current_move; else { chain_move = current_move + previous_move; } if (chain_move.overlap_rate > option.chain_move_overlap_rate_threshold && !neighborhood::has_duplicate_variable(chain_move)) { auto back_chain_move = chain_move; for (auto&& alteration : back_chain_move.alterations) { alteration.second = 1 - alteration.second; } model_ptr->neighborhood().chain().register_move(chain_move); model_ptr->neighborhood().chain().register_move( back_chain_move); } } } if (option.tabu_search.is_enabled_automatic_tabu_tenure_adjustment) { if ((update_status & solution::IncumbentHolderConstant:: STATUS_GLOBAL_AUGMENTED_INCUMBENT_UPDATE) && tabu_tenure > original_tabu_tenure) { /** * The tabu tenure will be reverted to the original value if it * has been increased and the global incumbent is updated. */ tabu_tenure = original_tabu_tenure; last_tabu_tenure_updated_iteration = iteration; intensity_decrease_count = 0; intensity_increase_count = 0; utility::print_debug("Tabu tenure reverted: " + std::to_string(tabu_tenure) + ".", option.verbose >= option::verbose::Debug); } else if ((iteration - last_tabu_tenure_updated_iteration) % (tabu_tenure + 1) == 0) { /** * The intensity of searching will be computed with the interval * of tabu_tenure+1. The tabu tenure will be increased if the * intensity has grown up, and decreased if the intensity has * been reduced. 
*/ intensity_previous = intensity_current; intensity_current = memory.intensity(); if (intensity_current > intensity_previous) { intensity_increase_count++; intensity_decrease_count = 0; if (intensity_increase_count > option.tabu_search.intensity_increase_count_threshold) { intensity_increase_count = 0; tabu_tenure = std::min(tabu_tenure + 1, model_ptr->number_of_mutable_variables()); last_tabu_tenure_updated_iteration = iteration; utility::print_debug( "Tabu tenure increased: " + std::to_string(tabu_tenure) + ".", option.verbose >= option::verbose::Debug); } } else { intensity_decrease_count++; intensity_increase_count = 0; if (intensity_decrease_count > option.tabu_search.intensity_decrease_count_threshold) { intensity_decrease_count = 0; tabu_tenure = std::max(tabu_tenure - 1, std::max(1, original_tabu_tenure / 2)); last_tabu_tenure_updated_iteration = iteration; utility::print_debug( "Tabu tenure decreased: " + std::to_string(tabu_tenure) + ".", option.verbose >= option::verbose::Debug); } } } } /** * Print the optimization progress. */ if (iteration % std::max(option.tabu_search.log_interval, 1) == 0 || update_status > 0) { print_table_body(model_ptr, // iteration, // current_move.is_special_neighborhood_move, // number_of_all_neighborhoods, // number_of_feasible_neighborhoods, // number_of_permissible_neighborhoods, // number_of_improvable_neighborhoods, // current_solution_score, // update_status, // incumbent_holder, // is_aspirated, // option.verbose >= option::verbose::Full); } if (option.tabu_search.is_enabled_automatic_break) { /** * If the local penalty us sufficiently larger than objective * sensitivity, the current loop will be terminated and the * local penalty coefficients will be adjusted. 
*/ constexpr int ITERATION_MIN = 10; constexpr double MARGIN = 100.0; if (iteration > ITERATION_MIN && current_solution_score.is_feasible) { infeasible_local_penalties.clear(); for (const auto& score : trial_solution_scores) { if (!score.is_feasible) { infeasible_local_penalties.push_back( score.local_penalty); } } if (infeasible_local_penalties.size() > 0) { auto argminmax_objective_sensitivity_score_ptr = std::minmax_element( trial_solution_scores.begin(), trial_solution_scores.end(), [](const auto& a_FIRST, const auto& a_SECOND) { return a_FIRST.objective_improvement < a_SECOND.objective_improvement; }); double max_objective_sensitivity = std::max(argminmax_objective_sensitivity_score_ptr .second->objective_improvement, -argminmax_objective_sensitivity_score_ptr .first->objective_improvement); if (max_objective_sensitivity * MARGIN < utility::min(infeasible_local_penalties)) { termination_status = TabuSearchTerminationStatus::EARLY_STOP; break; } } } } iteration++; } /** * Print the footer of the optimization progress table. */ print_table_footer(option.verbose >= option::verbose::Full); /** * Prepare the result. 
*/ Result_T result; result.incumbent_holder = incumbent_holder; result.memory = memory; result.total_update_status = total_update_status; result.tabu_tenure = tabu_tenure; result.number_of_iterations = iteration; result.last_local_augmented_incumbent_update_iteration = last_local_augmented_incumbent_update_iteration; result.last_global_augmented_incumbent_update_iteration = last_global_augmented_incumbent_update_iteration; result.last_feasible_incumbent_update_iteration = last_feasible_incumbent_update_iteration; result.is_few_permissible_neighborhood = is_few_permissible_neighborhood; result.is_found_new_feasible_solution = is_found_new_feasible_solution; auto abs_max_objective = std::max(fabs(max_objective), fabs(min_objective)); result.objective_constraint_rate = std::max(1.0, std::max(abs_max_objective, // max_objective - min_objective)) / std::max(1.0, min_local_penalty); result.termination_status = termination_status; result.feasible_solutions = feasible_solutions; return result; } } // namespace tabu_search } // namespace solver } // namespace printemps #endif /*****************************************************************************/ // END /*****************************************************************************/
GB_binop__band_uint16.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__band_uint16) // A.*B function (eWiseMult): GB (_AemultB_08__band_uint16) // A.*B function (eWiseMult): GB (_AemultB_02__band_uint16) // A.*B function (eWiseMult): GB (_AemultB_04__band_uint16) // A.*B function (eWiseMult): GB (_AemultB_bitmap__band_uint16) // A*D function (colscale): GB (_AxD__band_uint16) // D*A function (rowscale): GB (_DxB__band_uint16) // C+=B function (dense accum): GB (_Cdense_accumB__band_uint16) // C+=b function (dense accum): GB (_Cdense_accumb__band_uint16) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__band_uint16) // C=scalar+B GB (_bind1st__band_uint16) // C=scalar+B' GB (_bind1st_tran__band_uint16) // C=A+scalar GB (_bind2nd__band_uint16) // C=A'+scalar GB (_bind2nd_tran__band_uint16) // C type: uint16_t // A type: uint16_t // A pattern? 0 // B type: uint16_t // B pattern? 
0 // BinaryOp: cij = (aij) & (bij) #define GB_ATYPE \ uint16_t #define GB_BTYPE \ uint16_t #define GB_CTYPE \ uint16_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ uint16_t aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ uint16_t bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ uint16_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = (x) & (y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_BAND || GxB_NO_UINT16 || GxB_NO_BAND_UINT16) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__band_uint16) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__band_uint16) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__band_uint16) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type uint16_t uint16_t bwork = (*((uint16_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__band_uint16) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t 
/* NOTE(review): this chunk appears to be auto-generated SuiteSparse:GraphBLAS
   kernel code specialized for the BAND (bitwise AND, see the (x) & (bij)
   bodies below) operator on uint16.  The first lines are the tail of the
   colscale kernel whose header is above this chunk; its leading "uint16_t"
   token is on the previous (unseen) line. */
*restrict Cx = (uint16_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

// Row-scale kernel: the loop lives in the rowscale template; this wrapper
// only casts C->x to the uint16 type the template expects.
GrB_Info GB (_DxB__band_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t *restrict Cx = (uint16_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

// alpha/beta scalars are only read in the eWiseUnion case; otherwise they
// stay uninitialized and the add template must not touch them.
GrB_Info GB (_AaddB__band_uint16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    uint16_t alpha_scalar ;
    uint16_t beta_scalar ;
    if (is_eWiseUnion)
    {
        alpha_scalar = (*((uint16_t *) alpha_scalar_in)) ;
        beta_scalar = (*((uint16_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__band_uint16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__band_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
    // The operator is not commutative, and does not have a flipped
    // variant.  For example z=atan2(y,x).
    if (flipxy)
    {
        // use fmult(y,x)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 1
        #include "GB_emult_02_template.c"
    }
    else
    {
        // use fmult(x,y)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    }
    #else
    // No need to handle the flip: the operator is either commutative, or
    // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
    #undef  GB_FLIPPED
    #define GB_FLIPPED 0
    #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__band_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__band_uint16)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

// Entries where the bitmap Bb is zero are skipped; GBB/GBX handle the
// bitmap/full cases uniformly.
GrB_Info GB (_bind1st__band_uint16)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t *Cx = (uint16_t *) Cx_output ;
    uint16_t x = (*((uint16_t *) x_input)) ;
    uint16_t *Bx = (uint16_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        if (!GBB (Bb, p)) continue ;
        uint16_t bij = GBX (Bx, p, false) ;
        Cx [p] = (x) & (bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__band_uint16)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    uint16_t *Cx = (uint16_t *) Cx_output ;
    uint16_t *Ax = (uint16_t *) Ax_input ;
    uint16_t y = (*((uint16_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        uint16_t aij = GBX (Ax, p, false) ;
        Cx [p] = (aij) & (y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    uint16_t aij = GBX (Ax, pA, false) ;        \
    Cx [pC] = (x) & (aij) ;                     \
}

GrB_Info GB (_bind1st_tran__band_uint16)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint16_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t x = (*((const uint16_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // redefined (identically) after the template, matching the generator's
    // fixed output shape
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint16_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    uint16_t aij = GBX (Ax, pA, false) ;        \
    Cx [pC] = (aij) & (y) ;                     \
}

GrB_Info GB (_bind2nd_tran__band_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t y = (*((const uint16_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
/* ==================== file: ex02.c ==================== */
/* Copyright (c) 2019 CSC Training */ /* Copyright (c) 2021 ENCCS */ #include <stdio.h> #include <math.h> #define NX 102400 int main(void) { double vecA[NX],vecB[NX],vecC[NX]; double r=0.2; /* Initialization of vectors */ for (int i = 0; i < NX; i++) { vecA[i] = pow(r, i); vecB[i] = 1.0; } /* dot product of two vectors */ #pragma omp target teams distribute parallel for for (int i = 0; i < NX; i++) { vecC[i] = vecA[i] * vecB[i]; } double sum = 0.0; /* calculate the sum */ for (int i = 0; i < NX; i++) { sum += vecC[i]; } printf("The sum is: %8.6f \n", sum); return 0; }
/* ==================== file: alignment.c ==================== */
/**********************************************************************************************/
/*  This program is part of the Barcelona OpenMP Tasks Suite                                  */
/*  Copyright (C) 2009 Barcelona Supercomputing Center - Centro Nacional de Supercomputacion  */
/*  Copyright (C) 2009 Universitat Politecnica de Catalunya                                   */
/*                                                                                            */
/*  This program is free software; you can redistribute it and/or modify                      */
/*  it under the terms of the GNU General Public License as published by                      */
/*  the Free Software Foundation; either version 2 of the License, or                         */
/*  (at your option) any later version.                                                       */
/*                                                                                            */
/*  This program is distributed in the hope that it will be useful,                           */
/*  but WITHOUT ANY WARRANTY; without even the implied warranty of                            */
/*  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the                             */
/*  GNU General Public License for more details.                                              */
/*                                                                                            */
/*  You should have received a copy of the GNU General Public License                         */
/*  along with this program; if not, write to the Free Software                               */
/*  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA              */
/**********************************************************************************************/

/* Original code from the Application Kernel Matrix by Cray */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <sys/time.h>
#include <libgen.h>
#include "param.h"
#include "alignment.h"
#include "bots.h"

int readseqs(int first_seq, char *filename);

/* Global alignment parameters and shared state (set in pairalign_init). */
int ktup, window, signif;
int prot_ktup, prot_window, prot_signif;
int gap_pos1, gap_pos2, mat_avscore;
int nseqs, max_aa;

#define MAX_ALN_LENGTH 5000

int *seqlen_array, def_aa_xref[NUMRES+1];
int *bench_output, *seq_output;   /* pairwise scores: parallel vs sequential run */
double gap_open, gap_extend;
double prot_gap_open, prot_gap_extend;
double pw_go_penalty, pw_ge_penalty;
double prot_pw_go_penalty, prot_pw_ge_penalty;
char **args, **names, **seq_array;
int matrix[NUMRES][NUMRES];       /* substitution matrix built by get_matrix() */

#define MIN(a,b) ((a)<(b)?(a):(b))
/* Affine gap cost helpers; tb/te/gh are locals of diff() at expansion site. */
#define tbgap(k) ((k) <= 0 ? 0 : tb + gh * (k))
#define tegap(k) ((k) <= 0 ? 0 : te + gh * (k))

/***********************************************************************
 * del: append a deletion run of length k to the edit script in displ[]
 * (negative entry), merging into a preceding deletion run if one is open.
 **********************************************************************/
void del(int k, int *print_ptr, int *last_print, int *displ)
{
   if (*last_print<0) *last_print = displ[(*print_ptr)-1] -= k;
   else *last_print = displ[(*print_ptr)++] = -k;
}

/***********************************************************************
 * add: append an insertion run of length v to the edit script in displ[],
 * inserting it before an open deletion run if necessary.
 **********************************************************************/
void add(int v, int *print_ptr, int *last_print, int *displ)
{
   if (*last_print < 0) {
      displ[(*print_ptr)-1] = v;
      displ[(*print_ptr)++] = *last_print;
   } else {
      *last_print = displ[(*print_ptr)++] = v;
   }
}

/***********************************************************************
 * calc_score: substitution-matrix score of residue iat of seq1 (offset v1)
 * against residue jat of seq2 (offset v2).
 **********************************************************************/
int calc_score(int iat, int jat, int v1, int v2, int seq1, int seq2)
{
   int i, j, ipos, jpos;
   ipos = v1 + iat;
   jpos = v2 + jat;
   i = seq_array[seq1][ipos];
   j = seq_array[seq2][jpos];
   return (matrix[i][j]);
}

/***********************************************************************
 * get_matrix: expand the packed lower-triangular matrix matptr (via the
 * xref residue mapping) into the global matrix[][], scaled by `scale`;
 * sets mat_avscore and the gap rows/cols, returns the residue count.
 **********************************************************************/
int get_matrix(int *matptr, int *xref, int scale)
{
   int gg_score = 0;
   int gr_score = 0;
   int i, j, k, ti, tj, ix;
   int av1, av2, av3, min, max, maxres;

   for (i = 0; i <= max_aa; i++)
      for (j = 0; j <= max_aa; j++) matrix[i][j] = 0;

   ix = 0;
   maxres = 0;
   for (i = 0; i <= max_aa; i++) {
      ti = xref[i];
      for (j = 0; j <= i; j++) {
         tj = xref[j];
         if ((ti != -1) && (tj != -1)) {
            k = matptr[ix];
            if (ti == tj) {
               matrix[ti][ti] = k * scale;
               maxres++;
            } else {
               matrix[ti][tj] = k * scale;
               matrix[tj][ti] = k * scale;
            }
            ix++;
         }
      }
   }

   maxres--;
   av1 = av2 = av3 = 0;
   for (i = 0; i <= max_aa; i++) {
      for (j = 0; j <= i; j++) {
         av1 += matrix[i][j];
         if (i == j) av2 += matrix[i][j];
         else av3 += matrix[i][j];
      }
   }
   /* average of the off-diagonal scores; negated to give mat_avscore */
   av1 /= (maxres*maxres)/2;
   av2 /= maxres;
   av3 /= ((double)(maxres*maxres-maxres))/2;
   mat_avscore = -av3;

   min = max = matrix[0][0];
   for (i = 0; i <= max_aa; i++)
      for (j = 1; j <= i; j++) {
         if (matrix[i][j] < min) min = matrix[i][j];
         if (matrix[i][j] > max) max = matrix[i][j];
      }

   /* gap symbols score gr_score (0) against residues, gg_score (0) vs gaps */
   for (i = 0; i < gap_pos1; i++) {
      matrix[i][gap_pos1] = gr_score;
      matrix[gap_pos1][i] = gr_score;
      matrix[i][gap_pos2] = gr_score;
      matrix[gap_pos2][i] = gr_score;
   }
   matrix[gap_pos1][gap_pos1] = gg_score;
   matrix[gap_pos2][gap_pos2] = gg_score;
   matrix[gap_pos2][gap_pos1] = gg_score;
   matrix[gap_pos1][gap_pos2] = gg_score;

   maxres += 2;
   return(maxres);
}

/***********************************************************************
 * forward_pass: Smith-Waterman style forward scan with affine gaps
 * (open g, extend gh); returns best score and its end cell (*se1,*se2).
 **********************************************************************/
void forward_pass(char *ia, char *ib, int n, int m, int *se1, int *se2, int *maxscore, int g, int gh)
{
   int i, j, f, p, t, hh;
   int HH[MAX_ALN_LENGTH];
   int DD[MAX_ALN_LENGTH];

   *maxscore = 0;
   *se1 = *se2 = 0;
   for (i = 0; i <= m; i++) {HH[i] = 0; DD[i] = -g;}
   for (i = 1; i <= n; i++) {
      hh = p = 0;
      f = -g;
      for (j = 1; j <= m; j++) {
         f -= gh;
         t = hh - g - gh;
         if (f < t) f = t;
         DD[j] -= gh;
         t = HH[j] - g - gh;
         if (DD[j] < t) DD[j] = t;
         hh = p + matrix[(int)ia[i]][(int)ib[j]];
         if (hh < f) hh = f;
         if (hh < DD[j]) hh = DD[j];
         if (hh < 0) hh = 0;   /* local alignment: never below zero */
         p = HH[j];
         HH[j] = hh;
         if (hh > *maxscore) {*maxscore = hh; *se1 = i; *se2 = j;}
      }
   }
}

/***********************************************************************
 * reverse_pass: scan backwards from the forward-pass end cell (se1,se2)
 * until maxscore is reached again, yielding the start cell (*sb1,*sb2).
 **********************************************************************/
void reverse_pass(char *ia, char *ib, int se1, int se2, int *sb1, int *sb2, int maxscore, int g, int gh)
{
   int i, j, f, p, t, hh, cost;
   int HH[MAX_ALN_LENGTH];
   int DD[MAX_ALN_LENGTH];

   cost = 0;
   *sb1 = *sb2 = 1;
   for (i = se2; i > 0; i--){ HH[i] = -1; DD[i] = -1;}
   for (i = se1; i > 0; i--) {
      hh = f = -1;
      if (i == se1) p = 0;
      else p = -1;
      for (j = se2; j > 0; j--) {
         f -= gh;
         t = hh - g - gh;
         if (f < t) f = t;
         DD[j] -= gh;
         t = HH[j] - g - gh;
         if (DD[j] < t) DD[j] = t;
         hh = p + matrix[(int)ia[i]][(int)ib[j]];
         if (hh < f) hh = f;
         if (hh < DD[j]) hh = DD[j];
         p = HH[j];
         HH[j] = hh;
         if (hh > cost) {
            cost = hh; *sb1 = i; *sb2 = j;
            if (cost >= maxscore) break;
         }
      }
      if (cost >= maxscore) break;
   }
}

/***********************************************************************
 * diff: divide-and-conquer (linear-space) alignment of the M x N
 * subproblem starting at offsets (A,B), with boundary gap-open costs
 * tb/te; records the edit script via add()/del()/displ and returns the
 * alignment score.
 **********************************************************************/
int diff (int A, int B, int M, int N, int tb, int te, int *print_ptr, int *last_print, int *displ, int seq1, int seq2, int g, int gh)
{
   int i, j, f, e, s, t, hh;
   int midi, midj, midh, type;
   int HH[MAX_ALN_LENGTH];
   int DD[MAX_ALN_LENGTH];
   int RR[MAX_ALN_LENGTH];
   int SS[MAX_ALN_LENGTH];

   /* base case: first sequence piece empty -> pure deletion */
   if (N <= 0) {
      if (M > 0) del(M, print_ptr, last_print, displ);
      return( - (int) tbgap(M));
   }

   /* base case: at most one row -> scan for the single best match column */
   if (M <= 1) {
      if (M <= 0) {
         add(N, print_ptr, last_print, displ);
         return( - (int)tbgap(N));
      }
      midh = -(tb+gh) - tegap(N);
      hh = -(te+gh) - tbgap(N);
      if (hh > midh) midh = hh;
      midj = 0;
      for (j = 1; j <= N; j++) {
         hh = calc_score(1,j,A,B,seq1,seq2) - tegap(N-j) - tbgap(j-1);
         if (hh > midh) {midh = hh; midj = j;}
      }
      if (midj == 0) {
         del(1, print_ptr, last_print, displ);
         add(N, print_ptr, last_print, displ);
      } else {
         if (midj > 1) add(midj-1, print_ptr, last_print, displ);
         displ[(*print_ptr)++] = *last_print = 0;
         if (midj < N) add(N-midj, print_ptr, last_print, displ);
      }
      return midh;
   }

   /* forward DP over the top half (rows 1..midi) */
   midi = M / 2;
   HH[0] = 0.0;
   t = -tb;
   for (j = 1; j <= N; j++) {
      HH[j] = t = t - gh;
      DD[j] = t - g;
   }
   t = -tb;
   for (i = 1; i <= midi; i++) {
      s = HH[0];
      HH[0] = hh = t = t - gh;
      f = t - g;
      for (j = 1; j <= N; j++) {
         if ((hh = hh - g - gh) > (f = f - gh)) f = hh;
         if ((hh = HH[j] - g - gh) > (e = DD[j]- gh)) e = hh;
         hh = s + calc_score(i,j,A,B,seq1,seq2);
         if (f > hh) hh = f;
         if (e > hh) hh = e;
         s = HH[j];
         HH[j] = hh;
         DD[j] = e;
      }
   }
   DD[0] = HH[0];

   /* reverse DP over the bottom half (rows M-1..midi) */
   RR[N] = 0;
   t = -te;
   for (j = N-1; j >= 0; j--) {RR[j] = t = t - gh; SS[j] = t - g;}
   t = -te;
   for (i = M - 1; i >= midi; i--) {
      s = RR[N];
      RR[N] = hh = t = t-gh;
      f = t - g;
      for (j = N - 1; j >= 0; j--) {
         if ((hh = hh - g - gh) > (f = f - gh)) f = hh;
         if ((hh = RR[j] - g - gh) > (e = SS[j] - gh)) e = hh;
         hh = s + calc_score(i+1,j+1,A,B,seq1,seq2);
         if (f > hh) hh = f;
         if (e > hh) hh = e;
         s = RR[j];
         RR[j] = hh;
         SS[j] = e;
      }
   }
   SS[N] = RR[N];

   /* find the best split column; type 2 = split inside a gap */
   midh = HH[0] + RR[0];
   midj = 0;
   type = 1;
   for (j = 0; j <= N; j++) {
      hh = HH[j] + RR[j];
      if (hh >= midh)
         if (hh > midh || (HH[j] != DD[j] && RR[j] == SS[j])) {midh = hh; midj = j;}
   }
   for (j = N; j >= 0; j--) {
      hh = DD[j] + SS[j] + g;
      if (hh > midh) {midh = hh;midj = j;type = 2;}
   }

   /* recurse on the two halves */
   if (type == 1) {
      diff(A, B, midi, midj, tb, g, print_ptr, last_print, displ, seq1, seq2, g, gh);
      diff(A+midi, B+midj, M-midi, N-midj, g, te, print_ptr, last_print, displ, seq1, seq2, g, gh);
   } else {
      diff(A, B, midi-1, midj, tb, 0.0, print_ptr, last_print, displ, seq1, seq2, g, gh);
      del(2, print_ptr, last_print, displ);
      diff(A+midi+1, B+midj, M-midi-1, N-midj, 0.0, te, print_ptr, last_print, displ, seq1, seq2, g, gh);
   }
   return midh;
}

/***********************************************************************
 * tracepath: walk the edit script in displ[] and return 100 * the number
 * of identical aligned residue pairs (caller divides by MIN(len1,len2)).
 **********************************************************************/
double tracepath(int tsb1, int tsb2, int *print_ptr, int *last_print, int *displ, int seq1, int seq2)
{
   int i, k;
   int i1 = tsb1;
   int i2 = tsb2;
   int pos = 0;
   int count = 0;

   for (i = 1; i <= *print_ptr - 1; ++i) {
      if (displ[i]==0) {
         /* aligned pair: count exact (non-gap) matches */
         char c1 = seq_array[seq1][i1];
         char c2 = seq_array[seq2][i2];
         if ((c1!=gap_pos1) && (c1 != gap_pos2) && (c1 == c2)) count++;
         ++i1;
         ++i2;
         ++pos;
      } else if ((k = displ[i]) > 0) {
         i2 += k;   /* insertion in seq2 */
         pos += k;
      } else {
         i1 -= k;   /* deletion (k < 0): advance seq1 */
         pos -= k;
      }
   }
   return (100.0 * (double) count);
}

/***********************************************************************
 * pairalign: align every pair (si,sj), si < sj, one OpenMP task per
 * pair; stores truncated percent-identity scores in bench_output[].
 * NOTE(review): istart/iend/jstart/jend are ignored; the loops run over
 * all of nseqs.
 **********************************************************************/
int pairalign(int istart, int iend, int jstart, int jend)
{
   int i, n, m, si, sj;
   int len1, len2, maxres;
   double gg, mm_score;
   int *mat_xref, *matptr;

   matptr = gon250mt;
   mat_xref = def_aa_xref;
   maxres = get_matrix(matptr, mat_xref, 10);
   if (maxres == 0) return(-1);

   bots_message("Start aligning ");
   #pragma omp parallel
   {
      #pragma omp single private(i,n,si,sj,len1,m)
      for (si = 0; si < nseqs; si++) {
         if ((n = seqlen_array[si+1]) != 0){
            /* len1 = number of non-gap residues in sequence si+1 */
            for (i = 1, len1 = 0; i <= n; i++) {
               char c = seq_array[si+1][i];
               if ((c != gap_pos1) && (c != gap_pos2)) len1++;
            }
            for (sj = si + 1; sj < nseqs; sj++) {
               if ((m = seqlen_array[sj+1]) != 0) {
                  #pragma omp task untied \
                  private(i,gg,len2,mm_score) firstprivate(m,n,si,sj,len1) \
                  shared(nseqs, bench_output,seqlen_array,seq_array,gap_pos1,gap_pos2,pw_ge_penalty,pw_go_penalty,mat_avscore)
                  {
                     int se1, se2, sb1, sb2, maxscore, seq1, seq2, g, gh;
                     int displ[2*MAX_ALN_LENGTH+1];
                     int print_ptr, last_print;

                     for (i = 1, len2 = 0; i <= m; i++) {
                        char c = seq_array[sj+1][i];
                        if ((c != gap_pos1) && (c != gap_pos2)) len2++;
                     }
                     /* gap penalties scaled as in ClustalW-style pairwise stage */
                     gh = 10 * pw_ge_penalty;
                     gg = pw_go_penalty + log((double) MIN(n, m));
                     g = (mat_avscore <= 0) ? 20 * gg : 2 * mat_avscore * gg;

                     seq1 = si + 1;
                     seq2 = sj + 1;
                     forward_pass(&seq_array[seq1][0], &seq_array[seq2][0], n, m, &se1, &se2, &maxscore, g, gh);
                     reverse_pass(&seq_array[seq1][0], &seq_array[seq2][0], se1, se2, &sb1, &sb2, maxscore, g, gh);
                     print_ptr = 1;
                     last_print = 0;
                     diff(sb1-1, sb2-1, se1-sb1+1, se2-sb2+1, 0, 0, &print_ptr, &last_print, displ, seq1, seq2, g, gh);
                     mm_score = tracepath(sb1, sb2, &print_ptr, &last_print, displ, seq1, seq2);
                     if (len1 == 0 || len2 == 0) mm_score = 0.0;
                     else mm_score /= (double) MIN(len1,len2);
                     /* note: double truncated to int on store */
                     bench_output[si*nseqs+sj] = mm_score;
                  }
               }
            }
         }
      }
   }
   bots_message(" completed!\n");
   return 0;
}

/***********************************************************************
 * pairalign_seq: sequential reference of pairalign(); identical math,
 * results go to seq_output[] for align_verify().
 **********************************************************************/
int pairalign_seq(int istart, int iend, int jstart, int jend)
{
   int i, n, m, si, sj;
   int len1, len2, maxres;
   double gg, mm_score;
   int *mat_xref, *matptr;

   matptr = gon250mt;
   mat_xref = def_aa_xref;
   maxres = get_matrix(matptr, mat_xref, 10);
   if (maxres == 0) return(-1);

   for (si = 0; si < nseqs; si++) {
      if ((n = seqlen_array[si+1]) != 0){
         for (i = 1, len1 = 0; i <= n; i++) {
            char c = seq_array[si+1][i];
            if ((c != gap_pos1) && (c != gap_pos2)) len1++;
         }
         for (sj = si + 1; sj < nseqs; sj++) {
            if ((m = seqlen_array[sj+1]) != 0){
               int se1, se2, sb1, sb2, maxscore, seq1, seq2, g, gh;
               int displ[2*MAX_ALN_LENGTH+1];
               int print_ptr, last_print;

               for (i = 1, len2 = 0; i <= m; i++) {
                  char c = seq_array[sj+1][i];
                  if ((c != gap_pos1) && (c != gap_pos2)) len2++;
               }
               gh = 10 * pw_ge_penalty;
               gg = pw_go_penalty + log((double) MIN(n, m));
               g = (mat_avscore <= 0) ? 20 * gg : 2 * mat_avscore * gg;

               seq1 = si + 1;
               seq2 = sj + 1;
               forward_pass(&seq_array[seq1][0], &seq_array[seq2][0], n, m, &se1, &se2, &maxscore, g, gh);
               reverse_pass(&seq_array[seq1][0], &seq_array[seq2][0], se1, se2, &sb1, &sb2, maxscore, g, gh);
               print_ptr = 1;
               last_print = 0;
               diff(sb1-1, sb2-1, se1-sb1+1, se2-sb2+1, 0, 0, &print_ptr, &last_print, displ, seq1, seq2, g, gh);
               mm_score = tracepath(sb1, sb2, &print_ptr, &last_print, displ, seq1, seq2);
               if (len1 == 0 || len2 == 0) mm_score = 0.0;
               else mm_score /= (double) MIN(len1,len2);
               seq_output[si*nseqs+sj] = mm_score;
            }
         }
      }
   }
   return 0;
}

/***********************************************************************
 * init_matrix: set gap symbol codes and build the residue cross-
 * reference def_aa_xref[] from the amino-acid code tables.
 **********************************************************************/
void init_matrix(void)
{
   int i, j;
   char c1, c2;

   gap_pos1 = NUMRES - 2;
   gap_pos2 = NUMRES - 1;
   max_aa = strlen(amino_acid_codes) - 2;

   for (i = 0; i < NUMRES; i++) def_aa_xref[i] = -1;

   for (i = 0; (c1 = amino_acid_order[i]); i++)
      for (j = 0; (c2 = amino_acid_codes[j]); j++)
         if (c1 == c2) {def_aa_xref[i] = j; break;}
}

/* Read the input sequences and set the alignment parameters. */
void pairalign_init (char *filename)
{
   int i;
   if (!filename || !filename[0]) {
      bots_error(0, "Please specify an input file with the -f option\n");
   }
   init_matrix();
   nseqs = readseqs(1,filename);
   bots_message("Multiple Pairwise Alignment (%d sequences)\n",nseqs);
   for (i = 1; i <= nseqs; i++)
      bots_debug("Sequence %d: %s %6.d aa\n", i, names[i], seqlen_array[i]);
   ktup = 1;
   window = 5;
   signif = 5;
   gap_open = 10.0;
   gap_extend = 0.2;
   pw_go_penalty = 10.0;
   pw_ge_penalty = 0.1;
}

/* Allocate and zero the parallel-run score table. */
void align_init ()
{
   int i,j;
   bench_output = (int *) malloc(sizeof(int)*nseqs*nseqs);
   for(i = 0; i<nseqs; i++)
      for(j = 0; j<nseqs; j++)
         bench_output[i*nseqs+j] = 0;
}

/* Benchmark entry point: run the task-parallel alignment. */
void align()
{
   pairalign(0, nseqs,0, nseqs);
}

/* Allocate and zero the sequential-run score table (and bench table). */
void align_seq_init ()
{
   int i,j;
   seq_output = (int *) malloc(sizeof(int)*nseqs*nseqs);
   bench_output = (int *) malloc(sizeof(int)*nseqs*nseqs);
   for(i = 0; i<nseqs; i++)
      for(j = 0; j<nseqs; j++)
         seq_output[i*nseqs+j] = 0;
}

/* Sequential reference entry point. */
void align_seq()
{
   pairalign_seq(0, nseqs,0, nseqs);
}

/* Debug dump of the non-zero parallel scores. */
void align_end ()
{
   int i,j;
   for(i = 0; i<nseqs; i++)
      for(j = 0; j<nseqs; j++)
         if (bench_output[i*nseqs+j] != 0)
            bots_debug("Benchmark sequences (%d:%d) Aligned. Score: %d\n", i+1 , j+1 , (int) bench_output[i*nseqs+j]);
}

/* Compare parallel vs sequential score tables entry by entry. */
int align_verify ()
{
   int i,j;
   int result = BOTS_RESULT_SUCCESSFUL;

   for(i = 0; i<nseqs; i++) {
      for(j = 0; j<nseqs; j++) {
         if (bench_output[i*nseqs+j] != seq_output[i*nseqs+j]) {
            bots_message("Error: Optimized prot. (%3d:%3d)=%5d Sequential prot. (%3d:%3d)=%5d\n",
                         i+1, j+1, (int) bench_output[i*nseqs+j], i+1, j+1, (int) seq_output[i*nseqs+j]);
            result = BOTS_RESULT_UNSUCCESSFUL;
         }
      }
   }
   return result;
}
/* ==================== file: par_mod_lr_interp.c ==================== */
/****************************************************************************** * Copyright 1998-2019 Lawrence Livermore National Security, LLC and other * HYPRE Project Developers. See the top-level COPYRIGHT file for details. * * SPDX-License-Identifier: (Apache-2.0 OR MIT) ******************************************************************************/ #include "_hypre_parcsr_ls.h" #include "aux_interp.h" /*--------------------------------------------------------------------------- * hypre_BoomerAMGBuildModExtInterp * Comment: *--------------------------------------------------------------------------*/ HYPRE_Int hypre_BoomerAMGBuildModExtInterpHost(hypre_ParCSRMatrix *A, HYPRE_Int *CF_marker, hypre_ParCSRMatrix *S, HYPRE_BigInt *num_cpts_global, HYPRE_Int num_functions, HYPRE_Int *dof_func, HYPRE_Int debug_flag, HYPRE_Real trunc_factor, HYPRE_Int max_elmts, hypre_ParCSRMatrix **P_ptr) { /* Communication Variables */ MPI_Comm comm = hypre_ParCSRMatrixComm(A); HYPRE_MemoryLocation memory_location_P = hypre_ParCSRMatrixMemoryLocation(A); hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A); hypre_ParCSRCommHandle *comm_handle = NULL; HYPRE_Int my_id, num_procs; /* Variables to store input variables */ hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag); HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd); HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd); HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd); hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S); HYPRE_Int *S_diag_j = hypre_CSRMatrixJ(S_diag); HYPRE_Int *S_diag_i = hypre_CSRMatrixI(S_diag); hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S); HYPRE_Int *S_offd_j = hypre_CSRMatrixJ(S_offd); HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd); HYPRE_Int n_fine = hypre_CSRMatrixNumRows(A_diag); HYPRE_BigInt 
total_global_cpts; /* Interpolation matrix P */ hypre_ParCSRMatrix *P; hypre_CSRMatrix *P_diag; hypre_CSRMatrix *P_offd; HYPRE_Real *P_diag_data = NULL; HYPRE_Int *P_diag_i, *P_diag_j = NULL; HYPRE_Real *P_offd_data = NULL; HYPRE_Int *P_offd_i, *P_offd_j = NULL; /* Intermediate matrices */ hypre_ParCSRMatrix *As_FF, *As_FC, *W; HYPRE_Real *D_q, *D_w; hypre_CSRMatrix *As_FF_diag; hypre_CSRMatrix *As_FF_offd; hypre_CSRMatrix *As_FC_diag; hypre_CSRMatrix *As_FC_offd; hypre_CSRMatrix *W_diag; hypre_CSRMatrix *W_offd; HYPRE_Int *As_FF_diag_i; HYPRE_Int *As_FF_offd_i; HYPRE_Int *As_FC_diag_i; HYPRE_Int *As_FC_offd_i; HYPRE_Int *W_diag_i; HYPRE_Int *W_offd_i; HYPRE_Int *W_diag_j; HYPRE_Int *W_offd_j; HYPRE_Real *As_FF_diag_data; HYPRE_Real *As_FF_offd_data; HYPRE_Real *As_FC_diag_data; HYPRE_Real *As_FC_offd_data; HYPRE_Real *W_diag_data; HYPRE_Real *W_offd_data; HYPRE_BigInt *col_map_offd_P = NULL; HYPRE_BigInt *new_col_map_offd = NULL; HYPRE_Int P_diag_size; HYPRE_Int P_offd_size; HYPRE_Int new_ncols_P_offd; HYPRE_Int num_cols_P_offd; HYPRE_Int *P_marker = NULL; HYPRE_Int *dof_func_offd = NULL; /* Loop variables */ HYPRE_Int index; HYPRE_Int i, j; HYPRE_Int *cpt_array; HYPRE_Int *start_array; HYPRE_Int *startf_array; HYPRE_Int start, stop, startf, stopf; HYPRE_Int cnt_diag, cnt_offd, row, c_pt; /* Definitions */ //HYPRE_Real wall_time; HYPRE_Int n_Cpts, n_Fpts; HYPRE_Int num_threads = hypre_NumThreads(); //if (debug_flag==4) wall_time = time_getWallclockSeconds(); /* BEGIN */ hypre_MPI_Comm_size(comm, &num_procs); hypre_MPI_Comm_rank(comm, &my_id); if (my_id == (num_procs - 1)) { total_global_cpts = num_cpts_global[1]; } hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs - 1, comm); n_Cpts = num_cpts_global[1] - num_cpts_global[0]; hypre_ParCSRMatrixGenerateFFFC(A, CF_marker, num_cpts_global, S, &As_FC, &As_FF); As_FC_diag = hypre_ParCSRMatrixDiag(As_FC); As_FC_diag_i = hypre_CSRMatrixI(As_FC_diag); As_FC_diag_data = hypre_CSRMatrixData(As_FC_diag); 
As_FC_offd = hypre_ParCSRMatrixOffd(As_FC); As_FC_offd_i = hypre_CSRMatrixI(As_FC_offd); As_FC_offd_data = hypre_CSRMatrixData(As_FC_offd); As_FF_diag = hypre_ParCSRMatrixDiag(As_FF); As_FF_diag_i = hypre_CSRMatrixI(As_FF_diag); As_FF_diag_data = hypre_CSRMatrixData(As_FF_diag); As_FF_offd = hypre_ParCSRMatrixOffd(As_FF); As_FF_offd_i = hypre_CSRMatrixI(As_FF_offd); As_FF_offd_data = hypre_CSRMatrixData(As_FF_offd); n_Fpts = hypre_CSRMatrixNumRows(As_FF_diag); D_q = hypre_CTAlloc(HYPRE_Real, n_Fpts, HYPRE_MEMORY_HOST); D_w = hypre_CTAlloc(HYPRE_Real, n_Fpts, HYPRE_MEMORY_HOST); cpt_array = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST); start_array = hypre_CTAlloc(HYPRE_Int, num_threads + 1, HYPRE_MEMORY_HOST); startf_array = hypre_CTAlloc(HYPRE_Int, num_threads + 1, HYPRE_MEMORY_HOST); #ifdef HYPRE_USING_OPENMP #pragma omp parallel private(i,j,start,stop,startf,stopf,row) #endif { HYPRE_Int my_thread_num = hypre_GetThreadNum(); HYPRE_Real beta, gamma; start = (n_fine / num_threads) * my_thread_num; if (my_thread_num == num_threads - 1) { stop = n_fine; } else { stop = (n_fine / num_threads) * (my_thread_num + 1); } start_array[my_thread_num + 1] = stop; for (i = start; i < stop; i++) { if (CF_marker[i] > 0) { cpt_array[my_thread_num]++; } } #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif if (my_thread_num == 0) { for (i = 1; i < num_threads; i++) { cpt_array[i] += cpt_array[i - 1]; } if (num_functions > 1) { HYPRE_Int *int_buf_data = NULL; HYPRE_Int num_sends, startc; HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd); dof_func_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST); index = 0; num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST); for (i = 0; i < num_sends; i++) { startc = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j = startc; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i + 1); j++) { 
int_buf_data[index++] = dof_func[hypre_ParCSRCommPkgSendMapElmt(comm_pkg, j)]; } } comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data, dof_func_offd); hypre_ParCSRCommHandleDestroy(comm_handle); hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST); } } #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif if (my_thread_num > 0) { startf = start - cpt_array[my_thread_num - 1]; } else { startf = 0; } if (my_thread_num < num_threads - 1) { stopf = stop - cpt_array[my_thread_num]; } else { stopf = n_Fpts; } startf_array[my_thread_num + 1] = stopf; /* Create D_q = D_beta */ for (i = startf; i < stopf; i++) { for (j = As_FC_diag_i[i]; j < As_FC_diag_i[i + 1]; j++) { D_q[i] += As_FC_diag_data[j]; } for (j = As_FC_offd_i[i]; j < As_FC_offd_i[i + 1]; j++) { D_q[i] += As_FC_offd_data[j]; } } /* Create D_w = D_alpha + D_gamma */ row = startf; for (i = start; i < stop; i++) { if (CF_marker[i] < 0) { if (num_functions > 1) { HYPRE_Int jA, jS, jC; jC = A_diag_i[i]; for (j = S_diag_i[i]; j < S_diag_i[i + 1]; j++) { jS = S_diag_j[j]; jA = A_diag_j[jC]; while (jA != jS) { if (dof_func[i] == dof_func[jA]) { D_w[row] += A_diag_data[jC++]; } else { jC++; } jA = A_diag_j[jC]; } jC++; } for (j = jC; j < A_diag_i[i + 1]; j++) { if (dof_func[i] == dof_func[A_diag_j[j]]) { D_w[row] += A_diag_data[j]; } } jC = A_offd_i[i]; for (j = S_offd_i[i]; j < S_offd_i[i + 1]; j++) { jS = S_offd_j[j]; jA = A_offd_j[jC]; while (jA != jS) { if (dof_func[i] == dof_func_offd[jA]) { D_w[row] += A_offd_data[jC++]; } else { jC++; } jA = A_offd_j[jC]; } jC++; } for (j = jC; j < A_offd_i[i + 1]; j++) { if (dof_func[i] == dof_func_offd[A_offd_j[j]]) { D_w[row] += A_offd_data[j]; } } row++; } else { for (j = A_diag_i[i]; j < A_diag_i[i + 1]; j++) { D_w[row] += A_diag_data[j]; } for (j = A_offd_i[i]; j < A_offd_i[i + 1]; j++) { D_w[row] += A_offd_data[j]; } for (j = As_FF_diag_i[row] + 1; j < As_FF_diag_i[row + 1]; j++) { D_w[row] -= As_FF_diag_data[j]; } for (j = As_FF_offd_i[row]; j < As_FF_offd_i[row 
+ 1]; j++) { D_w[row] -= As_FF_offd_data[j]; } D_w[row] -= D_q[row]; row++; } } } for (i = startf; i < stopf; i++) { j = As_FF_diag_i[i]; if (D_w[i]) { beta = 1.0 / D_w[i]; } else { beta = 1.0; } As_FF_diag_data[j] = beta * D_q[i]; if (D_q[i]) { gamma = -1.0 / D_q[i]; } else { gamma = 1.0; } for (j = As_FF_diag_i[i] + 1; j < As_FF_diag_i[i + 1]; j++) { As_FF_diag_data[j] *= beta; } for (j = As_FF_offd_i[i]; j < As_FF_offd_i[i + 1]; j++) { As_FF_offd_data[j] *= beta; } for (j = As_FC_diag_i[i]; j < As_FC_diag_i[i + 1]; j++) { As_FC_diag_data[j] *= gamma; } for (j = As_FC_offd_i[i]; j < As_FC_offd_i[i + 1]; j++) { As_FC_offd_data[j] *= gamma; } } } /* end parallel region */ W = hypre_ParMatmul(As_FF, As_FC); W_diag = hypre_ParCSRMatrixDiag(W); W_offd = hypre_ParCSRMatrixOffd(W); W_diag_i = hypre_CSRMatrixI(W_diag); W_diag_j = hypre_CSRMatrixJ(W_diag); W_diag_data = hypre_CSRMatrixData(W_diag); W_offd_i = hypre_CSRMatrixI(W_offd); W_offd_j = hypre_CSRMatrixJ(W_offd); W_offd_data = hypre_CSRMatrixData(W_offd); num_cols_P_offd = hypre_CSRMatrixNumCols(W_offd); /*----------------------------------------------------------------------- * Intialize data for P *-----------------------------------------------------------------------*/ P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine + 1, memory_location_P); P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine + 1, memory_location_P); P_diag_size = n_Cpts + hypre_CSRMatrixI(W_diag)[n_Fpts]; P_offd_size = hypre_CSRMatrixI(W_offd)[n_Fpts]; if (P_diag_size) { P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size, memory_location_P); P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size, memory_location_P); } if (P_offd_size) { P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size, memory_location_P); P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size, memory_location_P); } #ifdef HYPRE_USING_OPENMP #pragma omp parallel private(i,j,start,stop,startf,stopf,c_pt,row,cnt_diag,cnt_offd) #endif { HYPRE_Int my_thread_num = hypre_GetThreadNum(); startf = 
startf_array[my_thread_num]; stopf = startf_array[my_thread_num + 1]; start = start_array[my_thread_num]; stop = start_array[my_thread_num + 1]; if (my_thread_num > 0) { c_pt = cpt_array[my_thread_num - 1]; } else { c_pt = 0; } cnt_diag = W_diag_i[startf] + c_pt; cnt_offd = W_offd_i[startf]; row = startf; for (i = start; i < stop; i++) { if (CF_marker[i] > 0) { P_diag_j[cnt_diag] = c_pt++; P_diag_data[cnt_diag++] = 1.0; } else { for (j = W_diag_i[row]; j < W_diag_i[row + 1]; j++) { P_diag_j[cnt_diag] = W_diag_j[j]; P_diag_data[cnt_diag++] = W_diag_data[j]; } for (j = W_offd_i[row]; j < W_offd_i[row + 1]; j++) { P_offd_j[cnt_offd] = W_offd_j[j]; P_offd_data[cnt_offd++] = W_offd_data[j]; } row++; } P_diag_i[i + 1] = cnt_diag; P_offd_i[i + 1] = cnt_offd; } } /* end parallel region */ /*----------------------------------------------------------------------- * Create matrix *-----------------------------------------------------------------------*/ P = hypre_ParCSRMatrixCreate(comm, hypre_ParCSRMatrixGlobalNumRows(A), total_global_cpts, hypre_ParCSRMatrixColStarts(A), num_cpts_global, num_cols_P_offd, P_diag_i[n_fine], P_offd_i[n_fine]); P_diag = hypre_ParCSRMatrixDiag(P); hypre_CSRMatrixData(P_diag) = P_diag_data; hypre_CSRMatrixI(P_diag) = P_diag_i; hypre_CSRMatrixJ(P_diag) = P_diag_j; P_offd = hypre_ParCSRMatrixOffd(P); hypre_CSRMatrixData(P_offd) = P_offd_data; hypre_CSRMatrixI(P_offd) = P_offd_i; hypre_CSRMatrixJ(P_offd) = P_offd_j; hypre_ParCSRMatrixColMapOffd(P) = hypre_ParCSRMatrixColMapOffd(W); hypre_ParCSRMatrixColMapOffd(W) = NULL; hypre_CSRMatrixMemoryLocation(P_diag) = memory_location_P; hypre_CSRMatrixMemoryLocation(P_offd) = memory_location_P; /* Compress P, removing coefficients smaller than trunc_factor * Max */ if (trunc_factor != 0.0 || max_elmts > 0) { HYPRE_Int *map; hypre_BoomerAMGInterpTruncation(P, trunc_factor, max_elmts); P_diag_data = hypre_CSRMatrixData(P_diag); P_diag_i = hypre_CSRMatrixI(P_diag); P_diag_j = hypre_CSRMatrixJ(P_diag); 
/* (continuation) Refresh the P_offd views after hypre_BoomerAMGInterpTruncation,
 * which may have reallocated the CSR arrays of P.
 * NOTE(review): this is the tail of a function whose start is above this chunk —
 * presumably hypre_BoomerAMGBuildModExtInterpHost (the host routine the
 * dispatcher below calls); confirm against the full file. */
P_offd_data = hypre_CSRMatrixData(P_offd);
P_offd_i = hypre_CSRMatrixI(P_offd);
P_offd_j = hypre_CSRMatrixJ(P_offd);
P_diag_size = P_diag_i[n_fine];
P_offd_size = P_offd_i[n_fine];
col_map_offd_P = hypre_ParCSRMatrixColMapOffd(P);
if (num_cols_P_offd)
{
   /* Mark the off-diagonal columns of P still referenced after truncation,
    * then compress col_map_offd_P down to the surviving columns. */
   P_marker = hypre_CTAlloc(HYPRE_Int, num_cols_P_offd, HYPRE_MEMORY_HOST);
   for (i = 0; i < P_offd_size; i++)
   {
      P_marker[P_offd_j[i]] = 1;
   }
   new_ncols_P_offd = 0;
   for (i = 0; i < num_cols_P_offd; i++)
   {
      if (P_marker[i]) { new_ncols_P_offd++; }
   }
   new_col_map_offd = hypre_CTAlloc(HYPRE_BigInt, new_ncols_P_offd, HYPRE_MEMORY_HOST);
   map = hypre_CTAlloc(HYPRE_Int, new_ncols_P_offd, HYPRE_MEMORY_HOST);
   index = 0;
   for (i = 0; i < num_cols_P_offd; i++)
      if (P_marker[i])
      {
         new_col_map_offd[index] = col_map_offd_P[i];
         map[index++] = i;
      }
   hypre_TFree(P_marker, HYPRE_MEMORY_HOST);
   /* Renumber P_offd_j into the compressed column space; map was built in
    * increasing order of i, so it is sorted and binary search is valid. */
#ifdef HYPRE_USING_OPENMP
   #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
   for (i = 0; i < P_offd_size; i++)
   {
      P_offd_j[i] = hypre_BinarySearch(map, P_offd_j[i], new_ncols_P_offd);
   }
   hypre_TFree(col_map_offd_P, HYPRE_MEMORY_HOST);
   hypre_ParCSRMatrixColMapOffd(P) = new_col_map_offd;
   hypre_CSRMatrixNumCols(P_offd) = new_ncols_P_offd;
   hypre_TFree(map, HYPRE_MEMORY_HOST);
}
}

hypre_MatvecCommPkgCreate(P);

*P_ptr = P;

/* Deallocate memory */
hypre_TFree(D_q, HYPRE_MEMORY_HOST);
hypre_TFree(D_w, HYPRE_MEMORY_HOST);
hypre_TFree(cpt_array, HYPRE_MEMORY_HOST);
hypre_TFree(start_array, HYPRE_MEMORY_HOST);
hypre_TFree(startf_array, HYPRE_MEMORY_HOST);
hypre_TFree(dof_func_offd, HYPRE_MEMORY_HOST);
hypre_ParCSRMatrixDestroy(As_FF);
hypre_ParCSRMatrixDestroy(As_FC);
hypre_ParCSRMatrixDestroy(W);

return hypre_error_flag;
}

/*-----------------------------------------------------------------------*
 * Modularized Extended Interpolation
 *
 * Dispatcher: runs the device kernel when A's memory location selects
 * device execution, otherwise the host implementation above.
 *
 * @param A               fine-grid operator
 * @param CF_marker       C/F splitting (positive entries are C-points below)
 * @param S               strength-of-connection matrix
 * @param num_cpts_global global coarse-point partitioning (2 entries/rank)
 * @param num_functions   number of unknowns per node (systems AMG)
 * @param dof_func        per-dof function index (used when num_functions > 1)
 * @param trunc_factor    relative truncation threshold for P
 * @param max_elmts       max nonzeros kept per row of P (0 = unlimited)
 * @param P_ptr           [out] assembled interpolation matrix
 * @return hypre error flag from the selected implementation
 *-----------------------------------------------------------------------*/
HYPRE_Int
hypre_BoomerAMGBuildModExtInterp(hypre_ParCSRMatrix *A, HYPRE_Int *CF_marker,
                                 hypre_ParCSRMatrix *S, HYPRE_BigInt *num_cpts_global,
                                 HYPRE_Int num_functions, HYPRE_Int *dof_func,
                                 HYPRE_Int debug_flag, HYPRE_Real trunc_factor,
                                 HYPRE_Int max_elmts, hypre_ParCSRMatrix **P_ptr)
{
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
   hypre_GpuProfilingPushRange("ModExtInterp");
#endif

   HYPRE_Int ierr = 0;

#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
   HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy1( hypre_ParCSRMatrixMemoryLocation(A) );
   if (exec == HYPRE_EXEC_DEVICE)
   {
      /* Device path is invoked with num_functions hard-wired to 1 and
       * dof_func = NULL, i.e. it ignores the systems-AMG arguments. */
      ierr = hypre_BoomerAMGBuildExtInterpDevice(A, CF_marker, S, num_cpts_global, 1, NULL,
                                                 debug_flag, trunc_factor, max_elmts, P_ptr);
   }
   else
#endif
   {
      ierr = hypre_BoomerAMGBuildModExtInterpHost(A, CF_marker, S, num_cpts_global,
                                                  num_functions, dof_func,
                                                  debug_flag, trunc_factor, max_elmts, P_ptr);
   }

#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
   hypre_GpuProfilingPopRange();
#endif

   return ierr;
}

/*---------------------------------------------------------------------------
 * hypre_BoomerAMGBuildModExtPIInterpHost
 *
 * Host implementation of modularized extended+i interpolation: splits A into
 * As_FF/As_FC via hypre_ParCSRMatrixGenerateFFFC, scales them using the
 * row-sum vectors built below, and assembles P from W = As_FF * As_FC with
 * identity rows at C-points.
 * NOTE: the parameter order here is (..., debug_flag, num_functions, ...),
 * i.e. debug_flag precedes num_functions, unlike the dispatchers.
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_BoomerAMGBuildModExtPIInterpHost(hypre_ParCSRMatrix *A, HYPRE_Int *CF_marker,
                                       hypre_ParCSRMatrix *S, HYPRE_BigInt *num_cpts_global,
                                       HYPRE_Int debug_flag, HYPRE_Int num_functions,
                                       HYPRE_Int *dof_func, HYPRE_Real trunc_factor,
                                       HYPRE_Int max_elmts, hypre_ParCSRMatrix **P_ptr)
{
   /* Communication Variables */
   MPI_Comm comm = hypre_ParCSRMatrixComm(A);
   hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   hypre_ParCSRCommHandle *comm_handle = NULL;
   HYPRE_MemoryLocation memory_location_P = hypre_ParCSRMatrixMemoryLocation(A);
   HYPRE_Int my_id, num_procs;

   /* Variables to store input variables */
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
   HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
   HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
   hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
   HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
   HYPRE_Int
*A_offd_i = hypre_CSRMatrixI(A_offd);
   HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
   hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S);
   HYPRE_Int *S_diag_j = hypre_CSRMatrixJ(S_diag);
   HYPRE_Int *S_diag_i = hypre_CSRMatrixI(S_diag);
   hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S);
   HYPRE_Int *S_offd_j = hypre_CSRMatrixJ(S_offd);
   HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd);
   HYPRE_Int n_fine = hypre_CSRMatrixNumRows(A_diag);
   HYPRE_BigInt total_global_cpts;
   /* Extracted external F-rows of As_FF (only populated when num_procs > 1). */
   hypre_CSRMatrix *As_FF_ext = NULL;
   HYPRE_Real *As_FF_ext_data = NULL;
   HYPRE_Int *As_FF_ext_i = NULL;
   HYPRE_BigInt *As_FF_ext_j = NULL;

   /* Interpolation matrix P */
   hypre_ParCSRMatrix *P;
   hypre_CSRMatrix *P_diag;
   hypre_CSRMatrix *P_offd;
   HYPRE_Real *P_diag_data = NULL;
   HYPRE_Int *P_diag_i, *P_diag_j = NULL;
   HYPRE_Real *P_offd_data = NULL;
   HYPRE_Int *P_offd_i, *P_offd_j = NULL;

   /* Intermediate matrices */
   hypre_ParCSRMatrix *As_FF, *As_FC, *W;
   /* D_q: row sums of As_FC; D_w: remaining weak/diagonal contribution;
    * D_theta: correction accumulated from distance-two F-F couplings. */
   HYPRE_Real *D_q, *D_w, *D_theta, *D_q_offd = NULL;
   hypre_CSRMatrix *As_FF_diag;
   hypre_CSRMatrix *As_FF_offd;
   hypre_CSRMatrix *As_FC_diag;
   hypre_CSRMatrix *As_FC_offd;
   hypre_CSRMatrix *W_diag;
   hypre_CSRMatrix *W_offd;
   HYPRE_Int *As_FF_diag_i;
   HYPRE_Int *As_FF_diag_j;
   HYPRE_Int *As_FF_offd_i;
   HYPRE_Int *As_FF_offd_j = NULL;
   HYPRE_Int *As_FC_diag_i;
   HYPRE_Int *As_FC_offd_i;
   HYPRE_Int *W_diag_i;
   HYPRE_Int *W_offd_i;
   HYPRE_Int *W_diag_j;
   HYPRE_Int *W_offd_j = NULL;
   HYPRE_Real *As_FF_diag_data;
   HYPRE_Real *As_FF_offd_data = NULL;
   HYPRE_Real *As_FC_diag_data;
   HYPRE_Real *As_FC_offd_data = NULL;
   HYPRE_Real *W_diag_data;
   HYPRE_Real *W_offd_data = NULL;
   HYPRE_Real *buf_data = NULL;
   /* Copy of the original As_FF diagonal-block data, taken before the
    * in-place scaling below destroys it; used by the D_theta pass. */
   HYPRE_Real *tmp_FF_diag_data = NULL;
   HYPRE_BigInt *col_map_offd_P = NULL;
   HYPRE_BigInt *new_col_map_offd = NULL;
   HYPRE_BigInt first_index;
   HYPRE_Int P_diag_size;
   HYPRE_Int P_offd_size;
   HYPRE_Int new_ncols_P_offd;
   HYPRE_Int num_cols_P_offd;
   HYPRE_Int *P_marker = NULL;
   HYPRE_Int *dof_func_offd = NULL;

   /* Loop variables */
   HYPRE_Int index, startc, num_sends;
   HYPRE_Int i, j, jj, k, kk;
   HYPRE_Int *cpt_array;
   HYPRE_Int *start_array;
   HYPRE_Int *startf_array;
   HYPRE_Int start, stop, startf, stopf;
   HYPRE_Int cnt_diag, cnt_offd, row, c_pt;
   HYPRE_Int num_cols_A_FF_offd;
   HYPRE_Real value, value1, theta;

   /* Definitions */
   //HYPRE_Real wall_time;
   HYPRE_Int n_Cpts, n_Fpts;
   HYPRE_Int num_threads = hypre_NumThreads();

   //if (debug_flag==4) wall_time = time_getWallclockSeconds();

   /* BEGIN */
   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_MPI_Comm_rank(comm, &my_id);

   /* Last rank holds the global coarse-point count; broadcast it. */
   if (my_id == (num_procs - 1))
   {
      total_global_cpts = num_cpts_global[1];
   }
   hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs - 1, comm);

   n_Cpts = num_cpts_global[1] - num_cpts_global[0];

   /* Split A (restricted to strong connections S) into F-to-F and F-to-C parts. */
   hypre_ParCSRMatrixGenerateFFFC(A, CF_marker, num_cpts_global, S, &As_FC, &As_FF);

   if (num_procs > 1)
   {
      /* Pull the off-processor rows of As_FF needed for the distance-two
       * (extended+i) correction below. */
      As_FF_ext = hypre_ParCSRMatrixExtractBExt(As_FF, As_FF, 1);
      As_FF_ext_i = hypre_CSRMatrixI(As_FF_ext);
      As_FF_ext_j = hypre_CSRMatrixBigJ(As_FF_ext);
      As_FF_ext_data = hypre_CSRMatrixData(As_FF_ext);
   }

   As_FC_diag = hypre_ParCSRMatrixDiag(As_FC);
   As_FC_diag_i = hypre_CSRMatrixI(As_FC_diag);
   As_FC_diag_data = hypre_CSRMatrixData(As_FC_diag);
   As_FC_offd = hypre_ParCSRMatrixOffd(As_FC);
   As_FC_offd_i = hypre_CSRMatrixI(As_FC_offd);
   As_FC_offd_data = hypre_CSRMatrixData(As_FC_offd);
   As_FF_diag = hypre_ParCSRMatrixDiag(As_FF);
   As_FF_diag_i = hypre_CSRMatrixI(As_FF_diag);
   As_FF_diag_j = hypre_CSRMatrixJ(As_FF_diag);
   As_FF_diag_data = hypre_CSRMatrixData(As_FF_diag);
   As_FF_offd = hypre_ParCSRMatrixOffd(As_FF);
   As_FF_offd_i = hypre_CSRMatrixI(As_FF_offd);
   As_FF_offd_j = hypre_CSRMatrixJ(As_FF_offd);
   As_FF_offd_data = hypre_CSRMatrixData(As_FF_offd);
   n_Fpts = hypre_CSRMatrixNumRows(As_FF_diag);
   num_cols_A_FF_offd = hypre_CSRMatrixNumCols(As_FF_offd);
   first_index = hypre_ParCSRMatrixRowStarts(As_FF)[0];
   tmp_FF_diag_data = hypre_CTAlloc(HYPRE_Real, As_FF_diag_i[n_Fpts], HYPRE_MEMORY_HOST);

   D_q = hypre_CTAlloc(HYPRE_Real, n_Fpts, HYPRE_MEMORY_HOST);
   D_theta = hypre_CTAlloc(HYPRE_Real, n_Fpts, HYPRE_MEMORY_HOST);
   D_w = hypre_CTAlloc(HYPRE_Real, n_Fpts, HYPRE_MEMORY_HOST);
   cpt_array = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
   start_array = hypre_CTAlloc(HYPRE_Int, num_threads + 1, HYPRE_MEMORY_HOST);
   startf_array = hypre_CTAlloc(HYPRE_Int, num_threads + 1, HYPRE_MEMORY_HOST);

#ifdef HYPRE_USING_OPENMP
   #pragma omp parallel private(i,j,jj,k,kk,start,stop,startf,stopf,row,theta,value,value1)
#endif
   {
      /* Manual static partition of the fine rows over the team; thread
       * boundaries are published via start_array/startf_array so later
       * parallel regions can reuse the identical split. */
      HYPRE_Int my_thread_num = hypre_GetThreadNum();
      start = (n_fine / num_threads) * my_thread_num;
      if (my_thread_num == num_threads - 1)
      {
         stop = n_fine;
      }
      else
      {
         stop = (n_fine / num_threads) * (my_thread_num + 1);
      }
      start_array[my_thread_num + 1] = stop;
      /* Count C-points per thread ... */
      for (i = start; i < stop; i++)
      {
         if (CF_marker[i] > 0)
         {
            cpt_array[my_thread_num]++;
         }
      }
#ifdef HYPRE_USING_OPENMP
      #pragma omp barrier
#endif
      /* ... and turn the counts into an inclusive prefix sum (thread 0 only). */
      if (my_thread_num == 0)
      {
         for (i = 1; i < num_threads; i++)
         {
            cpt_array[i] += cpt_array[i - 1];
         }
      }
#ifdef HYPRE_USING_OPENMP
      #pragma omp barrier
#endif
      /* F-point index range owned by this thread: fine range minus the
       * number of C-points preceding it. */
      if (my_thread_num > 0)
      {
         startf = start - cpt_array[my_thread_num - 1];
      }
      else
      {
         startf = 0;
      }
      if (my_thread_num < num_threads - 1)
      {
         stopf = stop - cpt_array[my_thread_num];
      }
      else
      {
         stopf = n_Fpts;
      }
      startf_array[my_thread_num + 1] = stopf;
      /* D_q = row sums of As_FC (strong F-to-C couplings). */
      for (i = startf; i < stopf; i++)
      {
         for (j = As_FC_diag_i[i]; j < As_FC_diag_i[i + 1]; j++)
         {
            D_q[i] += As_FC_diag_data[j];
         }
         for (j = As_FC_offd_i[i]; j < As_FC_offd_i[i + 1]; j++)
         {
            D_q[i] += As_FC_offd_data[j];
         }
      }
      /* Snapshot As_FF's diagonal-block values before they are scaled in place. */
      for (j = As_FF_diag_i[startf]; j < As_FF_diag_i[stopf]; j++)
      {
         tmp_FF_diag_data[j] = As_FF_diag_data[j];
      }
#ifdef HYPRE_USING_OPENMP
      #pragma omp barrier
#endif
      if (my_thread_num == 0)
      {
         /* Exchange D_q for As_FF's off-processor columns (ghost F-points). */
         if (num_cols_A_FF_offd)
         {
            D_q_offd = hypre_CTAlloc(HYPRE_Real, num_cols_A_FF_offd, HYPRE_MEMORY_HOST);
         }
         index = 0;
         comm_pkg = hypre_ParCSRMatrixCommPkg(As_FF);
         if (!comm_pkg)
         {
            hypre_MatvecCommPkgCreate(As_FF);
            comm_pkg = hypre_ParCSRMatrixCommPkg(As_FF);
         }
         num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
         buf_data = hypre_CTAlloc(HYPRE_Real, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends),
                                  HYPRE_MEMORY_HOST);
         for (i = 0; i < num_sends; i++)
         {
            startc = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
            for (j = startc; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i + 1); j++)
            {
               buf_data[index++] = D_q[hypre_ParCSRCommPkgSendMapElmt(comm_pkg, j)];
            }
         }
         comm_handle = hypre_ParCSRCommHandleCreate( 1, comm_pkg, buf_data, D_q_offd);
         hypre_ParCSRCommHandleDestroy(comm_handle);
         if (num_functions > 1)
         {
            /* Systems AMG: fetch the function index of A's ghost columns.
             * NOTE(review): this exchange reuses As_FF's comm package (comm_pkg
             * was reassigned above) while dof_func_offd is sized for A_offd's
             * columns — verify this pairing against the callers/upstream. */
            HYPRE_Int *int_buf_data = NULL;
            HYPRE_Int num_sends, startc;
            HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd);
            dof_func_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
            index = 0;
            num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
            int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends),
                                         HYPRE_MEMORY_HOST);
            for (i = 0; i < num_sends; i++)
            {
               startc = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
               for (j = startc; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i + 1); j++)
               {
                  int_buf_data[index++] = dof_func[hypre_ParCSRCommPkgSendMapElmt(comm_pkg, j)];
               }
            }
            comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data, dof_func_offd);
            hypre_ParCSRCommHandleDestroy(comm_handle);
            hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST);
         }
      }
#ifdef HYPRE_USING_OPENMP
      #pragma omp barrier
#endif
      /* Accumulate D_w for each F-row: full row sum of A minus the strong
       * F-F and F-C parts (num_functions == 1 branch), or the same-function
       * weak entries found by walking A's row against S's strong pattern
       * (num_functions > 1 branch). */
      row = startf;
      for (i = start; i < stop; i++)
      {
         HYPRE_Int jA, jC, jS;
         if (CF_marker[i] < 0)
         {
            if (num_functions > 1)
            {
               jC = A_diag_i[i];
               for (j = S_diag_i[i]; j < S_diag_i[i + 1]; j++)
               {
                  jS = S_diag_j[j];
                  jA = A_diag_j[jC];
                  /* Entries of A before the next strong column are weak. */
                  while (jA != jS)
                  {
                     if (dof_func[i] == dof_func[jA])
                     {
                        D_w[row] += A_diag_data[jC++];
                     }
                     else
                     {
                        jC++;
                     }
                     jA = A_diag_j[jC];
                  }
                  jC++;
               }
               for (j = jC; j < A_diag_i[i + 1]; j++)
               {
                  if (dof_func[i] == dof_func[A_diag_j[j]])
                  {
                     D_w[row] += A_diag_data[j];
                  }
               }
               jC = A_offd_i[i];
               for (j = S_offd_i[i]; j < S_offd_i[i + 1]; j++)
               {
                  jS = S_offd_j[j];
                  jA = A_offd_j[jC];
                  while (jA != jS)
                  {
                     if (dof_func[i] == dof_func_offd[jA])
                     {
                        D_w[row] += A_offd_data[jC++];
                     }
                     else
                     {
                        jC++;
                     }
                     jA = A_offd_j[jC];
                  }
                  jC++;
               }
               for (j = jC; j < A_offd_i[i + 1]; j++)
               {
                  if (dof_func[i] == dof_func_offd[A_offd_j[j]])
                  {
                     D_w[row] += A_offd_data[j];
                  }
               }
               row++;
            }
            else
            {
               for (j = A_diag_i[i]; j < A_diag_i[i + 1]; j++)
               {
                  D_w[row] += A_diag_data[j];
               }
               for (j = A_offd_i[i]; j < A_offd_i[i + 1]; j++)
               {
                  D_w[row] += A_offd_data[j];
               }
               /* Subtract strong F-F (skipping the stored diagonal entry at
                * position As_FF_diag_i[row]) and strong F-C (D_q) parts. */
               for (j = As_FF_diag_i[row] + 1; j < As_FF_diag_i[row + 1]; j++)
               {
                  D_w[row] -= As_FF_diag_data[j];
               }
               for (j = As_FF_offd_i[row]; j < As_FF_offd_i[row + 1]; j++)
               {
                  D_w[row] -= As_FF_offd_data[j];
               }
               D_w[row] -= D_q[row];
               row++;
            }
         }
      }
      /* Extended+i correction: for each strong F-neighbor jj of F-row i,
       * look for the reciprocal coupling a(jj,i); fold it into the scaling
       * denominator and accumulate D_theta. Off-processor neighbors use the
       * extracted As_FF_ext rows (global columns shifted by first_index). */
      for (i = startf; i < stopf; i++)
      {
         for (j = As_FF_diag_i[i] + 1; j < As_FF_diag_i[i + 1]; j++)
         {
            jj = As_FF_diag_j[j];
            value = D_q[jj];
            for (k = As_FF_diag_i[jj] + 1; k < As_FF_diag_i[jj + 1]; k++)
            {
               kk = As_FF_diag_j[k];
               if (kk == i)
               {
                  value1 = tmp_FF_diag_data[k];
                  value += value1;
                  D_theta[i] += As_FF_diag_data[j] * value1 / value;
                  break;
               }
            }
            As_FF_diag_data[j] /= value;
         }
         for (j = As_FF_offd_i[i]; j < As_FF_offd_i[i + 1]; j++)
         {
            jj = As_FF_offd_j[j];
            value = D_q_offd[jj];
            for (k = As_FF_ext_i[jj]; k < As_FF_ext_i[jj + 1]; k++)
            {
               kk = (HYPRE_Int)(As_FF_ext_j[k] - first_index);
               if (kk == i)
               {
                  value1 = As_FF_ext_data[k];
                  value += value1;
                  D_theta[i] += As_FF_offd_data[j] * value1 / value;
                  break;
               }
            }
            As_FF_offd_data[j] /= value;
         }
         /* Stored diagonal entry of the F-row is set to one before the
          * final -1/theta scaling below. */
         As_FF_diag_data[As_FF_diag_i[i]] = 1.0;
      }
#ifdef HYPRE_USING_OPENMP
      #pragma omp barrier
#endif
      /* Final scaling of As_FF rows by -1/(D_theta + D_w); rows with a zero
       * denominator are left unscaled. */
      for (i = startf; i < stopf; i++)
      {
         theta = (D_theta[i] + D_w[i]);
         if (theta)
         {
            theta = -1.0 / theta;
            for (j = As_FF_diag_i[i]; j < As_FF_diag_i[i + 1]; j++)
            {
               As_FF_diag_data[j] *= theta;
            }
            for (j = As_FF_offd_i[i]; j < As_FF_offd_i[i + 1]; j++)
            {
               As_FF_offd_data[j] *= theta;
            }
         }
      }
   } /* end parallel region */

   /* W holds the interpolation weights for the F-rows of P. */
   W = hypre_ParMatmul(As_FF, As_FC);
   W_diag = hypre_ParCSRMatrixDiag(W);
   W_offd = hypre_ParCSRMatrixOffd(W);
   W_diag_i = hypre_CSRMatrixI(W_diag);
   W_diag_j = hypre_CSRMatrixJ(W_diag);
   W_diag_data = hypre_CSRMatrixData(W_diag);
   W_offd_i = hypre_CSRMatrixI(W_offd);
   W_offd_j = hypre_CSRMatrixJ(W_offd);
   W_offd_data = hypre_CSRMatrixData(W_offd);
   num_cols_P_offd = hypre_CSRMatrixNumCols(W_offd);

   /*-----------------------------------------------------------------------
    *  Initialize data for P
    *-----------------------------------------------------------------------*/
   P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine + 1, memory_location_P);
   P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine + 1, memory_location_P);

   P_diag_size = n_Cpts + hypre_CSRMatrixI(W_diag)[n_Fpts];
   P_offd_size = hypre_CSRMatrixI(W_offd)[n_Fpts];

   if (P_diag_size)
   {
      P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size, memory_location_P);
      P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size, memory_location_P);
   }
   if (P_offd_size)
   {
      P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size, memory_location_P);
      P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size, memory_location_P);
   }

#ifdef HYPRE_USING_OPENMP
   #pragma omp parallel private(i,j,start,stop,startf,stopf,c_pt,row,cnt_diag,cnt_offd)
#endif
   {
      /* Interleave identity rows (C-points) with W rows (F-points), reusing
       * the thread partition recorded in the first parallel region. */
      HYPRE_Int my_thread_num = hypre_GetThreadNum();
      startf = startf_array[my_thread_num];
      stopf = startf_array[my_thread_num + 1];
      start = start_array[my_thread_num];
      stop = start_array[my_thread_num + 1];
      if (my_thread_num > 0)
      {
         c_pt = cpt_array[my_thread_num - 1];
      }
      else
      {
         c_pt = 0;
      }
      cnt_diag = W_diag_i[startf] + c_pt;
      cnt_offd = W_offd_i[startf];
      row = startf;
      for (i = start; i < stop; i++)
      {
         if (CF_marker[i] > 0)
         {
            P_diag_j[cnt_diag] = c_pt++;
            P_diag_data[cnt_diag++] = 1.0;
         }
         else
         {
            for (j = W_diag_i[row]; j < W_diag_i[row + 1]; j++)
            {
               P_diag_j[cnt_diag] = W_diag_j[j];
               P_diag_data[cnt_diag++] = W_diag_data[j];
            }
            for (j = W_offd_i[row]; j < W_offd_i[row + 1]; j++)
            {
               P_offd_j[cnt_offd] = W_offd_j[j];
               P_offd_data[cnt_offd++] = W_offd_data[j];
            }
            row++;
         }
         P_diag_i[i + 1] = cnt_diag;
         P_offd_i[i + 1] = cnt_offd;
      }
   } /* end parallel region */

   /*-----------------------------------------------------------------------
    *  Create matrix
    *-----------------------------------------------------------------------*/
   P = hypre_ParCSRMatrixCreate(comm, hypre_ParCSRMatrixGlobalNumRows(A), total_global_cpts,
                                hypre_ParCSRMatrixColStarts(A), num_cpts_global, num_cols_P_offd,
                                P_diag_i[n_fine], P_offd_i[n_fine]);

   P_diag = hypre_ParCSRMatrixDiag(P);
   hypre_CSRMatrixData(P_diag) = P_diag_data;
   hypre_CSRMatrixI(P_diag) = P_diag_i;
   hypre_CSRMatrixJ(P_diag) = P_diag_j;
   P_offd = hypre_ParCSRMatrixOffd(P);
   hypre_CSRMatrixData(P_offd) = P_offd_data;
   hypre_CSRMatrixI(P_offd) = P_offd_i;
   hypre_CSRMatrixJ(P_offd) = P_offd_j;
   /* P takes ownership of W's off-processor column map. */
   hypre_ParCSRMatrixColMapOffd(P) = hypre_ParCSRMatrixColMapOffd(W);
   hypre_ParCSRMatrixColMapOffd(W) = NULL;

   hypre_CSRMatrixMemoryLocation(P_diag) = memory_location_P;
   hypre_CSRMatrixMemoryLocation(P_offd) = memory_location_P;

   /* Compress P, removing coefficients smaller than trunc_factor * Max */
   if (trunc_factor != 0.0 || max_elmts > 0)
   {
      HYPRE_Int *map;
      hypre_BoomerAMGInterpTruncation(P, trunc_factor, max_elmts);
      /* Truncation may reallocate P's CSR arrays — refresh all views. */
      P_diag_data = hypre_CSRMatrixData(P_diag);
      P_diag_i = hypre_CSRMatrixI(P_diag);
      P_diag_j = hypre_CSRMatrixJ(P_diag);
      P_offd_data = hypre_CSRMatrixData(P_offd);
      P_offd_i = hypre_CSRMatrixI(P_offd);
      P_offd_j = hypre_CSRMatrixJ(P_offd);
      P_diag_size = P_diag_i[n_fine];
      P_offd_size = P_offd_i[n_fine];

      col_map_offd_P = hypre_ParCSRMatrixColMapOffd(P);

      if (num_cols_P_offd)
      {
         /* Compress the off-processor column map to the columns that
          * survived truncation, then renumber P_offd_j accordingly. */
         P_marker = hypre_CTAlloc(HYPRE_Int, num_cols_P_offd, HYPRE_MEMORY_HOST);
         for (i = 0; i < P_offd_size; i++)
         {
            P_marker[P_offd_j[i]] = 1;
         }
         new_ncols_P_offd = 0;
         for (i = 0; i < num_cols_P_offd; i++)
            if (P_marker[i]) { new_ncols_P_offd++; }

         new_col_map_offd = hypre_CTAlloc(HYPRE_BigInt, new_ncols_P_offd, HYPRE_MEMORY_HOST);
         map = hypre_CTAlloc(HYPRE_Int, new_ncols_P_offd, HYPRE_MEMORY_HOST);

         index = 0;
         for (i = 0; i < num_cols_P_offd; i++)
            if (P_marker[i])
            {
               new_col_map_offd[index] = col_map_offd_P[i];
               map[index++] = i;
            }
         hypre_TFree(P_marker, HYPRE_MEMORY_HOST);

#ifdef HYPRE_USING_OPENMP
         #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
         for (i = 0; i < P_offd_size; i++)
         {
            P_offd_j[i] = hypre_BinarySearch(map, P_offd_j[i], new_ncols_P_offd);
         }
         hypre_TFree(col_map_offd_P, HYPRE_MEMORY_HOST);
         hypre_ParCSRMatrixColMapOffd(P) = new_col_map_offd;
         hypre_CSRMatrixNumCols(P_offd) = new_ncols_P_offd;
         hypre_TFree(map, HYPRE_MEMORY_HOST);
      }
   }

   hypre_MatvecCommPkgCreate(P);

   *P_ptr = P;

   /* Deallocate memory */
   hypre_TFree(D_q, HYPRE_MEMORY_HOST);
   hypre_TFree(D_q_offd, HYPRE_MEMORY_HOST);
   hypre_TFree(D_w, HYPRE_MEMORY_HOST);
   hypre_TFree(D_theta, HYPRE_MEMORY_HOST);
   hypre_TFree(dof_func_offd, HYPRE_MEMORY_HOST);
   hypre_TFree(cpt_array, HYPRE_MEMORY_HOST);
   hypre_TFree(start_array, HYPRE_MEMORY_HOST);
   hypre_TFree(startf_array, HYPRE_MEMORY_HOST);
   hypre_TFree(buf_data, HYPRE_MEMORY_HOST);
   hypre_TFree(tmp_FF_diag_data, HYPRE_MEMORY_HOST);
   hypre_ParCSRMatrixDestroy(As_FF);
   hypre_ParCSRMatrixDestroy(As_FC);
   hypre_ParCSRMatrixDestroy(W);
   hypre_CSRMatrixDestroy(As_FF_ext);

   return hypre_error_flag;
}

/*-----------------------------------------------------------------------*
 * Modularized Extended+i Interpolation
 *
 * Dispatcher: device kernel when A's memory location selects device
 * execution, otherwise the host implementation above. Note the host call
 * passes debug_flag BEFORE num_functions, matching the host signature.
 *-----------------------------------------------------------------------*/
HYPRE_Int
hypre_BoomerAMGBuildModExtPIInterp(hypre_ParCSRMatrix *A, HYPRE_Int *CF_marker,
                                   hypre_ParCSRMatrix *S, HYPRE_BigInt *num_cpts_global,
                                   HYPRE_Int num_functions, HYPRE_Int *dof_func,
                                   HYPRE_Int debug_flag, HYPRE_Real trunc_factor,
                                   HYPRE_Int max_elmts, hypre_ParCSRMatrix **P_ptr)
{
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
   hypre_GpuProfilingPushRange("ModExtPIInterp");
#endif

   HYPRE_Int ierr = 0;

#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
   HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy1( hypre_ParCSRMatrixMemoryLocation(A) );
   if (exec == HYPRE_EXEC_DEVICE)
   {
      /* Device path ignores the systems-AMG arguments (1, NULL). */
      ierr = hypre_BoomerAMGBuildExtPIInterpDevice(A, CF_marker, S, num_cpts_global, 1, NULL,
                                                   debug_flag, trunc_factor, max_elmts, P_ptr);
   }
   else
#endif
   {
      ierr = hypre_BoomerAMGBuildModExtPIInterpHost(A, CF_marker, S, num_cpts_global,
                                                    debug_flag, num_functions, dof_func,
                                                    trunc_factor, max_elmts, P_ptr);
   }

#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
hypre_GpuProfilingPopRange(); #endif return ierr; } /*--------------------------------------------------------------------------- * hypre_BoomerAMGBuildModExtPEInterp * Comment: *--------------------------------------------------------------------------*/ HYPRE_Int hypre_BoomerAMGBuildModExtPEInterpHost(hypre_ParCSRMatrix *A, HYPRE_Int *CF_marker, hypre_ParCSRMatrix *S, HYPRE_BigInt *num_cpts_global, HYPRE_Int num_functions, HYPRE_Int *dof_func, HYPRE_Int debug_flag, HYPRE_Real trunc_factor, HYPRE_Int max_elmts, hypre_ParCSRMatrix **P_ptr) { /* Communication Variables */ MPI_Comm comm = hypre_ParCSRMatrixComm(A); HYPRE_MemoryLocation memory_location_P = hypre_ParCSRMatrixMemoryLocation(A); hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A); hypre_ParCSRCommHandle *comm_handle = NULL; HYPRE_Int my_id, num_procs; /* Variables to store input variables */ hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd); HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd); HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd); hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S); HYPRE_Int *S_diag_j = hypre_CSRMatrixJ(S_diag); HYPRE_Int *S_diag_i = hypre_CSRMatrixI(S_diag); hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S); HYPRE_Int *S_offd_j = hypre_CSRMatrixJ(S_offd); HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd); HYPRE_Int n_fine = hypre_CSRMatrixNumRows(A_diag); HYPRE_BigInt total_global_cpts; /* Interpolation matrix P */ hypre_ParCSRMatrix *P; hypre_CSRMatrix *P_diag; hypre_CSRMatrix *P_offd; HYPRE_Real *P_diag_data = NULL; HYPRE_Int *P_diag_i, *P_diag_j = NULL; HYPRE_Real *P_offd_data = NULL; HYPRE_Int *P_offd_i, *P_offd_j = NULL; /* Intermediate matrices */ hypre_ParCSRMatrix *As_FF, *As_FC, *W; HYPRE_Real *D_beta, *D_w, 
*D_lambda, *D_tmp, *D_tau, *D_tmp_offd = NULL; hypre_CSRMatrix *As_FF_diag; hypre_CSRMatrix *As_FF_offd; hypre_CSRMatrix *As_FC_diag; hypre_CSRMatrix *As_FC_offd; hypre_CSRMatrix *W_diag; hypre_CSRMatrix *W_offd; HYPRE_Int *As_FF_diag_i; HYPRE_Int *As_FF_diag_j; HYPRE_Int *As_FF_offd_i; HYPRE_Int *As_FF_offd_j; HYPRE_Int *As_FC_diag_i; HYPRE_Int *As_FC_offd_i; HYPRE_Int *W_diag_i; HYPRE_Int *W_offd_i; HYPRE_Int *W_diag_j; HYPRE_Int *W_offd_j = NULL; HYPRE_Real *As_FF_diag_data; HYPRE_Real *As_FF_offd_data = NULL; HYPRE_Real *As_FC_diag_data; HYPRE_Real *As_FC_offd_data = NULL; HYPRE_Real *W_diag_data; HYPRE_Real *W_offd_data = NULL; HYPRE_Real *buf_data = NULL; HYPRE_BigInt *col_map_offd_P = NULL; HYPRE_BigInt *new_col_map_offd = NULL; HYPRE_Int P_diag_size; HYPRE_Int P_offd_size; HYPRE_Int new_ncols_P_offd; HYPRE_Int num_cols_P_offd; HYPRE_Int *P_marker = NULL; HYPRE_Int *dof_func_offd = NULL; /* Loop variables */ HYPRE_Int index, startc, num_sends; HYPRE_Int i, j; HYPRE_Int *cpt_array; HYPRE_Int *start_array; HYPRE_Int *startf_array; HYPRE_Int start, stop, startf, stopf; HYPRE_Int cnt_diag, cnt_offd, row, c_pt; HYPRE_Int num_cols_A_FF_offd; HYPRE_Real value, theta; /* Definitions */ //HYPRE_Real wall_time; HYPRE_Int n_Cpts, n_Fpts; HYPRE_Int num_threads = hypre_NumThreads(); //if (debug_flag==4) wall_time = time_getWallclockSeconds(); /* BEGIN */ hypre_MPI_Comm_size(comm, &num_procs); hypre_MPI_Comm_rank(comm, &my_id); if (my_id == (num_procs - 1)) { total_global_cpts = num_cpts_global[1]; } hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs - 1, comm); n_Cpts = num_cpts_global[1] - num_cpts_global[0]; hypre_ParCSRMatrixGenerateFFFC(A, CF_marker, num_cpts_global, S, &As_FC, &As_FF); As_FC_diag = hypre_ParCSRMatrixDiag(As_FC); As_FC_diag_i = hypre_CSRMatrixI(As_FC_diag); As_FC_diag_data = hypre_CSRMatrixData(As_FC_diag); As_FC_offd = hypre_ParCSRMatrixOffd(As_FC); As_FC_offd_i = hypre_CSRMatrixI(As_FC_offd); As_FC_offd_data = 
hypre_CSRMatrixData(As_FC_offd); As_FF_diag = hypre_ParCSRMatrixDiag(As_FF); As_FF_diag_i = hypre_CSRMatrixI(As_FF_diag); As_FF_diag_j = hypre_CSRMatrixJ(As_FF_diag); As_FF_diag_data = hypre_CSRMatrixData(As_FF_diag); As_FF_offd = hypre_ParCSRMatrixOffd(As_FF); As_FF_offd_i = hypre_CSRMatrixI(As_FF_offd); As_FF_offd_j = hypre_CSRMatrixJ(As_FF_offd); As_FF_offd_data = hypre_CSRMatrixData(As_FF_offd); n_Fpts = hypre_CSRMatrixNumRows(As_FF_diag); num_cols_A_FF_offd = hypre_CSRMatrixNumCols(As_FF_offd); D_beta = hypre_CTAlloc(HYPRE_Real, n_Fpts, HYPRE_MEMORY_HOST); D_lambda = hypre_CTAlloc(HYPRE_Real, n_Fpts, HYPRE_MEMORY_HOST); D_tmp = hypre_CTAlloc(HYPRE_Real, n_Fpts, HYPRE_MEMORY_HOST); D_tau = hypre_CTAlloc(HYPRE_Real, n_Fpts, HYPRE_MEMORY_HOST); D_w = hypre_CTAlloc(HYPRE_Real, n_Fpts, HYPRE_MEMORY_HOST); cpt_array = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST); start_array = hypre_CTAlloc(HYPRE_Int, num_threads + 1, HYPRE_MEMORY_HOST); startf_array = hypre_CTAlloc(HYPRE_Int, num_threads + 1, HYPRE_MEMORY_HOST); #ifdef HYPRE_USING_OPENMP #pragma omp parallel private(i,j,start,stop,startf,stopf,row,theta,value) #endif { HYPRE_Int my_thread_num = hypre_GetThreadNum(); start = (n_fine / num_threads) * my_thread_num; if (my_thread_num == num_threads - 1) { stop = n_fine; } else { stop = (n_fine / num_threads) * (my_thread_num + 1); } start_array[my_thread_num + 1] = stop; for (i = start; i < stop; i++) { if (CF_marker[i] > 0) { cpt_array[my_thread_num]++; } } #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif if (my_thread_num == 0) { for (i = 1; i < num_threads; i++) { cpt_array[i] += cpt_array[i - 1]; } if (num_functions > 1) { HYPRE_Int *int_buf_data = NULL; HYPRE_Int num_sends, startc; HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd); dof_func_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST); index = 0; num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); int_buf_data = hypre_CTAlloc(HYPRE_Int, 
hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST); for (i = 0; i < num_sends; i++) { startc = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j = startc; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i + 1); j++) { int_buf_data[index++] = dof_func[hypre_ParCSRCommPkgSendMapElmt(comm_pkg, j)]; } } comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data, dof_func_offd); hypre_ParCSRCommHandleDestroy(comm_handle); hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST); } } #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif if (my_thread_num > 0) { startf = start - cpt_array[my_thread_num - 1]; } else { startf = 0; } if (my_thread_num < num_threads - 1) { stopf = stop - cpt_array[my_thread_num]; } else { stopf = n_Fpts; } startf_array[my_thread_num + 1] = stopf; for (i = startf; i < stopf; i++) { HYPRE_Real number; for (j = As_FF_diag_i[i] + 1; j < As_FF_diag_i[i + 1]; j++) { D_lambda[i] += As_FF_diag_data[j]; } for (j = As_FF_offd_i[i]; j < As_FF_offd_i[i + 1]; j++) { D_lambda[i] += As_FF_offd_data[j]; } number = (HYPRE_Real)(As_FF_diag_i[i + 1] - As_FF_diag_i[i] - 1 + As_FF_offd_i[i + 1] - As_FF_offd_i[i]); if (number) { D_lambda[i] /= number; } for (j = As_FC_diag_i[i]; j < As_FC_diag_i[i + 1]; j++) { D_beta[i] += As_FC_diag_data[j]; } for (j = As_FC_offd_i[i]; j < As_FC_offd_i[i + 1]; j++) { D_beta[i] += As_FC_offd_data[j]; } if (D_lambda[i] + D_beta[i]) { D_tmp[i] = D_lambda[i] / (D_beta[i] + D_lambda[i]); } } #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif if (my_thread_num == 0) { if (num_cols_A_FF_offd) { D_tmp_offd = hypre_CTAlloc(HYPRE_Real, num_cols_A_FF_offd, HYPRE_MEMORY_HOST); } index = 0; comm_pkg = hypre_ParCSRMatrixCommPkg(As_FF); if (!comm_pkg) { hypre_MatvecCommPkgCreate(As_FF); comm_pkg = hypre_ParCSRMatrixCommPkg(As_FF); } num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); buf_data = hypre_CTAlloc(HYPRE_Real, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST); for (i = 0; i < num_sends; 
i++) { startc = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j = startc; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i + 1); j++) { buf_data[index++] = D_tmp[hypre_ParCSRCommPkgSendMapElmt(comm_pkg, j)]; } } comm_handle = hypre_ParCSRCommHandleCreate( 1, comm_pkg, buf_data, D_tmp_offd); hypre_ParCSRCommHandleDestroy(comm_handle); } #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif row = startf; for (i = start; i < stop; i++) { if (CF_marker[i] < 0) { if (num_functions > 1) { HYPRE_Int jA, jC, jS; jC = A_diag_i[i]; for (j = S_diag_i[i]; j < S_diag_i[i + 1]; j++) { jS = S_diag_j[j]; jA = A_diag_j[jC]; while (jA != jS) { if (dof_func[i] == dof_func[jA]) { D_w[row] += A_diag_data[jC++]; } else { jC++; } jA = A_diag_j[jC]; } jC++; } for (j = jC; j < A_diag_i[i + 1]; j++) { if (dof_func[i] == dof_func[A_diag_j[j]]) { D_w[row] += A_diag_data[j]; } } jC = A_offd_i[i]; for (j = S_offd_i[i]; j < S_offd_i[i + 1]; j++) { jS = S_offd_j[j]; jA = A_offd_j[jC]; while (jA != jS) { if (dof_func[i] == dof_func_offd[jA]) { D_w[row] += A_offd_data[jC++]; } else { jC++; } jA = A_offd_j[jC]; } jC++; } for (j = jC; j < A_offd_i[i + 1]; j++) { if (dof_func[i] == dof_func_offd[A_offd_j[j]]) { D_w[row] += A_offd_data[j]; } } row++; } else { for (j = A_diag_i[i]; j < A_diag_i[i + 1]; j++) { D_w[row] += A_diag_data[j]; } for (j = A_offd_i[i]; j < A_offd_i[i + 1]; j++) { D_w[row] += A_offd_data[j]; } for (j = As_FF_diag_i[row] + 1; j < As_FF_diag_i[row + 1]; j++) { D_w[row] -= As_FF_diag_data[j]; } for (j = As_FF_offd_i[row]; j < As_FF_offd_i[row + 1]; j++) { D_w[row] -= As_FF_offd_data[j]; } D_w[row] -= D_beta[row]; row++; } } } for (i = startf; i < stopf; i++) { for (j = As_FF_diag_i[i] + 1; j < As_FF_diag_i[i + 1]; j++) { index = As_FF_diag_j[j]; D_tau[i] += As_FF_diag_data[j] * D_tmp[index]; } for (j = As_FF_offd_i[i]; j < As_FF_offd_i[i + 1]; j++) { index = As_FF_offd_j[j]; D_tau[i] += As_FF_offd_data[j] * D_tmp_offd[index]; } } for (i = startf; i < stopf; i++) { value = D_w[i] 
+ D_tau[i]; if (value) { value = -1.0 / value; } theta = D_beta[i] + D_lambda[i]; As_FF_diag_data[As_FF_diag_i[i]] = value * theta; if (theta) { theta = 1.0 / theta; } for (j = As_FF_diag_i[i] + 1; j < As_FF_diag_i[i + 1]; j++) { As_FF_diag_data[j] *= value; } for (j = As_FF_offd_i[i]; j < As_FF_offd_i[i + 1]; j++) { As_FF_offd_data[j] *= value; } for (j = As_FC_diag_i[i]; j < As_FC_diag_i[i + 1]; j++) { As_FC_diag_data[j] *= theta; } for (j = As_FC_offd_i[i]; j < As_FC_offd_i[i + 1]; j++) { As_FC_offd_data[j] *= theta; } } } /* end parallel region */ W = hypre_ParMatmul(As_FF, As_FC); W_diag = hypre_ParCSRMatrixDiag(W); W_offd = hypre_ParCSRMatrixOffd(W); W_diag_i = hypre_CSRMatrixI(W_diag); W_diag_j = hypre_CSRMatrixJ(W_diag); W_diag_data = hypre_CSRMatrixData(W_diag); W_offd_i = hypre_CSRMatrixI(W_offd); W_offd_j = hypre_CSRMatrixJ(W_offd); W_offd_data = hypre_CSRMatrixData(W_offd); num_cols_P_offd = hypre_CSRMatrixNumCols(W_offd); /*----------------------------------------------------------------------- * Intialize data for P *-----------------------------------------------------------------------*/ P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine + 1, memory_location_P); P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine + 1, memory_location_P); P_diag_size = n_Cpts + hypre_CSRMatrixI(W_diag)[n_Fpts]; P_offd_size = hypre_CSRMatrixI(W_offd)[n_Fpts]; if (P_diag_size) { P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size, memory_location_P); P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size, memory_location_P); } if (P_offd_size) { P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size, memory_location_P); P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size, memory_location_P); } #ifdef HYPRE_USING_OPENMP #pragma omp parallel private(i,j,start,stop,startf,stopf,c_pt,row,cnt_diag,cnt_offd) #endif { HYPRE_Int my_thread_num = hypre_GetThreadNum(); startf = startf_array[my_thread_num]; stopf = startf_array[my_thread_num + 1]; start = start_array[my_thread_num]; stop = 
start_array[my_thread_num + 1]; if (my_thread_num > 0) { c_pt = cpt_array[my_thread_num - 1]; } else { c_pt = 0; } cnt_diag = W_diag_i[startf] + c_pt; cnt_offd = W_offd_i[startf]; row = startf; for (i = start; i < stop; i++) { if (CF_marker[i] > 0) { P_diag_j[cnt_diag] = c_pt++; P_diag_data[cnt_diag++] = 1.0; } else { for (j = W_diag_i[row]; j < W_diag_i[row + 1]; j++) { P_diag_j[cnt_diag] = W_diag_j[j]; P_diag_data[cnt_diag++] = W_diag_data[j]; } for (j = W_offd_i[row]; j < W_offd_i[row + 1]; j++) { P_offd_j[cnt_offd] = W_offd_j[j]; P_offd_data[cnt_offd++] = W_offd_data[j]; } row++; } P_diag_i[i + 1] = cnt_diag; P_offd_i[i + 1] = cnt_offd; } } /* end parallel region */ /*----------------------------------------------------------------------- * Create matrix *-----------------------------------------------------------------------*/ P = hypre_ParCSRMatrixCreate(comm, hypre_ParCSRMatrixGlobalNumRows(A), total_global_cpts, hypre_ParCSRMatrixColStarts(A), num_cpts_global, num_cols_P_offd, P_diag_i[n_fine], P_offd_i[n_fine]); P_diag = hypre_ParCSRMatrixDiag(P); hypre_CSRMatrixData(P_diag) = P_diag_data; hypre_CSRMatrixI(P_diag) = P_diag_i; hypre_CSRMatrixJ(P_diag) = P_diag_j; P_offd = hypre_ParCSRMatrixOffd(P); hypre_CSRMatrixData(P_offd) = P_offd_data; hypre_CSRMatrixI(P_offd) = P_offd_i; hypre_CSRMatrixJ(P_offd) = P_offd_j; hypre_ParCSRMatrixColMapOffd(P) = hypre_ParCSRMatrixColMapOffd(W); hypre_ParCSRMatrixColMapOffd(W) = NULL; hypre_CSRMatrixMemoryLocation(P_diag) = memory_location_P; hypre_CSRMatrixMemoryLocation(P_offd) = memory_location_P; /* Compress P, removing coefficients smaller than trunc_factor * Max */ if (trunc_factor != 0.0 || max_elmts > 0) { HYPRE_Int *map; hypre_BoomerAMGInterpTruncation(P, trunc_factor, max_elmts); P_diag_data = hypre_CSRMatrixData(P_diag); P_diag_i = hypre_CSRMatrixI(P_diag); P_diag_j = hypre_CSRMatrixJ(P_diag); P_offd_data = hypre_CSRMatrixData(P_offd); P_offd_i = hypre_CSRMatrixI(P_offd); P_offd_j = hypre_CSRMatrixJ(P_offd); 
P_diag_size = P_diag_i[n_fine]; P_offd_size = P_offd_i[n_fine]; col_map_offd_P = hypre_ParCSRMatrixColMapOffd(P); if (num_cols_P_offd) { P_marker = hypre_CTAlloc(HYPRE_Int, num_cols_P_offd, HYPRE_MEMORY_HOST); for (i = 0; i < P_offd_size; i++) { P_marker[P_offd_j[i]] = 1; } new_ncols_P_offd = 0; for (i = 0; i < num_cols_P_offd; i++) if (P_marker[i]) { new_ncols_P_offd++; } new_col_map_offd = hypre_CTAlloc(HYPRE_BigInt, new_ncols_P_offd, HYPRE_MEMORY_HOST); map = hypre_CTAlloc(HYPRE_Int, new_ncols_P_offd, HYPRE_MEMORY_HOST); index = 0; for (i = 0; i < num_cols_P_offd; i++) if (P_marker[i]) { new_col_map_offd[index] = col_map_offd_P[i]; map[index++] = i; } hypre_TFree(P_marker, HYPRE_MEMORY_HOST); #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < P_offd_size; i++) { P_offd_j[i] = hypre_BinarySearch(map, P_offd_j[i], new_ncols_P_offd); } hypre_TFree(col_map_offd_P, HYPRE_MEMORY_HOST); hypre_ParCSRMatrixColMapOffd(P) = new_col_map_offd; hypre_CSRMatrixNumCols(P_offd) = new_ncols_P_offd; hypre_TFree(map, HYPRE_MEMORY_HOST); } } hypre_MatvecCommPkgCreate(P); *P_ptr = P; /* Deallocate memory */ hypre_TFree(D_tmp, HYPRE_MEMORY_HOST); hypre_TFree(D_tmp_offd, HYPRE_MEMORY_HOST); hypre_TFree(D_w, HYPRE_MEMORY_HOST); hypre_TFree(D_tau, HYPRE_MEMORY_HOST); hypre_TFree(D_beta, HYPRE_MEMORY_HOST); hypre_TFree(D_lambda, HYPRE_MEMORY_HOST); hypre_TFree(cpt_array, HYPRE_MEMORY_HOST); hypre_TFree(start_array, HYPRE_MEMORY_HOST); hypre_TFree(startf_array, HYPRE_MEMORY_HOST); hypre_TFree(buf_data, HYPRE_MEMORY_HOST); hypre_ParCSRMatrixDestroy(As_FF); hypre_ParCSRMatrixDestroy(As_FC); hypre_ParCSRMatrixDestroy(W); return hypre_error_flag; } /*-----------------------------------------------------------------------* * Modularized Extended+e Interpolation *-----------------------------------------------------------------------*/ HYPRE_Int hypre_BoomerAMGBuildModExtPEInterp(hypre_ParCSRMatrix *A, HYPRE_Int *CF_marker, hypre_ParCSRMatrix 
*S, HYPRE_BigInt *num_cpts_global, HYPRE_Int num_functions, HYPRE_Int *dof_func, HYPRE_Int debug_flag, HYPRE_Real trunc_factor, HYPRE_Int max_elmts, hypre_ParCSRMatrix **P_ptr) { #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) hypre_GpuProfilingPushRange("ModExtPEInterp"); #endif HYPRE_Int ierr = 0; #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy1( hypre_ParCSRMatrixMemoryLocation(A) ); if (exec == HYPRE_EXEC_DEVICE) { ierr = hypre_BoomerAMGBuildExtPEInterpDevice(A, CF_marker, S, num_cpts_global, 1, NULL, debug_flag, trunc_factor, max_elmts, P_ptr); } else #endif { ierr = hypre_BoomerAMGBuildModExtPEInterpHost(A, CF_marker, S, num_cpts_global, num_functions, dof_func, debug_flag, trunc_factor, max_elmts, P_ptr); } #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) hypre_GpuProfilingPopRange(); #endif return ierr; }
jacobi-avx-peel.c
#include <immintrin.h> // Jacobi stencil // AVX intrinsics + manual peeling + streaming stores // typedef double adouble __attribute__ ((aligned(16))); inline void kernel(adouble* v1, adouble * v2, int m) { __m256d alpha = _mm256_set1_pd(0.25); // __m256d phi_e = _mm256_loadu_pd (v1 + 1 ); __m256d phi_w = _mm256_loadu_pd (v1 - 1 ); __m256d phi_n = _mm256_loadu_pd (v1 + m); __m256d phi_s = _mm256_loadu_pd (v1 - m); // phi_e = _mm256_add_pd(phi_e, phi_s); phi_e = _mm256_add_pd(phi_e, phi_n); //phi_e = _mm_fmadd_pd(alpha, phi_e, phi_w); phi_e = _mm256_add_pd(phi_e, phi_w); phi_e = _mm256_mul_pd(alpha, phi_e); // //printf("-> p = %p\n", &v2[0]); _mm256_stream_pd(v2, phi_e); } inline void kernel_sequential(double* v1, double * v2, int m) { double phi_e = *(v1 + 1); double phi_w = *(v1 - 1); double phi_n = *(v1 + m); double phi_s = *(v1 - m); double phi = 0.25*(phi_e + phi_w + phi_n + phi_s); *(v2) = phi; } void laplacian(double* v1, double* v2, int dim_m, int dim_n) { // #pragma omp parallel for schedule(static) for (int j = 1; j < dim_n - 1; ++j ) { int kstart = 1; while ( ((long) &v2[j*dim_m + kstart]) & 0x000000000000001F ) { kstart++; } int i = 1; for (; i < kstart; ++i) { kernel_sequential(v1 + j*dim_n + i, v2 + j*dim_n + i, dim_n); } for (; i < dim_m - 1 - (dim_m - 1)%4; i = i + 4) { kernel(v1 + j*dim_n + i, v2 + j*dim_n + i, dim_n); } //asm volatile ("mfence" ::: "memory"); for (; i < dim_m - 1; ++i) { kernel_sequential(v1 + j*dim_n + i, v2 + j*dim_n + i, dim_n); } } #pragma omp parallel { _mm_sfence(); } }
OMP.c
#define CHUNK 1024*1024 // Run CHUNK iterations and check error #define LOG 1024 // Print progress each LOG iterations #define LIMIT 1024*1024 // LIMIT of iterations #include "../common.h" int main(int argc, char *argv[]) { unsigned int digits; unsigned int threads; double precision; getParams(argc, argv, &threads, &digits, &precision); double sum= 0.0, pi, error= 1.0; omp_set_num_threads(threads); unsigned long i = 0; while (error > precision && i < LIMIT) { #pragma omp parallel for reduction(+:sum) for (unsigned long n = i * CHUNK; n < (i + 1) * CHUNK; ++n) { if (n % 2 == 0) sum += 1.0 / ((n << 1) + 1); else sum -= 1.0 / ((n << 1) + 1); } pi = 4.0 * sum; error = getError(pi); printLog(precision, pi, error, ++i); } return EXIT_SUCCESS; }
dynamic_enough_threads.c
// RUN: %libomp-compile-and-run | FileCheck %s // REQUIRES: ompt #include "callback.h" int main() { omp_set_dynamic(1); #pragma omp parallel num_threads(4) { print_ids(0); print_ids(1); } print_fuzzy_address(1); // Check if libomp supports the callbacks for this test. // CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_thread_begin' // CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_thread_end' // CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_parallel_begin' // CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_parallel_end' // CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_implicit_task' //team-size of 1-4 is expected // CHECK: 0: NULL_POINTER=[[NULL:.*$]] // make sure initial data pointers are null // CHECK-NOT: 0: parallel_data initially not null // CHECK-NOT: 0: task_data initially not null // CHECK-NOT: 0: thread_data initially not null // CHECK: {{^}}[[MASTER_ID:[0-9]+]]: ompt_event_parallel_begin: parent_task_id=[[PARENT_TASK_ID:[0-9]+]], parent_task_frame.exit=[[NULL]], parent_task_frame.reenter={{0x[0-f]+}}, parallel_id=[[PARALLEL_ID:[0-9]+]], requested_team_size=4, codeptr_ra=[[RETURN_ADDRESS:0x[0-f]+]]{{[0-f][0-f]}}, invoker=[[PARALLEL_INVOKER:[0-9]+]] // CHECK: {{^}}[[MASTER_ID]]: ompt_event_implicit_task_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID:[0-9]+]], team_size={{[1-4]}} // CHECK: {{^}}[[MASTER_ID]]: task level 0: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]] // CHECK: {{^}}[[MASTER_ID]]: task level 1: parallel_id=[[IMPLICIT_PARALLEL_ID:[0-9]+]], task_id=[[PARENT_TASK_ID]] // CHECK: {{^}}[[MASTER_ID]]: ompt_event_implicit_task_end: parallel_id={{[0-9]+}}, task_id=[[IMPLICIT_TASK_ID]] // CHECK: {{^}}[[MASTER_ID]]: ompt_event_parallel_end: parallel_id=[[PARALLEL_ID]], task_id=[[PARENT_TASK_ID]], invoker=[[PARALLEL_INVOKER]] // CHECK: {{^}}[[MASTER_ID]]: fuzzy_address={{.*}}[[RETURN_ADDRESS]] return 0; }
DRB057-jacobiinitialize-orig-no.c
/* Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https://github.com/LLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/

/* Use of private() clause */
#include <stdio.h>
#include <math.h>

#define MSIZE 200

/* Grid dimensions and model coefficient (file-scope globals, set once). */
int n=MSIZE, m=MSIZE;
double alpha = 0.0543;
double u[MSIZE][MSIZE], f[MSIZE][MSIZE], uold[MSIZE][MSIZE];
double dx, dy;

/*
 * Fills u with zeros and f with the Jacobi right-hand side.
 * Race-free by construction: each (i, j) iteration writes only its own
 * u[i][j] / f[i][j], and all loop variables and temporaries are private.
 * NOTE(review): the second "parallel for" is nested inside the first; with
 * nested parallelism disabled (the OpenMP default) it runs with a single
 * thread per outer iteration.  This is a DataRaceBench case ("orig-no" =
 * intentionally race-free), so the pragma structure is kept verbatim.
 */
void initialize ()
{
  int i, j, xx, yy;
  dx = 2.0 / (n - 1);
  dy = 2.0 / (m - 1);

  /* Initialize initial condition and RHS */
#pragma omp parallel for private(i ,j ,xx ,yy )
  for (i = 0; i < n; i++)
#pragma omp parallel for private(j ,xx ,yy )
    for (j = 0; j < m; j++)
    {
      /* xx/yy are truncated toward zero by the (int) casts. */
      xx = (int) (-1.0 + dx * (i - 1));       /* -1 < x < 1 */
      yy = (int) (-1.0 + dy * (j - 1));       /* -1 < y < 1 */
      u[i][j] = 0.0;
      f[i][j] = -1.0 * alpha * (1.0 - xx * xx) * (1.0 - yy * yy) - 2.0 * (1.0 - xx * xx) - 2.0 * (1.0 - yy * yy);
    }
}

/* Prints both arrays so the initialization cannot be optimized away. */
int main()
{
  initialize();

  int i, j;
  for (i = 0; i < n; i++)
  {
    for (j = 0; j < m; j++)
    {
      printf("%lf %lf\n", u[i][j], f[i][j]);
    }
  }
  return 0;
}
sparselu.balance.c
#include "hclib.h"
#include <omp.h>

/* Per-thread task counters filled in by the instrumented parallel version;
 * dumped to stderr at the end of sparselu_par_call().  Sized for at most 32
 * OpenMP threads (checked with assert below). */
int ____num_tasks[32] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };

/**********************************************************************************************/
/* This program is part of the Barcelona OpenMP Tasks Suite */
/* Copyright (C) 2009 Barcelona Supercomputing Center - Centro Nacional de Supercomputacion */
/* Copyright (C) 2009 Universitat Politecnica de Catalunya */
/* */
/* This program is free software; you can redistribute it and/or modify */
/* it under the terms of the GNU General Public License as published by */
/* the Free Software Foundation; either version 2 of the License, or */
/* (at your option) any later version. */
/* */
/* This program is distributed in the hope that it will be useful, */
/* but WITHOUT ANY WARRANTY; without even the implied warranty of */
/* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the */
/* GNU General Public License for more details. */
/* */
/* You should have received a copy of the GNU General Public License */
/* along with this program; if not, write to the Free Software */
/* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */
/**********************************************************************************************/

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <libgen.h>
#include "bots.h"
#include "sparselu.h"

/***********************************************************************
 * checkmat:
 **********************************************************************/
/* Compares two bots_arg_size_1 x bots_arg_size_1 blocks element-wise;
 * returns FALSE on the first absolute (when M entry is 0) or relative
 * error above EPSILON, TRUE otherwise. */
int checkmat (float *M, float *N)
{
   int i, j;
   float r_err;

   for (i = 0; i < bots_arg_size_1; i++)
   {
      for (j = 0; j < bots_arg_size_1; j++)
      {
         r_err = M[i*bots_arg_size_1+j] - N[i*bots_arg_size_1+j];
         if ( r_err == 0.0 ) continue;

         if (r_err < 0.0 ) r_err = -r_err;

         if ( M[i*bots_arg_size_1+j] == 0 )
         {
            bots_message("Checking failure: A[%d][%d]=%f B[%d][%d]=%f; \n", i,j, M[i*bots_arg_size_1+j], i,j, N[i*bots_arg_size_1+j]);
            return FALSE;
         }
         r_err = r_err / M[i*bots_arg_size_1+j];
         if(r_err > EPSILON)
         {
            bots_message("Checking failure: A[%d][%d]=%f B[%d][%d]=%f; Relative Error=%f\n", i,j, M[i*bots_arg_size_1+j], i,j, N[i*bots_arg_size_1+j], r_err);
            return FALSE;
         }
      }
   }
   return TRUE;
}

/***********************************************************************
 * genmat:
 **********************************************************************/
/* Builds the sparse block matrix: decides per (ii, jj) whether the block is
 * NULL and, for non-null blocks, fills it with a deterministic LCG-style
 * pseudo-random sequence (seed 1325). */
void genmat (float *M[])
{
   int null_entry, init_val, i, j, ii, jj;
   float *p;
   int a=0,b=0; /* counts of allocated / null blocks, for the debug line */

   init_val = 1325;

   /* generating the structure */
   for (ii=0; ii < bots_arg_size; ii++)
   {
      for (jj=0; jj < bots_arg_size; jj++)
      {
         /* computing null entries */
         null_entry=FALSE;
         if ((ii<jj) && (ii%3 !=0)) null_entry = TRUE;
         if ((ii>jj) && (jj%3 !=0)) null_entry = TRUE;
         if (ii%2==1) null_entry = TRUE;
         if (jj%2==1) null_entry = TRUE;
         if (ii==jj) null_entry = FALSE;
         if (ii==jj-1) null_entry = FALSE;
         if (ii-1 == jj) null_entry = FALSE;
         /* allocating matrix */
         if (null_entry == FALSE){
            a++;
            M[ii*bots_arg_size+jj] = (float *) malloc(bots_arg_size_1*bots_arg_size_1*sizeof(float));
            if ((M[ii*bots_arg_size+jj] == NULL))
            {
               bots_message("Error: Out of memory\n");
               exit(101);
            }
            /* initializing matrix */
            p = M[ii*bots_arg_size+jj];
            for (i = 0; i < bots_arg_size_1; i++)
            {
               for (j = 0; j < bots_arg_size_1; j++)
               {
                  init_val = (3125 * init_val) % 65536;
                  (*p) = (float)((init_val - 32768.0) / 16384.0);
                  p++;
               }
            }
         }
         else
         {
            b++;
            M[ii*bots_arg_size+jj] = NULL;
         }
      }
   }
   bots_debug("allo = %d, no = %d, total = %d, factor = %f\n",a,b,a+b,(float)((float)a/(float)(a+b)));
}

/***********************************************************************
 * print_structure:
 **********************************************************************/
/* Prints an 'x'/' ' map of which blocks are allocated. */
void print_structure(char *name, float *M[])
{
   int ii, jj;
   bots_message("Structure for matrix %s @ 0x%p\n",name, M);
   for (ii = 0; ii < bots_arg_size; ii++)
   {
      for (jj = 0; jj < bots_arg_size; jj++)
      {
         if (M[ii*bots_arg_size+jj]!=NULL) {bots_message("x");}
         else bots_message(" ");
      }
      bots_message("\n");
   }
   bots_message("\n");
}

/***********************************************************************
 * allocate_clean_block:
 **********************************************************************/
/* Allocates one zero-filled block; exits with code 101 on OOM. */
float * allocate_clean_block()
{
   int i,j;
   float *p, *q;

   p = (float *) malloc(bots_arg_size_1*bots_arg_size_1*sizeof(float));
   q=p;
   if (p!=NULL){
      for (i = 0; i < bots_arg_size_1; i++)
         for (j = 0; j < bots_arg_size_1; j++){(*p)=0.0; p++;}
   }
   else
   {
      bots_message("Error: Out of memory\n");
      exit (101);
   }
   return (q);
}

/***********************************************************************
 * lu0:
 **********************************************************************/
/* In-place LU factorization (no pivoting) of a diagonal block. */
void lu0(float *diag)
{
   int i, j, k;

   for (k=0; k<bots_arg_size_1; k++)
      for (i=k+1; i<bots_arg_size_1; i++)
      {
         diag[i*bots_arg_size_1+k] = diag[i*bots_arg_size_1+k] / diag[k*bots_arg_size_1+k];
         for (j=k+1; j<bots_arg_size_1; j++)
            diag[i*bots_arg_size_1+j] = diag[i*bots_arg_size_1+j] - diag[i*bots_arg_size_1+k] * diag[k*bots_arg_size_1+j];
      }
}

/***********************************************************************
 * bdiv:
 **********************************************************************/
/* Triangular solve of a row block against the factored diagonal block. */
void bdiv(float *diag, float *row)
{
   int i, j, k;
   for (i=0; i<bots_arg_size_1; i++)
      for (k=0; k<bots_arg_size_1; k++)
      {
         row[i*bots_arg_size_1+k] = row[i*bots_arg_size_1+k] / diag[k*bots_arg_size_1+k];
         for (j=k+1; j<bots_arg_size_1; j++)
            row[i*bots_arg_size_1+j] = row[i*bots_arg_size_1+j] - row[i*bots_arg_size_1+k]*diag[k*bots_arg_size_1+j];
      }
}

/***********************************************************************
 * bmod:
 **********************************************************************/
/* Schur-complement update: inner -= row * col (block GEMM). */
void bmod(float *row, float *col, float *inner)
{
   int i, j, k;
   for (i=0; i<bots_arg_size_1; i++)
      for (j=0; j<bots_arg_size_1; j++)
         for (k=0; k<bots_arg_size_1; k++)
            inner[i*bots_arg_size_1+j] = inner[i*bots_arg_size_1+j] - row[i*bots_arg_size_1+k]*col[k*bots_arg_size_1+j];
}

/***********************************************************************
 * fwd:
 **********************************************************************/
/* Forward elimination of a column block against the diagonal block. */
void fwd(float *diag, float *col)
{
   int i, j, k;
   for (j=0; j<bots_arg_size_1; j++)
      for (k=0; k<bots_arg_size_1; k++)
         for (i=k+1; i<bots_arg_size_1; i++)
            col[i*bots_arg_size_1+j] = col[i*bots_arg_size_1+j] - diag[i*bots_arg_size_1+k]*col[k*bots_arg_size_1+j];
}

/* Allocates the block-pointer array, generates the matrix and prints its
 * structure. */
void sparselu_init (float ***pBENCH, char *pass)
{
   *pBENCH = (float **) malloc(bots_arg_size*bots_arg_size*sizeof(float *));
   genmat(*pBENCH);
   print_structure(pass, *pBENCH);
}

/* Sequential reference factorization: for each diagonal step kk, factor the
 * diagonal block, eliminate its row and column panels, then apply the
 * trailing-submatrix updates (allocating fill-in blocks on demand). */
void sparselu_seq_call(float **BENCH)
{
   int ii, jj, kk;

   for (kk=0; kk<bots_arg_size; kk++)
   {
      lu0(BENCH[kk*bots_arg_size+kk]);
      for (jj=kk+1; jj<bots_arg_size; jj++)
         if (BENCH[kk*bots_arg_size+jj] != NULL)
         {
            fwd(BENCH[kk*bots_arg_size+kk], BENCH[kk*bots_arg_size+jj]);
         }
      for (ii=kk+1; ii<bots_arg_size; ii++)
         if (BENCH[ii*bots_arg_size+kk] != NULL)
         {
            bdiv (BENCH[kk*bots_arg_size+kk], BENCH[ii*bots_arg_size+kk]);
         }
      for (ii=kk+1; ii<bots_arg_size; ii++)
         if (BENCH[ii*bots_arg_size+kk] != NULL)
            for (jj=kk+1; jj<bots_arg_size; jj++)
               if (BENCH[kk*bots_arg_size+jj] != NULL)
               {
                  if (BENCH[ii*bots_arg_size+jj]==NULL) BENCH[ii*bots_arg_size+jj] = allocate_clean_block();
                  bmod(BENCH[ii*bots_arg_size+kk], BENCH[kk*bots_arg_size+jj], BENCH[ii*bots_arg_size+jj]);
               }
   }
}

/* Task-parallel factorization, instrumented by the HCLIB "balance" tooling
 * to count tasks executed per thread in ____num_tasks[].
 * NOTE(review): each OpenMP pragma below is followed by a stray `;` on its
 * own line (generated code), so the pragma formally binds to that empty
 * statement rather than to the following compound statement.  This is
 * preserved verbatim — it is part of the instrumented source under study,
 * and "fixing" it would change what this benchmark measures. */
void sparselu_par_call(float **BENCH)
{
   int ii, jj, kk;
{
   bots_message("Computing SparseLU Factorization (%dx%d matrix with %dx%d blocks) ", bots_arg_size,bots_arg_size,bots_arg_size_1,bots_arg_size_1);
#pragma omp parallel private(kk)
 ;
{
#pragma omp single
 ;
   for (kk=0; kk<bots_arg_size; kk++)
   {
      lu0(BENCH[kk*bots_arg_size+kk]);
      /* row-panel elimination tasks */
      for (jj=kk+1; jj<bots_arg_size; jj++)
         if (BENCH[kk*bots_arg_size+jj] != NULL)
         {
#ifdef HCLIB_TASK_UNTIED
#pragma omp task firstprivate(kk, jj) shared(BENCH) untied
#else
#pragma omp task firstprivate(kk, jj) shared(BENCH)
#endif
 ;
{
 ____num_tasks[omp_get_thread_num()]++;
 {
            fwd(BENCH[kk*bots_arg_size+kk], BENCH[kk*bots_arg_size+jj]);
         }
 ;
}
         }
      /* column-panel elimination tasks */
      for (ii=kk+1; ii<bots_arg_size; ii++)
         if (BENCH[ii*bots_arg_size+kk] != NULL)
         {
#ifdef HCLIB_TASK_UNTIED
#pragma omp task firstprivate(kk, ii) shared(BENCH) untied
#else
#pragma omp task firstprivate(kk, ii) shared(BENCH)
#endif
 ;
{
 ____num_tasks[omp_get_thread_num()]++;
 {
            bdiv (BENCH[kk*bots_arg_size+kk], BENCH[ii*bots_arg_size+kk]);
         }
 ;
}
         }
      /* trailing-submatrix update tasks (allocate fill-in on demand) */
      for (ii=kk+1; ii<bots_arg_size; ii++)
         if (BENCH[ii*bots_arg_size+kk] != NULL)
            for (jj=kk+1; jj<bots_arg_size; jj++)
               if (BENCH[kk*bots_arg_size+jj] != NULL)
               {
#ifdef HCLIB_TASK_UNTIED
#pragma omp task firstprivate(kk, jj, ii) shared(BENCH) untied
#else
#pragma omp task firstprivate(kk, jj, ii) shared(BENCH)
#endif
 ;
{
 ____num_tasks[omp_get_thread_num()]++;
 {
                  if (BENCH[ii*bots_arg_size+jj]==NULL) BENCH[ii*bots_arg_size+jj] = allocate_clean_block();
                  bmod(BENCH[ii*bots_arg_size+kk], BENCH[kk*bots_arg_size+jj], BENCH[ii*bots_arg_size+jj]);
               }
 ;
}
               }
   }
}
   bots_message(" completed!\n");
}
 ;
/* Dump the per-thread task counts gathered above.
 * NOTE(review): relies on assert() being visible via one of the project
 * headers (no direct <assert.h> include here) — confirm against bots.h. */
{
   int __i;
   assert(omp_get_max_threads() <= 32);
   for (__i = 0; __i < omp_get_max_threads(); __i++) {
      fprintf(stderr, "Thread %d: %d\n", __i, ____num_tasks[__i]);
   }
}
}

/* Prints the final block structure (fill-in included). */
void sparselu_fini (float **BENCH, char *pass)
{
   print_structure(pass, BENCH);
}

/* Compares the sequential and parallel results block by block: structure
 * must match, and matching blocks must pass checkmat(). */
int sparselu_check(float **SEQ, float **BENCH)
{
   int ii,jj,ok=1;

   for (ii=0; ((ii<bots_arg_size) && ok); ii++)
   {
      for (jj=0; ((jj<bots_arg_size) && ok); jj++)
      {
         if ((SEQ[ii*bots_arg_size+jj] == NULL) && (BENCH[ii*bots_arg_size+jj] != NULL)) ok = FALSE;
         if ((SEQ[ii*bots_arg_size+jj] != NULL) && (BENCH[ii*bots_arg_size+jj] == NULL)) ok = FALSE;
         if ((SEQ[ii*bots_arg_size+jj] != NULL) && (BENCH[ii*bots_arg_size+jj] != NULL))
            ok = checkmat(SEQ[ii*bots_arg_size+jj], BENCH[ii*bots_arg_size+jj]);
      }
   }
   if (ok) return BOTS_RESULT_SUCCESSFUL;
   else return BOTS_RESULT_UNSUCCESSFUL;
}
fitzhugh_1961.c
#include "fitzhugh_1961.h"

/* Reports the model metadata the caller asked for: resting potential and/or
 * number of ODE state variables (INITIAL_V / NEQ come from the header). */
GET_CELL_MODEL_DATA(init_cell_model_data)
{
    if(get_initial_v)
        cell_model->initial_v = INITIAL_V;
    if(get_neq)
        cell_model->number_of_ode_equations = NEQ;
}

/* Initial conditions of the FitzHugh 1961 model: both states start at 0. */
SET_ODE_INITIAL_CONDITIONS_CPU(set_model_initial_conditions_cpu)
{
    sv[0] = 0.000000f; //V millivolt
    sv[1] = 0.000000f; //h dimensionless
}

/* Advances every requested cell by num_steps explicit-Euler steps of size dt.
 * cells_to_solve, when non-NULL, maps work item i to its cell index;
 * otherwise cells are solved in order.  Cells are independent, so the loop
 * parallelizes with only sv_id private per iteration. */
SOLVE_MODEL_ODES_CPU(solve_model_odes_cpu)
{
    uint32_t sv_id;
    int i;

    #pragma omp parallel for private(sv_id)
    for (i = 0; i < num_cells_to_solve; i++)
    {
        if(cells_to_solve)
            sv_id = cells_to_solve[i];
        else
            sv_id = i;

        for (int j = 0; j < num_steps; ++j)
        {
            solve_model_ode_cpu(dt, sv + (sv_id * NEQ), stim_currents[i]);
        }
    }
}

/* One forward-Euler step:  sv <- sv + dt * f(sv). */
void solve_model_ode_cpu(real dt, real *sv, real stim_current)
{
    real rY[NEQ], rDY[NEQ];

    for(int i = 0; i < NEQ; i++)
        rY[i] = sv[i];

    RHS_cpu(rY, rDY, stim_current);

    for(int i = 0; i < NEQ; i++)
        sv[i] = dt*rDY[i] + rY[i];
}

/* Right-hand side of the two-variable FitzHugh 1961 model:
 *   dV/dt = V (V - alpha)(1 - V) - h + I_stim
 *   dh/dt = epsilon (V - gamma h)
 */
void RHS_cpu(const real *sv, real *rDY_, real stim_current)
{
    //State variables
    const real V_old_ = sv[0];
    const real h_old_ = sv[1];

    //Parameters
    const real alpha = -0.100000000000000e+00f;
    const real gamma = 3.000000000000000e+00f;
    const real epsilon = 5.000000000000000e-03f;

    real calc_I_stim = stim_current;

    rDY_[0] = (( V_old_*(V_old_ - alpha)*(1.00000 - V_old_) - h_old_) + calc_I_stim);
    rDY_[1] = epsilon*(V_old_ - gamma*h_old_);
}
matrixmultiply-ompacc.c
/* Naive matrix-matrix multiplication(mmm) By C. Liao */
#include <stdio.h>
#include <math.h> /* fabsf() for the tolerance check in verify() */
#ifdef _OPENMP
#include <omp.h>
#endif

#define N 1024
#define M 1024
#define K 1024
#define REAL float

/* Loop indices are file-scope globals (original offloading style); the
 * parallel loop in mmm() privatizes them explicitly, and the serial code
 * runs single-threaded, so this is safe as written. */
int i,j,k;

/* a is N x M, b is M x K; c = a*b (offloaded), c2 = serial reference. */
REAL a[N][M],b[M][K],c[N][K], c2[N][K];

int init();
int mmm();
int mmm2();
int verify();

int main(void)
{
  init();
  mmm();
  mmm2();
  return verify();
}

/* Fills a and b with deterministic values, clears both result arrays. */
int init()
{
  for (i=0;i<N;i++)
    for(j=0;j<M;j++)
      a[i][j]=3.0*i*j/N/M;
  for (i=0;i<M;i++)
    for(j=0;j<K;j++)
      b[i][j]=5.0*j*i/N/M;
  for (i=0;i<N;i++)
    for(j=0;j<K;j++)
    {
      c[i][j]=0.0;
      c2[i][j]=0.0;
    }
  return 0;
}

/* TODO: try different i,j,k orders
 a b e f a*e+ b*g , a*f+ b*h
 c d x g h = c*e+ d*g, c*f+ d*h
*/

/* Offloaded naive mmm.
 * Fixed: c is N x K, so the j loop runs to K and the reduction index k runs
 * to M (the original used j<M / k<K, correct only because M == K), and the
 * map extent of c is now [0:N][0:K] to match its declaration. */
int mmm()
{
#pragma omp target map(inout:c[0:N][0:K]), map(in:a[0:N][0:M],b[0:M][0:K],j,k)
#pragma omp parallel for private(i,j,k)
  for (i = 0; i < N; i++)
    for (j = 0; j < K; j++)
      for (k = 0; k < M; k++)
        c[i][j]= c[i][j]+a[i][k]*b[k][j];
  return 0;
}

/* Serial reference multiply with the same (corrected) loop bounds. */
int mmm2()
{
  for (i = 0; i < N; i++)
    for (j = 0; j < K; j++)
      for (k = 0; k < M; k++)
        c2[i][j]= c2[i][j]+a[i][k]*b[k][j];
  return 0;
}

/* Compares the checksums of the offloaded and reference results.
 * Fixed: the original printed both sums but unconditionally returned 0, so
 * main()'s exit status never reflected a mismatch.  Now returns 0 when the
 * sums agree within a small relative tolerance (float accumulation order
 * differs between the two versions) and 1 otherwise. */
int verify()
{
  REAL sum=0.0, sum2=0.0;
  for (i=0;i<N;i++)
    for(j=0;j<K;j++)
    {
      sum+=c[i][j];
      sum2+=c2[i][j];
    }
  printf("sum of c[i][j] is %f\n",sum);
  printf("sum of c2[i][j] is %f\n",sum2);

  REAL scale = fabsf(sum2) > 1.0f ? fabsf(sum2) : 1.0f;
  if (fabsf(sum - sum2) / scale > 1e-3f) {
    printf("verification FAILED\n");
    return 1;
  }
  printf("verification PASSED\n");
  return 0;
}
DRB055-jacobi2d-parallel-no.c
/**
 * jacobi-2d-imper.c: This file is part of the PolyBench/C 3.2 test suite.
 * Jacobi with array copying, no reduction.
 *
 * Contact: Louis-Noel Pouchet <pouchet@cse.ohio-state.edu>
 * Web address: http://polybench.sourceforge.net
 * License: /LICENSE.OSU.txt
 */
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>

/* Include polybench common header. */
#include "polybench/polybench.h"

/* Include benchmark-specific header. */
/* Default data type is double, default size is 20x1000. */
#include "polybench/jacobi-2d-imper.h"

/* Array initialization. */
/* Fills A and B with deterministic values; each (c1, c2) iteration writes
 * only its own elements, so the (nested) parallel loops are race-free. */
static void init_array(int n,double A[500 + 0][500 + 0],double B[500 + 0][500 + 0])
{
  //int i;
  //int j;
{
    int c2;
    int c1;
    if (n >= 1) {
#pragma omp parallel for private(c1 ,c2 )
      for (c1 = 0; c1 <= n + -1; c1++) {
#pragma omp parallel for private(c2 )
        for (c2 = 0; c2 <= n + -1; c2++) {
          A[c1][c2] = (((double )c1) * (c2 + 2) + 2) / n;
          B[c1][c2] = (((double )c1) * (c2 + 3) + 3) / n;
        }
      }
    }
  }
}

/* DCE code. Must scan the entire live-out data. Can be used also to check the correctness of the output. */
static void print_array(int n,double A[500 + 0][500 + 0])
{
  int i;
  int j;
  for (i = 0; i < n; i++)
    for (j = 0; j < n; j++) {
      fprintf(stderr,"%0.2lf ",A[i][j]);
      if ((i * n + j) % 20 == 0)
        fprintf(stderr,"\n");
    }
  fprintf(stderr,"\n");
}

/* Main computational kernel. The whole function will be timed, including the call and return. */
/* NOTE(review): this is polyhedral-compiler (skewed/time-tiled) output of the
 * Jacobi time loop; the floor/ceil-division ternaries and fused statement
 * order ARE the transformation, and this DataRaceBench case ("parallel-no" =
 * intentionally race-free) must stay byte-identical.  Do not hand-edit. */
static void kernel_jacobi_2d_imper(int tsteps,int n,double A[500 + 0][500 + 0],double B[500 + 0][500 + 0])
{
  //int t;
  //int i;
  //int j;
  //#pragma scop
{
    int c2;
    int c1;
    int c0;
    /* prologue: first sweep of row 1 */
    for (c2 = 1; c2 <= 498; c2++) {
      B[1][c2] = 0.2 * (A[1][c2] + A[1][c2 - 1] + A[1][1 + c2] + A[1 + 1][c2] + A[1 - 1][c2]);
    }
    /* skewed time/space wavefronts */
    for (c0 = 2; c0 <= 525; c0++) {
      if (c0 <= 28) {
        if ((2 * c0 + 1) % 3 == 0) {
          for (c2 = ((2 * c0 + 1) * 3 < 0?-(-(2 * c0 + 1) / 3) : ((3 < 0?(-(2 * c0 + 1) + - 3 - 1) / - 3 : (2 * c0 + 1 + 3 - 1) / 3))); c2 <= (((2 * c0 + 1492) * 3 < 0?((3 < 0?-((-(2 * c0 + 1492) + 3 + 1) / 3) : -((-(2 * c0 + 1492) + 3 - 1) / 3))) : (2 * c0 + 1492) / 3)); c2++) {
            B[1][(-2 * c0 + 3 * c2 + 2) / 3] = 0.2 * (A[1][(-2 * c0 + 3 * c2 + 2) / 3] + A[1][(-2 * c0 + 3 * c2 + 2) / 3 - 1] + A[1][1 + (-2 * c0 + 3 * c2 + 2) / 3] + A[1 + 1][(-2 * c0 + 3 * c2 + 2) / 3] + A[1 - 1][(-2 * c0 + 3 * c2 + 2) / 3]);
          }
        }
      }
      /* parallel wavefront: iterations of c1 are independent */
#pragma omp parallel for private(c1 ,c2 )
      for (c1 = ((((2 * c0 + 2) * 3 < 0?-(-(2 * c0 + 2) / 3) : ((3 < 0?(-(2 * c0 + 2) + - 3 - 1) / - 3 : (2 * c0 + 2 + 3 - 1) / 3)))) > c0 + -9?(((2 * c0 + 2) * 3 < 0?-(-(2 * c0 + 2) / 3) : ((3 < 0?(-(2 * c0 + 2) + - 3 - 1) / - 3 : (2 * c0 + 2 + 3 - 1) / 3)))) : c0 + -9); c1 <= (((((2 * c0 + 498) * 3 < 0?((3 < 0?-((-(2 * c0 + 498) + 3 + 1) / 3) : -((-(2 * c0 + 498) + 3 - 1) / 3))) : (2 * c0 + 498) / 3)) < c0?(((2 * c0 + 498) * 3 < 0?((3 < 0?-((-(2 * c0 + 498) + 3 + 1) / 3) : -((-(2 * c0 + 498) + 3 - 1) / 3))) : (2 * c0 + 498) / 3)) : c0)); c1++) {
        B[-2 * c0 + 3 * c1][1] = 0.2 * (A[-2 * c0 + 3 * c1][1] + A[-2 * c0 + 3 * c1][1 - 1] + A[-2 * c0 + 3 * c1][1 + 1] + A[1 + (-2 * c0 + 3 * c1)][1] + A[-2 * c0 + 3 * c1 - 1][1]);
        for (c2 = 2 * c0 + -2 * c1 + 2; c2 <= 2 * c0 + -2 * c1 + 498; c2++) {
          A[-2 * c0 + 3 * c1 + -1][-2 * c0 + 2 * c1 + c2 + -1] = B[-2 * c0 + 3 * c1 + -1][-2 * c0 + 2 * c1 + c2 + -1];
          B[-2 * c0 + 3 * c1][-2 * c0 + 2 * c1 + c2] = 0.2 * (A[-2 * c0 + 3 * c1][-2 * c0 + 2 * c1 + c2] + A[-2 * c0 + 3 * c1][-2 * c0 + 2 * c1 + c2 - 1] + A[-2 * c0 + 3 * c1][1 + (-2 * c0 + 2 * c1 + c2)] + A[1 + (-2 * c0 + 3 * c1)][-2 * c0 + 2 * c1 + c2] + A[-2 * c0 + 3 * c1 - 1][-2 * c0 + 2 * c1 + c2]);
        }
        A[-2 * c0 + 3 * c1 + -1][498] = B[-2 * c0 + 3 * c1 + -1][498];
      }
      if (c0 >= 499) {
        if ((2 * c0 + 1) % 3 == 0) {
#pragma omp parallel for private(c2 )
          for (c2 = ((2 * c0 + -992) * 3 < 0?-(-(2 * c0 + -992) / 3) : ((3 < 0?(-(2 * c0 + -992) + - 3 - 1) / - 3 : (2 * c0 + -992 + 3 - 1) / 3))); c2 <= (((2 * c0 + 499) * 3 < 0?((3 < 0?-((-(2 * c0 + 499) + 3 + 1) / 3) : -((-(2 * c0 + 499) + 3 - 1) / 3))) : (2 * c0 + 499) / 3)); c2++) {
            A[498][(-2 * c0 + 3 * c2 + 995) / 3] = B[498][(-2 * c0 + 3 * c2 + 995) / 3];
          }
        }
      }
    }
    /* epilogue: copy back the last row of B */
#pragma omp parallel for private(c2 )
    for (c2 = 20; c2 <= 517; c2++) {
      A[498][c2 + -19] = B[498][c2 + -19];
    }
  }
  //#pragma endscop
}

int main(int argc,char **argv)
{
  /* Retrieve problem size. */
  int n = 500;
  int tsteps = 10;
  /* Variable declaration/allocation. */
  double (*A)[500 + 0][500 + 0];
  A = ((double (*)[500 + 0][500 + 0])(polybench_alloc_data(((500 + 0) * (500 + 0)),(sizeof(double )))));
  ;
  double (*B)[500 + 0][500 + 0];
  B = ((double (*)[500 + 0][500 + 0])(polybench_alloc_data(((500 + 0) * (500 + 0)),(sizeof(double )))));
  ;
  /* Initialize array(s). */
  init_array(n, *A, *B);
  /* Start timer. */
  polybench_timer_start();
  ;
  /* Run kernel. */
  kernel_jacobi_2d_imper(tsteps,n, *A, *B);
  /* Stop and print timer. */
  polybench_timer_stop();
  ;
  polybench_timer_print();
  ;
  /* Prevent dead-code elimination. All live-out data must be printed by the function call in argument. */
  print_array(n, *A);
  /* Be clean. */
  free(((void *)A));
  ;
  free(((void *)B));
  ;
  return 0;
}
opencl_keychain_fmt_plug.c
/*
 * Modified by Dhiru Kholia <dhiru at openwall.com> for Keychain format.
 *
 * This software is Copyright (c) 2012 Lukas Odzioba <ukasz@openwall.net>
 * and it is hereby released to the general public under the following terms:
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted.
 */
#ifdef HAVE_OPENCL

#if FMT_EXTERNS_H
extern struct fmt_main fmt_opencl_keychain;
#elif FMT_REGISTERS_H
john_register_one(&fmt_opencl_keychain);
#else

#include <string.h>
#include <openssl/des.h>
#ifdef _OPENMP
#include <omp.h>
#endif

#include "arch.h"
#include "formats.h"
#include "common.h"
#include "stdint.h"
#include "misc.h"
#include "options.h"
#include "jumbo.h"
#include "common-opencl.h"

#define FORMAT_LABEL "keychain-opencl"
#define FORMAT_NAME "Mac OS X Keychain"
#define FORMAT_TAG "$keychain$*"
#define FORMAT_TAG_LEN (sizeof(FORMAT_TAG)-1)
#define ALGORITHM_NAME "PBKDF2-SHA1 OpenCL 3DES"
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
/* Byte-swap a 32-bit word (endianness flip). */
#define SWAP(n) \
    (((n) << 24) | (((n) & 0xff00) << 8) | (((n) >> 8) & 0xff00) | ((n) >> 24))

#define BINARY_SIZE 0
#define PLAINTEXT_LENGTH 64
#define SALT_SIZE sizeof(*salt_struct)
#define BINARY_ALIGN MEM_ALIGN_WORD
#define SALT_ALIGN 4
/* Fixed field sizes of the "$keychain$*salt*iv*ct" record. */
#define SALTLEN 20
#define IVLEN 8
#define CTLEN 48

/* One candidate password as shipped to the OpenCL kernel. */
typedef struct {
    uint32_t length;
    uint8_t v[PLAINTEXT_LENGTH];
} keychain_password;

/* Key material produced by the kernel for one candidate (32 bytes). */
typedef struct {
    uint32_t v[32/4];
} keychain_hash;

/* PBKDF2 parameters handed to the kernel alongside the candidates. */
typedef struct {
    uint32_t iterations;
    uint32_t outlen;
    uint32_t skip_bytes;
    uint8_t length;
    uint8_t salt[64];
} keychain_salt;

static int *cracked;        /* per-candidate crack flags */
static int any_cracked;     /* set when any candidate cracked */
static struct fmt_main *self;

/* Self-test vectors: "$keychain$*<salt>*<iv>*<ciphertext>" plus plaintext. */
static struct fmt_tests keychain_tests[] = {
    {"$keychain$*10f7445c8510fa40d9ef6b4e0f8c772a9d37e449*f3d19b2a45cdcccb*8c3c3b1c7d48a24dad4ccbd4fd794ca9b0b3f1386a0a4527f3548bfe6e2f1001804b082076641bbedbc9f3a7c33c084b", "password"},
    // these were generated with pass_gen.pl. NOTE, they ALL have the data (which gets encrypted) which was decrypted from the above hash.
    {"$keychain$*a88cd6fbaaf40bc5437eee015a0f95ab8ab70545*b12372b1b7cb5c1f*1f5c596bcdd015afc126bc86f42dd092cb9d531d14a0aafaa89283f1bebace60562d497332afbd952fd329cc864144ec", "password"},
    {"$keychain$*23328e264557b93204dc825c46a25f7fb1e17d4a*19a9efde2ca98d30*6ac89184134758a95c61bd274087ae0cffcf49f433c7f91edea98bd4fd60094e2936d99e4d985dec98284379f23259c0", "hhh"},
    {"$keychain$*927717d8509db73aa47c5e820e3a381928b5e048*eef33a4a1483ae45*a52691580f17e295b8c2320947968503c605b2784bfe4851077782139f0de46f71889835190c361870baa56e2f4e9e43", "JtR-Jumbo"},
    {"$keychain$*1fab88d0b8ea1a3d303e0aef519796eb29e46299*3358b0e77d60892f*286f975dcd191024227514ed9939d0fa94034294ba1eca6d5c767559e75e944b5a2fcb54fd696be64c64f9d069ce628a", "really long password -----------------------------"},
    {NULL}
};

/* Parsed salt record for the current hash. */
static struct custom_salt {
    unsigned char salt[SALTLEN];
    unsigned char iv[IVLEN];
    unsigned char ct[CTLEN];
} *salt_struct;

static cl_int cl_error;
static keychain_password *inbuffer;   /* host mirror of mem_in */
static keychain_hash *outbuffer;      /* host mirror of mem_out */
static keychain_salt currentsalt;
static cl_mem mem_in, mem_out, mem_setting;
size_t insize, outsize, settingsize, cracked_size;

#define STEP 0
#define SEED 256

// This file contains auto-tuning routine(s). Has to be included after formats definitions.
#include "opencl-autotune.h" #include "memdbg.h" static const char * warn[] = { "xfer: ", ", crypt: ", ", xfer: " }; /* ------- Helper functions ------- */ static size_t get_task_max_work_group_size() { return autotune_get_task_max_work_group_size(FALSE, 0, crypt_kernel); } static void create_clobj(size_t gws, struct fmt_main *self) { insize = sizeof(keychain_password) * gws; outsize = sizeof(keychain_hash) * gws; settingsize = sizeof(keychain_salt); cracked_size = sizeof(*cracked) * gws; inbuffer = mem_calloc(1, insize); outbuffer = mem_alloc(outsize); cracked = mem_calloc(1, cracked_size); /// Allocate memory mem_in = clCreateBuffer(context[gpu_id], CL_MEM_READ_ONLY, insize, NULL, &cl_error); HANDLE_CLERROR(cl_error, "Error allocating mem in"); mem_setting = clCreateBuffer(context[gpu_id], CL_MEM_READ_ONLY, settingsize, NULL, &cl_error); HANDLE_CLERROR(cl_error, "Error allocating mem setting"); mem_out = clCreateBuffer(context[gpu_id], CL_MEM_WRITE_ONLY, outsize, NULL, &cl_error); HANDLE_CLERROR(cl_error, "Error allocating mem out"); HANDLE_CLERROR(clSetKernelArg(crypt_kernel, 0, sizeof(mem_in), &mem_in), "Error while setting mem_in kernel argument"); HANDLE_CLERROR(clSetKernelArg(crypt_kernel, 1, sizeof(mem_out), &mem_out), "Error while setting mem_out kernel argument"); HANDLE_CLERROR(clSetKernelArg(crypt_kernel, 2, sizeof(mem_setting), &mem_setting), "Error while setting mem_salt kernel argument"); } static void release_clobj(void) { if (cracked) { HANDLE_CLERROR(clReleaseMemObject(mem_in), "Release mem in"); HANDLE_CLERROR(clReleaseMemObject(mem_setting), "Release mem setting"); HANDLE_CLERROR(clReleaseMemObject(mem_out), "Release mem out"); MEM_FREE(inbuffer); MEM_FREE(outbuffer); MEM_FREE(cracked); } } static void done(void) { if (autotuned) { release_clobj(); HANDLE_CLERROR(clReleaseKernel(crypt_kernel), "Release kernel"); HANDLE_CLERROR(clReleaseProgram(program[gpu_id]), "Release Program"); autotuned--; } } static void init(struct fmt_main *_self) { self 
= _self; opencl_prepare_dev(gpu_id); } static void reset(struct db_main *db) { if (!autotuned) { char build_opts[64]; snprintf(build_opts, sizeof(build_opts), "-DKEYLEN=%d -DSALTLEN=%d -DOUTLEN=%d", PLAINTEXT_LENGTH, (int)sizeof(currentsalt.salt), (int)sizeof(outbuffer->v)); opencl_init("$JOHN/kernels/pbkdf2_hmac_sha1_unsplit_kernel.cl", gpu_id, build_opts); crypt_kernel = clCreateKernel(program[gpu_id], "derive_key", &cl_error); HANDLE_CLERROR(cl_error, "Error creating kernel"); // Initialize openCL tuning (library) for this format. opencl_init_auto_setup(SEED, 0, NULL, warn, 1, self, create_clobj, release_clobj, sizeof(keychain_password), 0, db); // Auto tune execution from shared/included code. autotune_run(self, 1, 0, 1000); } } static int valid(char *ciphertext, struct fmt_main *self) { char *ctcopy, *keeptr, *p; int extra; if (strncmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LEN) != 0) return 0; ctcopy = strdup(ciphertext); keeptr = ctcopy; ctcopy += FORMAT_TAG_LEN; if ((p = strtokm(ctcopy, "*")) == NULL) /* salt */ goto err; if(hexlenl(p, &extra) != SALTLEN * 2 || extra) goto err; if ((p = strtokm(NULL, "*")) == NULL) /* iv */ goto err; if(hexlenl(p, &extra) != IVLEN * 2 || extra) goto err; if ((p = strtokm(NULL, "*")) == NULL) /* ciphertext */ goto err; if(hexlenl(p, &extra) != CTLEN * 2 || extra) goto err; MEM_FREE(keeptr); return 1; err: MEM_FREE(keeptr); return 0; } static void *get_salt(char *ciphertext) { char *ctcopy = strdup(ciphertext); char *keeptr = ctcopy; int i; char *p; static struct custom_salt *salt_struct; if (!salt_struct) salt_struct = mem_calloc_tiny(sizeof(struct custom_salt), MEM_ALIGN_WORD); ctcopy += FORMAT_TAG_LEN; /* skip over "$keychain$*" */ p = strtokm(ctcopy, "*"); for (i = 0; i < SALTLEN; i++) salt_struct->salt[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16 + atoi16[ARCH_INDEX(p[i * 2 + 1])]; p = strtokm(NULL, "*"); for (i = 0; i < IVLEN; i++) salt_struct->iv[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16 + atoi16[ARCH_INDEX(p[i * 2 + 1])]; p = 
strtokm(NULL, "*"); for (i = 0; i < CTLEN; i++) salt_struct->ct[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16 + atoi16[ARCH_INDEX(p[i * 2 + 1])]; MEM_FREE(keeptr); return (void *)salt_struct; } static void set_salt(void *salt) { salt_struct = (struct custom_salt *)salt; memcpy((char*)currentsalt.salt, salt_struct->salt, 20); currentsalt.length = 20; currentsalt.iterations = 1000; currentsalt.outlen = 24; currentsalt.skip_bytes = 0; HANDLE_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], mem_setting, CL_FALSE, 0, settingsize, &currentsalt, 0, NULL, NULL), "Copy salt to gpu"); } #undef set_key static void set_key(char *key, int index) { uint8_t length = strlen(key); if (length > PLAINTEXT_LENGTH) length = PLAINTEXT_LENGTH; inbuffer[index].length = length; memcpy(inbuffer[index].v, key, length); } static char *get_key(int index) { static char ret[PLAINTEXT_LENGTH + 1]; uint8_t length = inbuffer[index].length; memcpy(ret, inbuffer[index].v, length); ret[length] = '\0'; return ret; } static int kcdecrypt(unsigned char *key, unsigned char *iv, unsigned char *data) { unsigned char out[CTLEN]; DES_cblock key1, key2, key3; DES_cblock ivec; DES_key_schedule ks1, ks2, ks3; memset(out, 0, sizeof(out)); memcpy(key1, key, 8); memcpy(key2, key + 8, 8); memcpy(key3, key + 16, 8); DES_set_key((DES_cblock *) key1, &ks1); DES_set_key((DES_cblock *) key2, &ks2); DES_set_key((DES_cblock *) key3, &ks3); memcpy(ivec, iv, 8); DES_ede3_cbc_encrypt(data, out, CTLEN, &ks1, &ks2, &ks3, &ivec, DES_DECRYPT); /* possible bug here, is this assumption (pad of 4) always valid? */ if (out[47] != 4 || check_pkcs_pad(out, CTLEN, 8) < 0) return -1; return 0; } #if 0 //#ifdef DEBUG static void print_hex(unsigned char *str, int len) { int i; for (i = 0; i < len; ++i) printf("%02x", str[i]); printf("\n"); } #endif static int crypt_all(int *pcount, struct db_salt *salt) { const int count = *pcount; int index; size_t *lws = local_work_size ? 
&local_work_size : NULL; global_work_size = GET_MULTIPLE_OR_BIGGER(count, local_work_size); if (any_cracked) { memset(cracked, 0, cracked_size); any_cracked = 0; } /// Copy data to gpu BENCH_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], mem_in, CL_FALSE, 0, insize, inbuffer, 0, NULL, multi_profilingEvent[0]), "Copy data to gpu"); /// Run kernel BENCH_CLERROR(clEnqueueNDRangeKernel(queue[gpu_id], crypt_kernel, 1, NULL, &global_work_size, lws, 0, NULL, multi_profilingEvent[1]), "Run kernel"); /// Read the result back BENCH_CLERROR(clEnqueueReadBuffer(queue[gpu_id], mem_out, CL_TRUE, 0, outsize, outbuffer, 0, NULL, multi_profilingEvent[2]), "Copy result back"); if (ocl_autotune_running) return count; #ifdef _OPENMP #pragma omp parallel for #endif for (index = 0; index < count; index++) if (!kcdecrypt((unsigned char*)outbuffer[index].v, salt_struct->iv, salt_struct->ct)) { cracked[index] = 1; #ifdef _OPENMP #pragma omp atomic #endif any_cracked |= 1; } return count; } static int cmp_all(void *binary, int count) { return any_cracked; } static int cmp_one(void *binary, int index) { return cracked[index]; } static int cmp_exact(char *source, int index) { return 1; } struct fmt_main fmt_opencl_keychain = { { FORMAT_LABEL, FORMAT_NAME, ALGORITHM_NAME, BENCHMARK_COMMENT, BENCHMARK_LENGTH, 0, PLAINTEXT_LENGTH, BINARY_SIZE, BINARY_ALIGN, SALT_SIZE, SALT_ALIGN, MIN_KEYS_PER_CRYPT, MAX_KEYS_PER_CRYPT, FMT_CASE | FMT_8_BIT | FMT_OMP | FMT_NOT_EXACT, { NULL }, { FORMAT_TAG }, keychain_tests }, { init, done, reset, fmt_default_prepare, valid, fmt_default_split, fmt_default_binary, get_salt, { NULL }, fmt_default_source, { fmt_default_binary_hash }, fmt_default_salt_hash, NULL, set_salt, set_key, get_key, fmt_default_clear_keys, crypt_all, { fmt_default_get_hash }, cmp_all, cmp_one, cmp_exact } }; #endif /* plugin stanza */ #endif /* HAVE_OPENCL */
special_random_ops.h
// // @author raver119@gmail.com // #ifndef LIBND4J_SPECIAL_RANDOM_OPS_H #define LIBND4J_SPECIAL_RANDOM_OPS_H #include <ops/random_ops.h> #include <helpers/shape.h> namespace randomOps { ////////////////////////////////////////////////////////////////////// template<typename T> class Choice { public: method_idx method_X method_XY static const bool requiresSpecial = true; #ifdef __CUDACC__ __device__ static inline void specialOpCuda(Nd4jPointer state, T *x, Nd4jLong *xShapeBuffer, T *y, Nd4jLong *yShapeBuffer, T *z, Nd4jLong *zShapeBuffer, T *extraArguments) { /** * X holds data, * Y holds probabilities * Z will hold results */ // TODO: we probably might want to skip this sum, and state that probabilities array should be real probabilities, i.e. should sum to 1.0 //T probSum = extraArguments[0]; __shared__ Nd4jLong xLength; __shared__ Nd4jLong yLength; __shared__ Nd4jLong zLength; __shared__ Nd4jLong xEWS; __shared__ Nd4jLong yEWS; __shared__ Nd4jLong zEWS; __shared__ nd4j::random::RandomBuffer *buffer; __shared__ unsigned char *cB; __shared__ unsigned char *dB; __shared__ nd4j::random::RandomBuffer *devBuffer; if (threadIdx.x == 0) { extern __shared__ unsigned char shmem[]; buffer = (nd4j::random::RandomBuffer *) shmem; cB = shmem; devBuffer = reinterpret_cast<nd4j::random::RandomBuffer *> (state); dB = reinterpret_cast<unsigned char *> (state); xLength = shape::length(xShapeBuffer); yLength = shape::length(yShapeBuffer); zLength = shape::length(zShapeBuffer); xEWS = shape::elementWiseStride(xShapeBuffer); yEWS = shape::elementWiseStride(yShapeBuffer); zEWS = shape::elementWiseStride(zShapeBuffer); } __syncthreads(); // using this loop instead of memcpy for (int e = threadIdx.x; e < sizeof(nd4j::random::RandomBuffer); e+= blockDim.x) { cB[e] = dB[e]; } __syncthreads(); int tid = blockIdx.x * blockDim.x + threadIdx.x; if (zEWS >= 1 && xEWS >= 1 && yEWS >= 1) { for (Nd4jLong e = tid; e < zLength; e+=blockDim.x * gridDim.x) { T prob = buffer->relativeT<T>(e); T 
cumProb = (T) 0.0f; for (Nd4jLong f = 0; f < yLength; f++) { T relProb = y[f * yEWS]; cumProb += relProb; if (prob <= cumProb || f == yLength - 1) { z[e * zEWS] = x[f * xEWS]; f += yLength; } __syncthreads(); } __syncthreads(); } } else { Nd4jLong xCoord[MAX_RANK]; Nd4jLong yCoord[MAX_RANK]; Nd4jLong zCoord[MAX_RANK]; __shared__ int xRank; __shared__ int yRank; __shared__ int zRank; __shared__ Nd4jLong *xShape; __shared__ Nd4jLong *yShape; __shared__ Nd4jLong *zShape; __shared__ Nd4jLong *xStride; __shared__ Nd4jLong *yStride; __shared__ Nd4jLong *zStride; if (threadIdx.x == 0) { xRank = shape::rank(xShapeBuffer); yRank = shape::rank(yShapeBuffer); zRank = shape::rank(zShapeBuffer); xShape = shape::shapeOf(xShapeBuffer); yShape = shape::shapeOf(yShapeBuffer); zShape = shape::shapeOf(zShapeBuffer); xStride = shape::stride(xShapeBuffer); yStride = shape::stride(yShapeBuffer); zStride = shape::stride(zShapeBuffer); } __syncthreads(); for (Nd4jLong i = tid; i < zLength; i+=blockDim.x * gridDim.x) { shape::ind2sub(zRank, zShape, i, zCoord); auto zOffset2 = shape::getOffset(0, zShape, zStride, zCoord, zRank); T prob = buffer->relativeT<T>(i); T cumProb = (T) 0.0f; for (Nd4jLong f = 0; f < yLength; f++) { shape::ind2sub(yRank, yShape, i, yCoord); auto yOffset2 = shape::getOffset(0, yShape, yStride, yCoord, yRank); T relProb = y[yOffset2]; cumProb += relProb; if (prob <= cumProb || f == yLength - 1) { shape::ind2sub(xRank, xShape, f, xCoord); auto xOffset2 = shape::getOffset(0, xShape, xStride, xCoord, xRank); z[zOffset2] = x[xOffset2]; f += yLength; } __syncthreads(); } __syncthreads(); } } __syncthreads(); devBuffer->rewind(zLength); } #endif static inline void specialOp(Nd4jPointer state, T *x, Nd4jLong *xShapeBuffer, T *y, Nd4jLong *yShapeBuffer, T *z, Nd4jLong *zShapeBuffer, T *extraArguments) { /** * X holds data, * Y holds probabilities * Z will hold results */ nd4j::random::RandomBuffer *buffer = reinterpret_cast<nd4j::random::RandomBuffer *> (state); // TODO: we 
probably might want to skip this sum, and state that probabilities array should be real probabilities, i.e. should sum to 1.0 //T probSum = extraArguments[0]; Nd4jLong yLength = shape::length(yShapeBuffer); Nd4jLong zLength = shape::length(zShapeBuffer); auto xEWS = shape::elementWiseStride(xShapeBuffer); auto yEWS = shape::elementWiseStride(yShapeBuffer); auto zEWS = shape::elementWiseStride(zShapeBuffer); int elementsPerThread = zLength / TAD_THRESHOLD; int _threads = nd4j::math::nd4j_max<int>(1, elementsPerThread); _threads = nd4j::math::nd4j_min<int>(_threads, omp_get_max_threads()); if (zEWS >= 1 && xEWS >= 1 && yEWS >= 1) { #pragma omp parallel for num_threads(_threads) if (_threads > 1) schedule(guided) for (Nd4jLong e = 0; e < zLength; e++) { T prob = buffer->relativeT<T>(e); T cumProb = (T) 0.0f; for (Nd4jLong f = 0; f < yLength; f++) { T relProb = y[f * yEWS]; cumProb += relProb; if (prob <= cumProb || f == yLength - 1) { z[e * zEWS] = x[f * xEWS]; f += yLength; } } } } else { Nd4jLong xCoord[MAX_RANK]; Nd4jLong yCoord[MAX_RANK]; Nd4jLong zCoord[MAX_RANK]; int xRank = shape::rank(xShapeBuffer); int yRank = shape::rank(yShapeBuffer); int zRank = shape::rank(zShapeBuffer); auto xShape = shape::shapeOf(xShapeBuffer); auto yShape = shape::shapeOf(yShapeBuffer); auto zShape = shape::shapeOf(zShapeBuffer); auto xStride = shape::stride(xShapeBuffer); auto yStride = shape::stride(yShapeBuffer); auto zStride = shape::stride(zShapeBuffer); #pragma omp parallel for num_threads(_threads) if (_threads > 1) schedule(guided) for (Nd4jLong i = 0; i < zLength; i++) { shape::ind2sub(zRank, zShape, i, zCoord); auto zOffset2 = shape::getOffset(0, zShape, zStride, zCoord, zRank); T prob = buffer->relativeT<T>(i); T cumProb = (T) 0.0f; for (Nd4jLong f = 0; f < yLength; f++) { shape::ind2sub(yRank, yShape, i, yCoord); auto yOffset2 = shape::getOffset(0, yShape, yStride, yCoord, yRank); T relProb = y[yOffset2]; cumProb += relProb; if (prob <= cumProb || f == yLength - 1) { 
shape::ind2sub(xRank, xShape, f, xCoord); Nd4jLong xOffset2 = shape::getOffset(0, xShape, xStride, xCoord, xRank); z[zOffset2] = x[xOffset2]; f += yLength; } } } } // update rng state buffer->rewindH(zLength); } }; ////////////////////////////////////////////////////////////////////// /** * This Op produces random values within specified boundaries. Distribuion is Gaussian */ template<typename T> class GaussianDistribution { public: method_XY method_X method_idx static const bool requiresSpecial = true; #ifdef __CUDACC__ __device__ static inline void specialOpCuda(Nd4jPointer state, T *x, Nd4jLong *xShapeBuffer, T *y, Nd4jLong *yShapeBuffer, T *z, Nd4jLong *zShapeBuffer, T *extraArguments) { __shared__ T epsilon; __shared__ T two_pi; __shared__ Nd4jLong zLength; __shared__ Nd4jLong zEWS; __shared__ Nd4jLong yEWS; __shared__ T mean; __shared__ T stddev; __shared__ int step; __shared__ T *tZ; __shared__ nd4j::random::RandomBuffer *buffer; __shared__ unsigned char *cB; __shared__ unsigned char *dB; __shared__ nd4j::random::RandomBuffer *devBuffer; if (threadIdx.x == 0) { extern __shared__ unsigned char shmem[]; buffer = reinterpret_cast<nd4j::random::RandomBuffer *>(shmem); cB = shmem; devBuffer = reinterpret_cast<nd4j::random::RandomBuffer *> (state); dB = reinterpret_cast<unsigned char *> (state); tZ = reinterpret_cast<T *>(shmem + sizeof(nd4j::random::RandomBuffer)); zLength = shape::length(zShapeBuffer); zEWS = shape::elementWiseStride(zShapeBuffer); yEWS = shape::elementWiseStride(yShapeBuffer); epsilon = static_cast<T>(1e-5); two_pi = static_cast<T>(2.0f) * static_cast<T>(3.14159265358979323846); mean = extraArguments[0]; stddev = extraArguments[1]; step = (blockDim.x * gridDim.x); } __syncthreads(); // using this loop instead of memcpy for (int e = threadIdx.x; e < sizeof(nd4j::random::RandomBuffer); e+= blockDim.x) { cB[e] = dB[e]; } __syncthreads(); Nd4jLong tid = blockIdx.x * blockDim.x + threadIdx.x; for (Nd4jLong e = tid; e < zLength; e += step) { // we 
need to get random values tZ[threadIdx.x] = buffer->relativeT<T>(e, epsilon, static_cast<T>(1.0f)); // fix for "next rng value" if (e + 1 >= zLength && e % 2 == 0) { tZ[threadIdx.x+1] = buffer->relativeT<T>(e+1, epsilon, static_cast<T>(1.0f)); } T realMean = y == z ? mean : y[e * yEWS]; __syncthreads(); if (e % 2 == 0) z[e *zEWS] = (nd4j::math::nd4j_sqrt<T>(static_cast<T>(-2.0f) * nd4j::math::nd4j_log<T>(tZ[threadIdx.x])) * nd4j::math::nd4j_cos<T>(two_pi * tZ[threadIdx.x+1])) * stddev + realMean; else z[e *zEWS] = (nd4j::math::nd4j_sqrt<T>(static_cast<T>(-2.0f) * nd4j::math::nd4j_log<T>(tZ[threadIdx.x-1])) * nd4j::math::nd4j_sin<T>(two_pi * tZ[threadIdx.x])) * stddev + realMean; __syncthreads(); } __syncthreads(); devBuffer->rewind(zLength); } #endif static inline void specialOp(Nd4jPointer state, T *x, Nd4jLong *xShapeBuffer, T *y, Nd4jLong *yShapeBuffer, T *z, Nd4jLong *zShapeBuffer, T *extraArguments) { const T two_pi = static_cast<T>(2.0f) * static_cast<T>(3.14159265358979323846); auto zLength = shape::length(zShapeBuffer); auto yEWS = shape::elementWiseStride(yShapeBuffer); auto zEWS = shape::elementWiseStride(zShapeBuffer); int elementsPerThread = zLength / TAD_THRESHOLD; int _threads = nd4j::math::nd4j_max<int>(1, elementsPerThread); _threads = nd4j::math::nd4j_min<int>(_threads, omp_get_max_threads()); int span = (zLength / _threads) + 8; // we're enforcing even chunks, since it's mandatory for this algorithm span -= span % 2; nd4j::random::RandomBuffer *buffer = reinterpret_cast<nd4j::random::RandomBuffer *> (state); T mean = extraArguments[0]; T stddev = extraArguments[1]; #pragma omp parallel num_threads(_threads) if (_threads > 1) proc_bind(spread) { int tid = omp_get_thread_num(); Nd4jLong start = span * tid; Nd4jLong end = span * (tid + 1); if (end > zLength) end = zLength; T z0, z1; T u0, u1; T lnU0; bool generated = false; for (Nd4jLong e = start; e < end; e++) { if (!generated) { /* * Since box-muller transform expects non-zero u0 value, we'll just 
use rng with boundaries */ u0 = buffer->relativeT<T>(e, static_cast<T>(1e-5f), static_cast<T>(1.0f)); u1 = buffer->relativeT<T>((e + 1), static_cast<T>(1e-5f), static_cast<T>(1.0f)); lnU0 = nd4j::math::nd4j_sqrt<T>(static_cast<T>(-2.0f) * nd4j::math::nd4j_log<T>(u0)); z0 = lnU0 * nd4j::math::nd4j_cos<T>(two_pi * u1); z1 = lnU0 * nd4j::math::nd4j_sin<T>(two_pi * u1); generated = true; T realMean = y == z ? mean : y[e * yEWS]; z[e * zEWS] = z0 * stddev + realMean; } else { T realMean = y == z ? mean : y[e * yEWS]; z[e * zEWS] = z1 * stddev + realMean; generated = false; } } } // update rng state buffer->rewindH(zLength); } }; ////////////////////////////////////////////////////////////////////// /** * This Op produces random values within [0..N], Distribuion is binomial */ template<typename T> class BinomialDistribution { public: method_XY method_X method_idx static const bool requiresSpecial = true; #ifdef __CUDACC__ __device__ static inline void specialOpCuda(Nd4jPointer state, T *x, Nd4jLong *xShapeBuffer, T *y, Nd4jLong *yShapeBuffer, T *z, Nd4jLong *zShapeBuffer, T *extraArguments) { int trials = (int) extraArguments[0]; T prob = extraArguments[1]; __shared__ Nd4jLong zLength; __shared__ int yEWS; __shared__ int zEWS; __shared__ nd4j::random::RandomBuffer *buffer; __shared__ unsigned char *cB; __shared__ unsigned char *dB; __shared__ nd4j::random::RandomBuffer *devBuffer; if (threadIdx.x == 0) { extern __shared__ unsigned char shmem[]; buffer = reinterpret_cast<nd4j::random::RandomBuffer *>(shmem); cB = shmem; devBuffer = reinterpret_cast<nd4j::random::RandomBuffer *>(state); dB = reinterpret_cast<unsigned char *> (state); zLength = shape::length(zShapeBuffer); yEWS = shape::elementWiseStride(yShapeBuffer); zEWS = shape::elementWiseStride(zShapeBuffer); } __syncthreads(); // using this loop instead of memcpy for (int e = threadIdx.x; e < sizeof(nd4j::random::RandomBuffer); e+= blockDim.x) { cB[e] = dB[e]; } __syncthreads(); int tid = blockIdx.x * blockDim.x + 
threadIdx.x; for (Nd4jLong e = tid; e < zLength; e += blockDim.x * gridDim.x) { int success = 0; for (int t = 1; t <= trials; t++) { T randVal = buffer->relativeT<T>((e+1) * t); if (y != z) { // we're using external probs prob = y[(t-1) * yEWS]; } if (randVal < prob) success++; } // we need this, to eliminate excessive code branching in runtime __syncthreads(); // if trials is set to 0, effectively we just have successful memset z[e * zEWS] = static_cast<T>(success); } __syncthreads(); if (trials > 0) devBuffer->rewind(zLength * trials); } #endif static inline void specialOp(Nd4jPointer state, T *x, Nd4jLong *xShapeBuffer, T *y, Nd4jLong *yShapeBuffer, T *z, Nd4jLong *zShapeBuffer, T *extraArguments) { int trials = (int) extraArguments[0]; Nd4jLong zLength = shape::length(zShapeBuffer); auto yEWS = shape::elementWiseStride(yShapeBuffer); auto zEWS = shape::elementWiseStride(zShapeBuffer); int elementsPerThread = zLength / TAD_THRESHOLD; int _threads = nd4j::math::nd4j_max<int>(1, elementsPerThread); _threads = nd4j::math::nd4j_min<int>(_threads, omp_get_max_threads()); int span = (zLength / _threads) + 8; nd4j::random::RandomBuffer *buffer = reinterpret_cast<nd4j::random::RandomBuffer *> (state); #pragma omp parallel num_threads(_threads) if (_threads > 1) proc_bind(spread) { int tid = omp_get_thread_num(); Nd4jLong start = span * tid; Nd4jLong end = span * (tid + 1); if (end > zLength) end = zLength; T prob = extraArguments[1]; for (Nd4jLong e = start; e < end; e++) { int success = 0; for (int t = 1; t <= trials; t++) { T randVal = buffer->relativeT<T>((e+1) * t); if (y != z) { // we're using external probs prob = y[(t-1) * yEWS]; } if (randVal < prob) success++; } // if trials is set to 0, effectively we just have successful memset z[e * zEWS] = static_cast<T>(success); } } // update rng state if (trials > 0) buffer->rewindH(zLength * trials); } }; ////////////////////////////////////////////////////////////////////// /** * This Op produces random values within 
[0..N], Distribuion is binomial */ template<typename T> class BinomialDistributionEx { public: method_XY method_X method_idx static const bool requiresSpecial = true; #ifdef __CUDACC__ __device__ static inline void specialOpCuda(Nd4jPointer state, T *x, Nd4jLong *xShapeBuffer, T *y, Nd4jLong *yShapeBuffer, T *z, Nd4jLong *zShapeBuffer, T *extraArguments) { int trials = (int) extraArguments[0]; T prob = extraArguments[1]; __shared__ Nd4jLong zLength; __shared__ int yEWS; __shared__ int zEWS; __shared__ nd4j::random::RandomBuffer *buffer; __shared__ unsigned char *cB; __shared__ unsigned char *dB; __shared__ nd4j::random::RandomBuffer *devBuffer; if (threadIdx.x == 0) { extern __shared__ unsigned char shmem[]; buffer = (nd4j::random::RandomBuffer *) shmem; cB = shmem; devBuffer = reinterpret_cast<nd4j::random::RandomBuffer *> (state); dB = reinterpret_cast<unsigned char *> (state); zLength = shape::length(zShapeBuffer); yEWS = shape::elementWiseStride(yShapeBuffer); zEWS = shape::elementWiseStride(zShapeBuffer); } __syncthreads(); // using this loop instead of memcpy for (int e = threadIdx.x; e < sizeof(nd4j::random::RandomBuffer); e+= blockDim.x) { cB[e] = dB[e]; } __syncthreads(); int tid = blockIdx.x * blockDim.x + threadIdx.x; for (Nd4jLong e = tid; e < zLength; e += blockDim.x * gridDim.x) { int success = 0; for (int t = 1; t <= trials; t++) { T randVal = buffer->relativeT<T>((e+1) * t); if (y != z) { // we're using external probs prob = y[e * yEWS]; } if (randVal < prob) success++; } // we need this, to eliminate excessive code branching in runtime __syncthreads(); // if trials is set to 0, effectively we just have successful memset z[e * zEWS] = (T) success; } __syncthreads(); if (trials > 0) devBuffer->rewind(zLength * trials); } #endif static inline void specialOp(Nd4jPointer state, T *x, Nd4jLong *xShapeBuffer, T *y, Nd4jLong *yShapeBuffer, T *z, Nd4jLong *zShapeBuffer, T *extraArguments) { int trials = (int) extraArguments[0]; Nd4jLong zLength = 
shape::length(zShapeBuffer); auto yEWS = shape::elementWiseStride(yShapeBuffer); auto zEWS = shape::elementWiseStride(zShapeBuffer); int elementsPerThread = zLength / TAD_THRESHOLD; int _threads = nd4j::math::nd4j_max<int>(1, elementsPerThread); _threads = nd4j::math::nd4j_min<int>(_threads, omp_get_max_threads()); auto span = (zLength / _threads) + 8; nd4j::random::RandomBuffer *buffer = reinterpret_cast<nd4j::random::RandomBuffer *> (state); #pragma omp parallel num_threads(_threads) if (_threads > 1) proc_bind(spread) { int tid = omp_get_thread_num(); Nd4jLong start = span * tid; Nd4jLong end = span * (tid + 1); if (end > zLength) end = zLength; T prob = extraArguments[1]; for (Nd4jLong e = start; e < end; e++) { int success = 0; for (int t = 1; t <= trials; t++) { T randVal = buffer->relativeT<T>((e+1) * t); if (y != z) { // we're using external probs prob = y[e * yEWS]; } if (randVal < prob) success++; } // if trials is set to 0, effectively we just have successful memset z[e * zEWS] = static_cast<T>(success); } } // update rng state if (trials > 0) buffer->rewindH(zLength * trials); } }; ////////////////////////////////////////////////////////////////////// // This Op produces random Gaussian values within [mean-2*stddev,mean+2*stddev] template<typename T> class TruncatedNormalDistribution { public: method_XY method_X method_idx static const bool requiresSpecial = true; #ifdef __CUDACC__ __device__ static inline void specialOpCuda(Nd4jPointer state, T *x, Nd4jLong *xShapeBuffer, T *y, Nd4jLong *yShapeBuffer, T *z, Nd4jLong *zShapeBuffer, T *extraArguments) { __shared__ T epsilon; __shared__ T two_pi; __shared__ Nd4jLong zLength; __shared__ Nd4jLong zEWS; __shared__ Nd4jLong yEWS; __shared__ T mean; __shared__ T stddev; __shared__ int step; __shared__ T *tZ; __shared__ nd4j::random::RandomBuffer *buffer; __shared__ unsigned char *cB; __shared__ unsigned char *dB; __shared__ nd4j::random::RandomBuffer *devBuffer; if (threadIdx.x == 0) { extern __shared__ 
unsigned char shmem[]; buffer = reinterpret_cast<nd4j::random::RandomBuffer *>(shmem); cB = shmem; devBuffer = reinterpret_cast<nd4j::random::RandomBuffer *> (state); dB = reinterpret_cast<unsigned char *> (state); tZ = reinterpret_cast<T *>(shmem + sizeof(nd4j::random::RandomBuffer)); zLength = shape::length(zShapeBuffer); zEWS = shape::elementWiseStride(zShapeBuffer); yEWS = shape::elementWiseStride(yShapeBuffer); epsilon = static_cast<T>(1e-6f); two_pi = static_cast<T>(2.0f) * static_cast<T>(3.14159265358979323846); mean = extraArguments[0]; stddev = extraArguments[1]; step = (blockDim.x * gridDim.x); } __syncthreads(); // using this loop instead of memcpy for (int e = threadIdx.x; e < sizeof(nd4j::random::RandomBuffer); e+= blockDim.x) { cB[e] = dB[e]; } __syncthreads(); int tid = blockIdx.x * blockDim.x + threadIdx.x; int middle = zLength % 2 == 0 ? zLength / 2 : zLength / 2 + 1; T result0, result1, u0, u1, z0, z1, uT, uP; T ds = nd4j::math::nd4j_abs<T>(stddev) * static_cast<T>(2.0f); for (Nd4jLong e = tid; e < middle; e += step) { // we need to get random values Nd4jLong generation0 = 0; auto epm = e + middle; T realMean0 = y == z ? mean : y[e * yEWS]; T realMean1 = y == z ? 
mean : y[epm * yEWS]; T aRealMean0 = nd4j::math::nd4j_abs<T>(realMean0); T aRealMean1 = nd4j::math::nd4j_abs<T>(realMean1); do { u0 = buffer->relativeT<T>(e + generation0, epsilon, static_cast<T>(1.0f)); u1 = buffer->relativeT<T>(epm + generation0, epsilon, static_cast<T>(1.0f)); uT = nd4j::math::nd4j_sqrt<T>(static_cast<T>(-2.0f) * nd4j::math::nd4j_log<T>(u0)); uP = two_pi * u1; z0 = uT * nd4j::math::nd4j_cos<T>(uP); z1 = uT * nd4j::math::nd4j_sin<T>(uP); result0 = z0 * stddev + realMean0; result1 = z1 * stddev + realMean1; generation0 += zLength; } while (ds < aRealMean0 + nd4j::math::nd4j_abs<T>(result0) || aRealMean1 + nd4j::math::nd4j_abs<T>(result1) > ds); z[e * zEWS] = result0; if((epm) < zLength) z[epm * zEWS] = result1; } __syncthreads(); devBuffer->rewind(zLength); } #endif static inline void specialOp(Nd4jPointer state, T *x, Nd4jLong *xShapeBuffer, T *y, Nd4jLong *yShapeBuffer, T *z, Nd4jLong *zShapeBuffer, T *extraArguments) { const T two_pi = static_cast<T>(2.0f) * static_cast<T>(3.14159265358979323846); Nd4jLong zLength = shape::length(zShapeBuffer); auto yEWS = shape::elementWiseStride(yShapeBuffer); auto zEWS = shape::elementWiseStride(zShapeBuffer); auto middle = zLength % 2 == 0 ? 
zLength / 2 : zLength / 2 + 1; int elementsPerThread = middle / TAD_THRESHOLD; int _threads = nd4j::math::nd4j_max<int>(1, elementsPerThread); _threads = nd4j::math::nd4j_min<int>(_threads, omp_get_max_threads()); int span = (middle / _threads) + 8; // we're enforcing even chunks, since it's mandatory for this algorithm span -= span % 2; nd4j::random::RandomBuffer *buffer = reinterpret_cast<nd4j::random::RandomBuffer *> (state); T mean = extraArguments[0]; T stddev = extraArguments[1]; #pragma omp parallel num_threads(_threads) if (_threads > 1) proc_bind(spread) { int tid = omp_get_thread_num(); Nd4jLong start = span * tid; Nd4jLong end = span * (tid + 1); if (end > middle) { end = middle; } T z0, z1; T u0, u1; T result0, result1, lnu0, lnu1; T ds = nd4j::math::nd4j_abs<T>(stddev) * (T) 2.0f; for (Nd4jLong e = start; e < end; e++) { /* * Since box-muller transform expects non-zero u0 value, we'll just use rng with boundaries */ Nd4jLong generation0 = 0; auto epm = e + middle; T realMean0 = y == z ? mean : y[e * yEWS]; T realMean1 = y == z ? 
mean : y[epm * yEWS]; T aRealMean0 = nd4j::math::nd4j_abs<T>(realMean0); T aRealMean1 = nd4j::math::nd4j_abs<T>(realMean1); do { u0 = buffer->relativeT<T>(e + generation0, static_cast<T>(1e-6f), static_cast<T>(1.0f)); u1 = buffer->relativeT<T>((epm + generation0), static_cast<T>(1e-6f), static_cast<T>(1.0f)); lnu0 = nd4j::math::nd4j_sqrt<T>(static_cast<T>(-2.0f) * nd4j::math::nd4j_log<T>(u0)); lnu1 = two_pi * u1; z0 = lnu0 * nd4j::math::nd4j_cos<T>(lnu1); z1 = lnu0 * nd4j::math::nd4j_sin<T>(lnu1); result0 = z0 * stddev + realMean0; result1 = z1 * stddev + realMean1; generation0 += zLength; } while (aRealMean0 + nd4j::math::nd4j_abs<T>(result0) > ds || aRealMean1 + nd4j::math::nd4j_abs<T>(result1) > ds); z[e*zEWS] = result0; if(epm < zLength) z[epm * zEWS] = result1; } } // update rng state buffer->rewindH(zLength); } }; ////////////////////////////////////////////////////////////////////// // This Op produces random Log-normal distribution template<typename T> class LogNormalDistribution { public: method_XY method_X method_idx static const bool requiresSpecial = true; #ifdef __CUDACC__ __device__ static inline void specialOpCuda(Nd4jPointer state, T *x, Nd4jLong *xShapeBuffer, T *y, Nd4jLong *yShapeBuffer, T *z, Nd4jLong *zShapeBuffer, T *extraArguments) { __shared__ T epsilon; __shared__ T two_pi; __shared__ Nd4jLong zLength; __shared__ Nd4jLong zEWS; __shared__ Nd4jLong yEWS; __shared__ T mean; __shared__ T stddev; __shared__ int step; __shared__ T *tZ; __shared__ nd4j::random::RandomBuffer *buffer; __shared__ unsigned char *cB; __shared__ unsigned char *dB; __shared__ nd4j::random::RandomBuffer *devBuffer; if (threadIdx.x == 0) { extern __shared__ unsigned char shmem[]; buffer = reinterpret_cast<nd4j::random::RandomBuffer *>(shmem); cB = shmem; devBuffer = reinterpret_cast<nd4j::random::RandomBuffer *> (state); dB = reinterpret_cast<unsigned char *> (state); tZ = reinterpret_cast<T*>(shmem + sizeof(nd4j::random::RandomBuffer)); zLength = 
shape::length(zShapeBuffer); zEWS = shape::elementWiseStride(zShapeBuffer); yEWS = shape::elementWiseStride(yShapeBuffer); epsilon = static_cast<T>(1e-5); two_pi = static_cast<T>(2.0f) * static_cast<T>(3.14159265358979323846); mean = extraArguments[0]; stddev = extraArguments[1]; step = (blockDim.x * gridDim.x); } __syncthreads(); // using this loop instead of memcpy for (int e = threadIdx.x; e < sizeof(nd4j::random::RandomBuffer); e+= blockDim.x) { cB[e] = dB[e]; } __syncthreads(); int tid = blockIdx.x * blockDim.x + threadIdx.x; for (Nd4jLong e = tid; e < zLength; e += step) { // we need to get random values tZ[threadIdx.x] = buffer->relativeT<T>(e, epsilon, static_cast<T>(1.0f)); // fix for "next rng value" if (e + 1 >= zLength && e % 2 == 0) { tZ[threadIdx.x+1] = buffer->relativeT<T>(e+1, epsilon, static_cast<T>(1.0f)); } T realMean = y == z ? mean : y[e * yEWS]; __syncthreads(); if (e % 2 == 0) z[e *zEWS] = nd4j::math::nd4j_exp<T>((nd4j::math::nd4j_sqrt<T>(static_cast<T>(-2.0f) * nd4j::math::nd4j_log<T>(tZ[threadIdx.x])) * nd4j::math::nd4j_cos<T>(two_pi * tZ[threadIdx.x+1])) * stddev + realMean); else z[e *zEWS] = nd4j::math::nd4j_exp<T>((nd4j::math::nd4j_sqrt<T>(static_cast<T>(-2.0f) * nd4j::math::nd4j_log<T>(tZ[threadIdx.x-1])) * nd4j::math::nd4j_sin<T>(two_pi * tZ[threadIdx.x])) * stddev + realMean); __syncthreads(); } __syncthreads(); devBuffer->rewind(zLength); } #endif static inline void specialOp(Nd4jPointer state, T *x, Nd4jLong *xShapeBuffer, T *y, Nd4jLong *yShapeBuffer, T *z, Nd4jLong *zShapeBuffer, T *extraArguments) { const T two_pi = static_cast<T>(2.0f) * static_cast<T>(3.14159265358979323846); Nd4jLong zLength = shape::length(zShapeBuffer); auto yEWS = shape::elementWiseStride(yShapeBuffer); auto zEWS = shape::elementWiseStride(zShapeBuffer); int elementsPerThread = zLength / TAD_THRESHOLD; int _threads = nd4j::math::nd4j_max<int>(1, elementsPerThread); _threads = nd4j::math::nd4j_min<int>(_threads, omp_get_max_threads()); int span = (zLength / 
_threads) + 8; // we're enforcing even chunks, since it's mandatory for this algorithm span -= span % 2; auto buffer = reinterpret_cast<nd4j::random::RandomBuffer *> (state); T mean = extraArguments[0]; T stddev = extraArguments[1]; #pragma omp parallel num_threads(_threads) if (_threads > 1) proc_bind(spread) { int tid = omp_get_thread_num(); Nd4jLong start = span * tid; Nd4jLong end = span * (tid + 1); if (end > zLength) end = zLength; T z0, z1; T u0, u1; T lnU0; bool generated = false; for (Nd4jLong e = start; e < end; e++) { if (!generated) { /* * Since box-muller transform expects non-zero u0 value, we'll just use rng with boundaries */ u0 = buffer->relativeT<T>(e, static_cast<T>(1e-5f), static_cast<T>(1.0f)); u1 = buffer->relativeT<T>((e + 1), static_cast<T>(1e-5f), static_cast<T>(1.0f)); lnU0 = nd4j::math::nd4j_sqrt<T>(static_cast<T>(-2.0f) * nd4j::math::nd4j_log<T>(u0)); z0 = lnU0 * nd4j::math::nd4j_cos<T>(two_pi * u1); z1 = lnU0 * nd4j::math::nd4j_sin<T>(two_pi * u1); generated = true; T realMean = y == z ? mean : y[e * yEWS]; z[e * zEWS] = nd4j::math::nd4j_exp<T>(z0 * stddev + realMean); } else { T realMean = y == z ? mean : y[e * yEWS]; z[e * zEWS] = nd4j::math::nd4j_exp<T>(z1 * stddev + realMean); generated = false; } } } // update rng state buffer->rewindH(zLength); } }; } #endif //LIBND4J_SPECIAL_RANDOM_OPS_H
convert.c
/*
    This file is part of ParTI!.

    ParTI! is free software: you can redistribute it and/or modify
    it under the terms of the GNU Lesser General Public License as
    published by the Free Software Foundation, either version 3 of
    the License, or (at your option) any later version.

    ParTI! is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    GNU General Public License for more details.

    You should have received a copy of the GNU Lesser General Public
    License along with ParTI!.
    If not, see <http://www.gnu.org/licenses/>.
*/

#include <ParTI.h>
#include "../sptensor.h"
#include "hicoo.h"

/**
 * Compare two coordinate tuples element-wise.
 * @param item1  first coordinate tuple (nmodes entries)
 * @param item2  second coordinate tuple (nmodes entries)
 * @param nmodes number of modes (tuple length)
 * @return 1 if item1 == item2 in every mode; otherwise 0.
 */
static int sptEqualWithTwoCoordinates(
    const sptIndex * item1,
    const sptIndex * item2,
    const sptIndex nmodes)
{
    sptIndex i1, i2;
    for(sptIndex m=0; m<nmodes; ++m) {
        i1 = item1[m];
        i2 = item2[m];
        if(i1 != i2) {
            /* Fixed: removed unreachable `break` that followed this return. */
            return 0;
        }
    }
    return 1;
}

/**
 * Compute the (exclusive) end coordinates of the block starting at in_item.
 * Each mode's end is in_item[m]+sb, clamped to the tensor dimension.
 * @param out_item receives the exclusive end indices of this block
 * @param tsr      a pointer to a sparse tensor (read for nmodes/ndims)
 * @param in_item  the begin indices of this block; must be in range
 * @param sb       block size (edge length) in each mode
 * @return 0 on success
 */
static int sptBlockEnd(
    sptIndex * out_item,
    sptSparseTensor *tsr,
    const sptIndex * in_item,
    const sptElementIndex sb)
{
    sptIndex nmodes = tsr->nmodes;
    for(sptIndex m=0; m<nmodes; ++m) {
        sptAssert(in_item[m] < tsr->ndims[m]);
        /* Clamp so a partial block at the border does not run past the dimension. */
        out_item[m] = in_item[m]+sb < tsr->ndims[m] ? in_item[m]+sb : tsr->ndims[m];    // exclusive
    }
    return 0;
}

/**
 * Locate the begin coordinates of the block/kernel containing in_item,
 * by right-shifting every index by `bits` (block size is a power of two).
 * @param out_item receives the block-grid coordinates
 * @param tsr      a pointer to a sparse tensor (read for nmodes)
 * @param in_item  element coordinates to locate
 * @param bits     log2 of the block (or kernel) edge length
 * @return 0 on success
 */
static int sptLocateBeginCoord(
    sptIndex * out_item,
    sptSparseTensor *tsr,
    const sptIndex * in_item,
    const sptElementIndex bits)
{
    sptIndex nmodes = tsr->nmodes;
    for(sptIndex m=0; m<nmodes; ++m) {
        out_item[m] = in_item[m] >> bits;
    }
    return 0;
}

/**
 * Record mode pointers for block rows (groups of sk consecutive mode-0
 * indices), from a tensor already sorted by mode 0.
 * mptr->data must be pre-allocated with one entry per block row.
 * @param mptr a vector of pointers as a dense array (output)
 * @param tsr  a pointer to a sparse tensor, sorted on mode 0
 * @param sk   block-row height in mode-0 indices
 * @return 0 on success
 */
int sptGetRowBlockPointers(
    sptNnzIndexVector *mptr,
    sptSparseTensor *tsr,
    const sptIndex sk)
{
    sptNnzIndex nnz = tsr->nnz;
    sptNnzIndex k = 0;      // current block-row id
    sptNnzIndex knnz = 0;   // #Nonzeros in the current block row
    mptr->data[0] = 0;
    /* Fixed duplication: the original handled z == 0 with a separate,
     * identical while-loop before iterating z = 1..nnz-1; one loop over
     * all nonzeros has exactly the same behavior. */
    for(sptNnzIndex z=0; z<nnz; ++z) {
        sptIndex i = tsr->inds[0].data[z];
        /* Advance k until the mode-0 index i falls inside block row k,
         * writing the prefix-sum pointer for every row we skip. */
        while(1) {
            if(i >= sk * k && i < sk * (k+1)) {
                ++ knnz;
                break;
            } else {
                ++ k;
                mptr->data[k] = knnz + mptr->data[k-1];
                knnz = 0;
            }
        }
    }
    sptAssert(k < (tsr->ndims[0] + sk -1 ) / sk);
    sptAssert(mptr->data[mptr->len-1] + knnz == nnz);
    return 0;
}

/**
 * Record mode pointers for kernel rows, from a sorted tensor.
 * @param kptr  a vector of kernel pointers (output; one entry per kernel plus a final sentinel)
 * @param knnzs a vector receiving the nonzero count of each kernel (output)
 * @param tsr   a pointer to a sparse tensor, already sorted so that each kernel's nonzeros are contiguous
 * @param sk_bits log2 of the kernel edge length
 * @return mode pointers
 */
int sptSetKernelPointers(
    sptNnzIndexVector *kptr,
    sptNnzIndexVector *knnzs,
    sptSparseTensor *tsr,
    const sptElementIndex sk_bits)
{
    sptIndex nmodes = tsr->nmodes;
    sptNnzIndex nnz = tsr->nnz;
    sptNnzIndex k = 0; // count kernels
    sptNnzIndex knnz = 0; // #Nonzeros per kernel
    int result = 0;
    result = sptAppendNnzIndexVector(kptr, 0);
    spt_CheckError(result, "HiSpTns Convert", NULL);

    /* Scratch buffers: one element coordinate and two kernel-grid coordinates. */
    sptIndex * coord = (sptIndex *)malloc(nmodes * sizeof(*coord));
    sptIndex * kernel_coord = (sptIndex *)malloc(nmodes * sizeof(*kernel_coord));
    sptIndex * kernel_coord_prior = (sptIndex *)malloc(nmodes * sizeof(*kernel_coord_prior));

    /* Process first nnz to get the first kernel_coord_prior */
    for(sptIndex m=0; m<nmodes; ++m)
        coord[m] = tsr->inds[m].data[0];    // first nonzero indices
    result = sptLocateBeginCoord(kernel_coord_prior, tsr, coord, sk_bits);
    spt_CheckError(result, "HiSpTns Convert", NULL);

    /* Scan all nonzeros; every change of kernel coordinate closes the
     * previous kernel (prefix-sum append into kptr) and opens a new one.
     * Note z = 0 compares equal to its own coordinate, so knnz counts it. */
    for(sptNnzIndex z=0; z<nnz; ++z) {
        for(sptIndex m=0; m<nmodes; ++m)
            coord[m] = tsr->inds[m].data[z];
        result = sptLocateBeginCoord(kernel_coord, tsr, coord, sk_bits);
        spt_CheckError(result, "HiSpTns Convert", NULL);

        if(sptEqualWithTwoCoordinates(kernel_coord, kernel_coord_prior, nmodes) == 1) {
            ++ knnz;
        } else {
            ++ k;
            /* kptr holds cumulative nonzero counts (prefix sums). */
            result = sptAppendNnzIndexVector(kptr, knnz + kptr->data[k-1]);
            spt_CheckError(result, "HiSpTns Convert", NULL);
            result = sptAppendNnzIndexVector(knnzs, knnz);
            spt_CheckError(result, "HiSpTns Convert", NULL);
            for(sptIndex m=0; m<nmodes; ++m)
                kernel_coord_prior[m] = kernel_coord[m];
            knnz = 1;   // current nonzero starts the new kernel
        }
    }
    sptAssert(k < kptr->len);
    sptAssert(kptr->data[kptr->len-1] + knnz == nnz);

    /* Set the last element for kptr */
    sptAppendNnzIndexVector(kptr, nnz);
    sptAppendNnzIndexVector(knnzs, knnz);

    free(coord);
    free(kernel_coord);
    free(kernel_coord_prior);
    return 0;
}

/**
 * Set scheduler for kernels.
 * @param kschr nmodes kernel schedulers.
* @param tsr a pointer to a sparse tensor * @return mode pointers */ int sptSetKernelScheduler( sptIndexVector **kschr, sptIndex *nkiters, sptNnzIndexVector * const kptr, sptSparseTensor *tsr, const sptElementIndex sk_bits) { sptIndex nmodes = tsr->nmodes; sptIndex * ndims = tsr->ndims; int result = 0; sptIndex * coord = (sptIndex *)malloc(nmodes * sizeof(*coord)); sptIndex * kernel_coord = (sptIndex *)malloc(nmodes * sizeof(*kernel_coord)); for(sptNnzIndex k=0; k<kptr->len - 1; ++k) { sptNnzIndex z = kptr->data[k]; for(sptIndex m=0; m<nmodes; ++m) coord[m] = tsr->inds[m].data[z]; result = sptLocateBeginCoord(kernel_coord, tsr, coord, sk_bits); spt_CheckError(result, "HiSpTns Convert", NULL); for(sptIndex m=0; m<nmodes; ++m) { result = sptAppendIndexVector(&(kschr[m][kernel_coord[m]]), k); spt_CheckError(result, "HiSpTns Convert", NULL); } } free(coord); free(kernel_coord); sptIndex sk = (sptIndex)pow(2, sk_bits); sptIndex tmp; for(sptIndex m=0; m<nmodes; ++m) { tmp = 0; sptIndex kernel_ndim = (ndims[m] + sk - 1) / sk; for(sptIndex i=0; i<kernel_ndim; ++i) { if(tmp < kschr[m][i].len) tmp = kschr[m][i].len; } nkiters[m] = tmp; } return 0; } /** * Pre-process COO sparse tensor by permuting, sorting, and record pointers to blocked rows. Kernels in Row-major order, blocks and elements are in Z-Morton order. * @param tsr a pointer to a sparse tensor * @return mode pointers */ int sptPreprocessSparseTensor( sptNnzIndexVector * kptr, sptIndexVector **kschr, sptIndex *nkiters, sptIndexVector **kschr_balanced, sptIndexVector **kschr_balanced_pos, sptIndex *nkpars, sptIndexVector * kschr_rest, sptNnzIndexVector * knnzs, sptSparseTensor *tsr, const sptElementIndex sb_bits, const sptElementIndex sk_bits, int const tk) { sptNnzIndex nnz = tsr->nnz; int result; // TODO: possible permute modes to improve parallelism /* Sort tsr in a Row-major Block order to get all kernels. Not use Morton-order for kernels: 1. 
better support for higher-order tensors by limiting kernel size, because Morton key bit <= 128; */ sptTimer rowblock_sort_timer; sptNewTimer(&rowblock_sort_timer, 0); sptStartTimer(rowblock_sort_timer); sptSparseTensorSortIndexRowBlock(tsr, 1, 0, nnz, sk_bits, tk); // Parallelized inside sptStopTimer(rowblock_sort_timer); sptPrintElapsedTime(rowblock_sort_timer, "\t\trowblock sorting"); sptFreeTimer(rowblock_sort_timer); #if PARTI_DEBUG == 3 printf("Sorted by sptSparseTensorSortIndexRowBlock.\n"); sptAssert(sptDumpSparseTensor(tsr, 0, stdout) == 0); #endif sptTimer set_kernel_timer; sptNewTimer(&set_kernel_timer, 0); sptStartTimer(set_kernel_timer); result = sptSetKernelPointers(kptr, knnzs, tsr, sk_bits); spt_CheckError(result, "HiSpTns Preprocess", NULL); result = sptSetKernelScheduler(kschr, nkiters, kptr, tsr, sk_bits); spt_CheckError(result, "HiSpTns Preprocess", NULL); // printf("OK\n"); fflush(stdout); /* Set balanced data structures: kschr_balanced, kschr_rest */ // sptNnzIndex avg_nnzk = tsr->nnz / (kptr->len - 1); sptNnzIndex max_nnzk = 0; for(sptIndex k=0; k<kptr->len - 1; ++k) { sptNnzIndex nnzk = knnzs->data[k]; if(max_nnzk < nnzk) max_nnzk = nnzk; } // sptNnzIndex par_nnzk_th = 20 * avg_nnzk; // threshold for nnzk per thread sptNnzIndex par_nnzk_th = 5 * max_nnzk; // threshold for nnzk per thread printf("par_nnzk_th: %lu\n", par_nnzk_th); sptIndex sk = (sptIndex)pow(2, sk_bits); // printf("OK-2\n"); fflush(stdout); for(sptIndex m=0; m < tsr->nmodes; ++m) { // Loop kschr for each mode sptIndexVector * restrict kschr_mode = kschr[m]; sptIndexVector * restrict kschr_balanced_mode = kschr_balanced[m]; sptIndexVector * restrict kschr_balanced_pos_mode = kschr_balanced_pos[m]; sptIndex kernel_ndim = (tsr->ndims[m] + sk - 1)/sk; for(sptIndex i=0; i < kernel_ndim; ++i) { sptAppendIndexVector(&(kschr_balanced_pos_mode[i]), 0); } // sptIndex j_rest = nkiters[m]; sptIndex npars = 0; int tag_rest = 0; sptIndex count_nk = 0; sptIndex empty_schr_rows_th = 1.0 * 
kernel_ndim > 1 ? 1.0 * kernel_ndim : 1; printf("[mode %u] empty_schr_rows_th: %u\n", m, empty_schr_rows_th); while(tag_rest == 0 && count_nk < kptr->len - 1) { // Loop for partitions. tag_rest = 1, maybe there is no rest. /* Check two ranges: npars and j or tmp_j !!! */ sptIndex max_nnzk_per_col = 0, par_nnzk = 0; sptIndex count_empty_schr_rows = 0; for(sptIndex i=0; i < kernel_ndim; ++i) { // Find the max nnzk if(count_empty_schr_rows > empty_schr_rows_th) { tag_rest = 1; break; } if(npars >= kschr_balanced_pos_mode[i].len) { ++ count_empty_schr_rows; continue; } else { sptIndex j = kschr_balanced_pos_mode[i].data[npars]; if(j >= kschr_mode[i].len) { ++ count_empty_schr_rows; continue; } sptIndex kernel_num = kschr_mode[i].data[j]; sptNnzIndex kernel_nnz = knnzs->data[kernel_num]; if (max_nnzk_per_col < kernel_nnz) { max_nnzk_per_col = kernel_nnz; } } } // End of i if(tag_rest == 1) { // an empty superblock met, to kschr_rest for(sptIndex i=0; i < kernel_ndim; ++i) { if(npars >= kschr_balanced_pos_mode[i].len) continue; sptIndex j2 = kschr_balanced_pos_mode[i].data[npars]; for(; j2 < kschr_mode[i].len; ++j2) { sptAppendIndexVector(&kschr_rest[m], kschr_mode[i].data[j2]); ++ count_nk; } } } else { // all non-empty superblocks for this column, to kschr_balanced, kschr_balanced_pos /* set par_nnzk */ if(max_nnzk_per_col > par_nnzk_th) { par_nnzk = max_nnzk_per_col; // split according to the superblock with the max nnzk } else { par_nnzk = par_nnzk_th; } /* Real partition */ for(sptIndex i=0; i < kernel_ndim; ++i) { if(npars >= kschr_balanced_pos_mode[i].len) continue; sptIndex tmp_j = kschr_balanced_pos_mode[i].data[npars]; if(tmp_j >= kschr_mode[i].len) continue; sptIndex kernel_num = kschr_mode[i].data[tmp_j]; sptNnzIndex sum_nnzk = knnzs->data[kernel_num]; while(sum_nnzk <= par_nnzk) { sptAppendIndexVector(&(kschr_balanced_mode[i]), kernel_num); ++ count_nk; ++ tmp_j; if(tmp_j < kschr_mode[i].len) { kernel_num = kschr_mode[i].data[tmp_j]; // j + 1 sum_nnzk += 
knnzs->data[kernel_num]; } else { break; } } // End of while sptAppendIndexVector(&(kschr_balanced_pos_mode[i]), tmp_j); } ++ npars; } // printf("count_nk: %u\n", count_nk); fflush(stdout); } // End of while nkpars[m] = npars; // kschr_balanced_pos.len is npars + 1. } // End loop of modes sptStopTimer(set_kernel_timer); sptPrintElapsedTime(set_kernel_timer, "\t\tSet Kernel Ptrs"); sptFreeTimer(set_kernel_timer); sptTimer morton_sort_timer; sptNewTimer(&morton_sort_timer, 0); sptStartTimer(morton_sort_timer); /* Sort blocks in each kernel in Morton-order */ sptNnzIndex k_begin, k_end; /* Loop for all kernels, 0-kptr.len for OMP code */ #pragma omp parallel for num_threads(tk) for(sptNnzIndex k=0; k<kptr->len - 1; ++k) { k_begin = kptr->data[k]; k_end = kptr->data[k+1]; // exclusive /* Sort blocks in each kernel in Morton-order */ sptSparseTensorSortIndexMorton(tsr, 1, k_begin, k_end, sb_bits, tk); // sptSparseTensorSortIndexRowBlock(tsr, 1, k_begin, k_end, sb_bits, tk); #if PARTI_DEBUG == 3 printf("Kernel %"PARTI_PRI_NNZ_INDEX ": Sorted by sptSparseTensorSortIndexMorton.\n", k); sptAssert(sptDumpSparseTensor(tsr, 0, stdout) == 0); #endif } sptStopTimer(morton_sort_timer); sptPrintElapsedTime(morton_sort_timer, "\t\tMorton sorting"); // sptPrintElapsedTime(morton_sort_timer, "\t\t2nd Rowblock sorting"); sptFreeTimer(morton_sort_timer); return 0; } int sptSparseTensorToHiCOO( sptSparseTensorHiCOO *hitsr, sptNnzIndex *max_nnzb, sptSparseTensor *tsr, const sptElementIndex sb_bits, const sptElementIndex sk_bits, const sptElementIndex sc_bits, int const tk) { sptAssert(sk_bits >= sb_bits); sptAssert(sc_bits >= sb_bits); sptIndex i; int result; sptIndex nmodes = tsr->nmodes; sptNnzIndex nnz = tsr->nnz; sptElementIndex sb = pow(2, sb_bits); sptIndex sc = pow(2, sc_bits); /* Set HiCOO parameters. 
ndims for type conversion, size_t -> sptIndex */
    sptIndex * ndims = malloc(nmodes * sizeof *ndims);
    spt_CheckOSError(!ndims, "HiSpTns Convert");
    for(i = 0; i < nmodes; ++i) {
        ndims[i] = (sptIndex)tsr->ndims[i];
    }

    result = sptNewSparseTensorHiCOO(hitsr, (sptIndex)tsr->nmodes, ndims, (sptNnzIndex)tsr->nnz, sb_bits, sk_bits, sc_bits);
    spt_CheckError(result, "HiSpTns Convert", NULL);

    /* Pre-process tensor to get hitsr->kptr, values are nonzero locations. */
    sptTimer sort_timer;
    sptNewTimer(&sort_timer, 0);
    sptStartTimer(sort_timer);

    sptPreprocessSparseTensor(&hitsr->kptr, hitsr->kschr, hitsr->nkiters, hitsr->kschr_balanced, hitsr->kschr_balanced_pos, hitsr->nkpars, hitsr->kschr_rest, &hitsr->knnzs, tsr, sb_bits, sk_bits, tk);

    sptStopTimer(sort_timer);
    sptPrintElapsedTime(sort_timer, "\tHiCOO sorting (rowblock + morton)");
    sptFreeTimer(sort_timer);
#if PARTI_DEBUG >= 2
    printf("Kernels: Row-major, blocks: Morton-order sorted:\n");
    sptAssert(sptDumpSparseTensor(tsr, 0, stdout) == 0);
    printf("hitsr->kptr:\n");
    sptDumpNnzIndexVector(&hitsr->kptr, stdout);
#endif

    sptTimer gen_timer;
    sptNewTimer(&gen_timer, 0);
    sptStartTimer(gen_timer);

    /* Temporary storage: per-mode coordinates of the current/previous block. */
    sptIndex * block_begin = (sptIndex *)malloc(nmodes * sizeof(*block_begin));
    sptIndex * block_end = (sptIndex *)malloc(nmodes * sizeof(*block_end));
    sptIndex * block_begin_prior = (sptIndex *)malloc(nmodes * sizeof(*block_begin_prior));
    sptIndex * block_coord = (sptIndex *)malloc(nmodes * sizeof(*block_coord));

    sptNnzIndex k_begin, k_end; // #Nonzeros locations
    sptNnzIndex nk = 0; // #Kernels
    sptNnzIndex nc = 0; // #Chunks
    sptNnzIndex nb = 1; // #Blocks  // counting from the first nnz
    sptNnzIndex nb_tmp = 0;
    sptNnzIndex ne = 0; // #Nonzeros per block
    sptIndex eindex = 0;
    sptBlockIndex chunk_size = 0;

    /* different appending methods:
     * elements: append every nonzero entry
     * blocks: append when seeing a new block.
     * chunks: appending when seeting a new chunk. Notice the boundary of kernels and the last chunk of the whole tensor may be larger than the sc.
     * kernels: append when seeing a new kernel. Not appending a vector, just write data into an allocated array.
     */

    /* Process first nnz */
    for(sptIndex m=0; m<nmodes; ++m)
        block_coord[m] = tsr->inds[m].data[0];    // first nonzero indices
    result = sptLocateBeginCoord(block_begin_prior, tsr, block_coord, sb_bits);
    spt_CheckError(result, "HiSpTns Convert", NULL);
    for(sptIndex m=0; m<nmodes; ++m)
        sptAppendBlockIndexVector(&hitsr->binds[m], (sptBlockIndex)block_begin_prior[m]);
    sptAppendNnzIndexVector(&hitsr->bptr, 0);

    /* Loop for all kernels, 0 - hitsr->kptr.len - 1 for OMP code */
    for(sptNnzIndex k=0; k<hitsr->kptr.len - 1; ++k) {
        k_begin = hitsr->kptr.data[k];
        k_end = hitsr->kptr.data[k+1]; // exclusive
        nb_tmp = k == 0 ? 0: nb;
        /* Modify kptr pointing to block locations */
        hitsr->kptr.data[k] = nb_tmp;
        ++ nk;

        /* Only append a chunk for the new kernel, the last chunk in the old kernel may be larger than sc */
        sptAppendNnzIndexVector(&hitsr->cptr, nb_tmp);
        ++ nc;
        chunk_size = 0;

        /* Loop nonzeros in each kernel */
        for(sptNnzIndex z = k_begin; z < k_end; ++z) {
#if PARTI_DEBUG == 5
            printf("z: %"PARTI_PRI_NNZ_INDEX "\n", z);
#endif
            for(sptIndex m=0; m<nmodes; ++m)
                block_coord[m] = tsr->inds[m].data[z];  // first nonzero indices
#if PARTI_DEBUG == 5
            printf("block_coord:\n");
            sptAssert(sptDumpIndexArray(block_coord, nmodes, stdout) == 0);
#endif
            result = sptLocateBeginCoord(block_begin, tsr, block_coord, sb_bits);
            // spt_CheckError(result, "HiSpTns Convert", NULL);
#if PARTI_DEBUG == 5
            printf("block_begin_prior:\n");
            sptAssert(sptDumpIndexArray(block_begin_prior, nmodes, stdout) == 0);
            printf("block_begin:\n");
            sptAssert(sptDumpIndexArray(block_begin, nmodes, stdout) == 0);
#endif
            result = sptBlockEnd(block_end, tsr, block_begin, sb);  // exclusive
            // spt_CheckError(result, "HiSpTns Convert", NULL);

            /* Append einds and values: element index = offset inside its block. */
            for(sptIndex m=0; m<nmodes; ++m) {
                eindex = tsr->inds[m].data[z] < (block_begin[m] << sb_bits) ? tsr->inds[m].data[z] : tsr->inds[m].data[z] - (block_begin[m] << sb_bits);
                sptAssert(eindex < sb);
                sptAppendElementIndexVector(&hitsr->einds[m], (sptElementIndex)eindex);
            }
            sptAppendValueVector(&hitsr->values, tsr->values.data[z]);

            /* z in the same block with last z */
            if (sptEqualWithTwoCoordinates(block_begin, block_begin_prior, nmodes) == 1) {
                /* ne: #Elements in current block */
                ++ ne;
            } else {    /* New block */
                /* ne: #Elements in the last block */
                /* Append block bptr and bidx */
                /* NOTE(review): z (an sptNnzIndex) is cast to sptBlockIndex before
                 * being appended into an NnzIndexVector — possible narrowing if
                 * sptBlockIndex is smaller than sptNnzIndex; confirm intended. */
                sptAppendNnzIndexVector(&hitsr->bptr, (sptBlockIndex)z);
                for(sptIndex m=0; m<nmodes; ++m)
                    sptAppendBlockIndexVector(&hitsr->binds[m], (sptBlockIndex)block_begin[m]);
                for(sptIndex m=0; m<nmodes; ++m)
                    block_begin_prior[m] = block_begin[m];

                /* ne: old block's number of nonzeros */
                // if(chunk_size + ne > sc || ne >= sc) {
                // if(chunk_size + ne >= sc && chunk_size > 0) {    // calculate the prior block
                //     /* Append a chunk ending by the old block */
                //     sptAppendNnzIndexVector(&hitsr->cptr, nb-1);
                //     ++ nc;
                //     chunk_size = ne;
                // } else {
                //     chunk_size += ne;
                // }
                if(chunk_size + ne >= sc) { // calculate the prior block
                    /* Append a chunk ending by the old block */
                    sptAppendNnzIndexVector(&hitsr->cptr, nb);
                    ++ nc;
                    chunk_size = 0;
                } else {
                    chunk_size += ne;
                }

                ++ nb;
                ne = 1;
            }   // End new block
#if PARTI_DEBUG == 5
            printf("nk: %u, nc: %u, nb: %u, ne: %u, chunk_size: %lu\n\n", nk, nc, nb, ne, chunk_size);
#endif
        }   // End z loop
    }   // End k loop

    sptAssert(nb <= nnz);
    sptAssert(nb == hitsr->binds[0].len);
    // sptAssert(nc <= nb);
    sptAssert(nk == hitsr->kptr.len - 1);

    /* Last element for kptr, cptr, bptr */
    hitsr->kptr.data[hitsr->kptr.len - 1] = hitsr->bptr.len;
    sptAppendNnzIndexVector(&hitsr->cptr, hitsr->bptr.len);
    sptAppendNnzIndexVector(&hitsr->bptr, nnz);

    /* Compute max_nnzb over all blocks and verify bptr partitions all nonzeros. */
    *max_nnzb = hitsr->bptr.data[1] - hitsr->bptr.data[0];
    sptNnzIndex sum_nnzb = 0;
    for(sptIndex i=0; i < hitsr->bptr.len - 1; ++i) {
        sptNnzIndex nnzb = hitsr->bptr.data[i+1] - hitsr->bptr.data[i];
        sum_nnzb += nnzb;
        if(*max_nnzb < nnzb) {
            *max_nnzb = nnzb;
        }
    }
    sptAssert(sum_nnzb == hitsr->nnz);

    sptStopTimer(gen_timer);
    sptPrintElapsedTime(gen_timer, "\tGenerate HiCOO");
    sptFreeTimer(gen_timer);

    free(block_begin);
    free(block_end);
    free(block_begin_prior);
    free(block_coord);
    return 0;
}
CometMatrix.h
// This file os part of FVM // Copyright (c) 2012 FVM Authors // See LICENSE file for terms. #ifndef _COMETMATRIX_H_ #define _COMETMATRIX_H_ #include "MatrixJML.h" #include "Array.h" #include "ArrayBase.h" #include "NumType.h" #include "SquareMatrixESBGK.h" #include <omp.h> template<class T> class CometMatrix : public MatrixJML<T> { public: typedef Array<T> TArray; typedef typename NumTypeTraits<T>::T_Scalar T_Scalar; CometMatrix(const int order): _elements(7*order-18), _values(_elements), _order(order), _AMat(3), _vel(3) { _values=0.; _AMat.zero(); _vel=0; } T& getElement(const int i,const int j) { int index; if(i==j) { index=(i-1); return _values[index]; } else if(j==_order) { index=_order+3*i-1; return _values[index]; } else if(j==_order-1) { index=_order+3*i-2; return _values[index]; } else if(j==_order-2) { index=_order+3*i-3; return _values[index]; } else if(i==_order) { index=6*_order+j-16; return _values[index]; } else if(i==_order-1) { index=5*_order+j-13; return _values[index]; } else if(i==_order-2) { index=4*_order+j-10; return _values[index]; } else { throw CException("Invalid index for Comet matrix"); return _values[0]; } } void printElement(const int& i,const int& j) { int index; if(i==j) { index=(i-1); cout<<_values[index]<<endl; } else if(j==_order) { index=i-1+_order; cout<<_values[index]<<endl; } else if(i==_order) { index=2*_order+j-2; cout<<_values[index]<<endl; } else { throw CException("Invalid index for Comet matrix"); } } void Solve(TArray& bVec) { //Replaces bVec with solution vector. 
T an1i; T an2i; T an3i; T ain1; T ain2; T ain3; T aii; T alpha0; T alpha1; T alpha2; alpha0=0.; alpha1=0.; alpha2=0.; //#pragma omp parallel for default(shared) private(i,an1i,an2i,an3i,aii,ain1,ain2,ain3) { for(int i=1;i<_order-2;i++) { an1i=getElement(_order-2,i); an2i=getElement(_order-1,i); an3i=getElement(_order,i); aii=getElement(i,i); ain1=getElement(i,_order-2); ain2=getElement(i,_order-1); ain3=getElement(i,_order); _AMat.getElement(1,1)-=an1i*ain1/aii; _AMat.getElement(1,2)-=an1i*ain2/aii; _AMat.getElement(1,3)-=an1i*ain3/aii; _AMat.getElement(2,1)-=an2i*ain1/aii; _AMat.getElement(2,2)-=an2i*ain2/aii; _AMat.getElement(2,3)-=an2i*ain3/aii; _AMat.getElement(3,1)-=an3i*ain1/aii; _AMat.getElement(3,2)-=an3i*ain2/aii; _AMat.getElement(3,3)-=an3i*ain3/aii; alpha0+=an1i*bVec[i-1]/aii; alpha1+=an2i*bVec[i-1]/aii; alpha2+=an3i*bVec[i-1]/aii; } } _AMat.getElement(1,1)+=getElement(_order-2,_order-2); _AMat.getElement(2,2)+=getElement(_order-1,_order-1); _AMat.getElement(3,3)+=getElement(_order,_order); _vel[0]=(bVec[_order-3]-alpha0); _vel[1]=(bVec[_order-2]-alpha1); _vel[2]=(bVec[_order-1]-alpha2); _AMat.Solve(_vel); bVec[_order-3]=_vel[0]; bVec[_order-2]=_vel[1]; bVec[_order-1]=_vel[2]; //#pragma omp parallel for default(shared) private(i,aii,ain1,ain2,ain3) { for(int i=1;i<_order-2;i++) { ain1=getElement(i,_order-2); ain2=getElement(i,_order-1); ain3=getElement(i,_order); aii=getElement(i,i); bVec[i-1]=(bVec[i-1]-ain1*bVec[_order-3]-ain2*bVec[_order-2]-ain3*bVec[_order-1])/aii; } } } void zero() { for(int i=0;i<_elements;i++) _values[i]=T_Scalar(0); } T getTraceAbs() { T trace=0.; for(int i=0;i<_order;i++) trace+=fabs(_values[i]); return trace; } private: int _elements; TArray _values; int _order; SquareMatrixESBGK<T> _AMat; TArray _vel; }; #endif
convolution_winograd_dot_pack8.h
// Tencent is pleased to support the open source community by making ncnn available. // // Copyright (C) 2022 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. static void convolution_winograd_dot_pack8_avx(Mat& bottom_blob_tm, int outch, const Mat& kernel_tm, Mat& top_blob_tm, const Option& opt) { // Mat bottom_blob_tm(tiles, 16/36/64, inch, 32u, 4, opt.workspace_allocator); const int tiles = bottom_blob_tm.w; const int batch = bottom_blob_tm.h; const int inch = bottom_blob_tm.c; // permute Mat bottom_blob_tm2; if (tiles >= 12) bottom_blob_tm2.create(12 * inch, tiles / 12 + (tiles % 12) / 8 + (tiles % 12 % 8) / 4 + (tiles % 12 % 4) / 2 + tiles % 12 % 2, batch, 32u, 8, opt.workspace_allocator); else if (tiles >= 8) bottom_blob_tm2.create(8 * inch, tiles / 8 + (tiles % 8) / 4 + (tiles % 4) / 2 + tiles % 2, batch, 32u, 8, opt.workspace_allocator); else if (tiles >= 4) bottom_blob_tm2.create(4 * inch, tiles / 4 + (tiles % 4) / 2 + tiles % 2, batch, 32u, 8, opt.workspace_allocator); else if (tiles >= 2) bottom_blob_tm2.create(2 * inch, tiles / 2 + tiles % 2, batch, 32u, 8, opt.workspace_allocator); else // if (tiles >= 1) bottom_blob_tm2.create(1 * inch, tiles, batch, 32u, 8, opt.workspace_allocator); #pragma omp parallel for num_threads(opt.num_threads) for (int r = 0; r < batch; r++) { Mat tm2 = bottom_blob_tm2.channel(r); // tile int i = 0; for (; i + 11 < tiles; i += 12) { float* tmpptr = tm2.row(i / 12); const float* r0 = 
bottom_blob_tm; r0 += (r * tiles + i) * 8; for (int q = 0; q < inch; q++) { // transpose 8x12 __m256 _r0 = _mm256_load_ps(r0); __m256 _r1 = _mm256_load_ps(r0 + 8); __m256 _r2 = _mm256_load_ps(r0 + 8 * 2); __m256 _r3 = _mm256_load_ps(r0 + 8 * 3); __m256 _r4 = _mm256_load_ps(r0 + 8 * 4); __m256 _r5 = _mm256_load_ps(r0 + 8 * 5); __m256 _r6 = _mm256_load_ps(r0 + 8 * 6); __m256 _r7 = _mm256_load_ps(r0 + 8 * 7); __m256 _r8 = _mm256_load_ps(r0 + 8 * 8); __m256 _r9 = _mm256_load_ps(r0 + 8 * 9); __m256 _ra = _mm256_load_ps(r0 + 8 * 10); __m256 _rb = _mm256_load_ps(r0 + 8 * 11); __m256 _tmp0 = _mm256_unpacklo_ps(_r0, _r1); __m256 _tmp1 = _mm256_unpackhi_ps(_r0, _r1); __m256 _tmp2 = _mm256_unpacklo_ps(_r2, _r3); __m256 _tmp3 = _mm256_unpackhi_ps(_r2, _r3); __m256 _tmp4 = _mm256_unpacklo_ps(_r4, _r5); __m256 _tmp5 = _mm256_unpackhi_ps(_r4, _r5); __m256 _tmp6 = _mm256_unpacklo_ps(_r6, _r7); __m256 _tmp7 = _mm256_unpackhi_ps(_r6, _r7); __m256 _tmp8 = _mm256_unpacklo_ps(_r8, _r9); __m256 _tmp9 = _mm256_unpackhi_ps(_r8, _r9); __m256 _tmpa = _mm256_unpacklo_ps(_ra, _rb); __m256 _tmpb = _mm256_unpackhi_ps(_ra, _rb); __m256 _tmpc = _mm256_shuffle_ps(_tmp0, _tmp2, _MM_SHUFFLE(1, 0, 1, 0)); __m256 _tmpd = _mm256_shuffle_ps(_tmp0, _tmp2, _MM_SHUFFLE(3, 2, 3, 2)); __m256 _tmpe = _mm256_shuffle_ps(_tmp1, _tmp3, _MM_SHUFFLE(1, 0, 1, 0)); __m256 _tmpf = _mm256_shuffle_ps(_tmp1, _tmp3, _MM_SHUFFLE(3, 2, 3, 2)); __m256 _tmpg = _mm256_shuffle_ps(_tmp4, _tmp6, _MM_SHUFFLE(1, 0, 1, 0)); __m256 _tmph = _mm256_shuffle_ps(_tmp4, _tmp6, _MM_SHUFFLE(3, 2, 3, 2)); __m256 _tmpi = _mm256_shuffle_ps(_tmp5, _tmp7, _MM_SHUFFLE(1, 0, 1, 0)); __m256 _tmpj = _mm256_shuffle_ps(_tmp5, _tmp7, _MM_SHUFFLE(3, 2, 3, 2)); __m256 _tmpk = _mm256_shuffle_ps(_tmp8, _tmpa, _MM_SHUFFLE(1, 0, 1, 0)); __m256 _tmpl = _mm256_shuffle_ps(_tmp8, _tmpa, _MM_SHUFFLE(3, 2, 3, 2)); __m256 _tmpm = _mm256_shuffle_ps(_tmp9, _tmpb, _MM_SHUFFLE(1, 0, 1, 0)); __m256 _tmpn = _mm256_shuffle_ps(_tmp9, _tmpb, _MM_SHUFFLE(3, 2, 3, 2)); _r0 = 
_mm256_permute2f128_ps(_tmpc, _tmpg, _MM_SHUFFLE(0, 2, 0, 0)); _r1 = _mm256_permute2f128_ps(_tmpk, _tmpd, _MM_SHUFFLE(0, 2, 0, 0)); _r2 = _mm256_permute2f128_ps(_tmph, _tmpl, _MM_SHUFFLE(0, 2, 0, 0)); _r3 = _mm256_permute2f128_ps(_tmpe, _tmpi, _MM_SHUFFLE(0, 2, 0, 0)); _r4 = _mm256_permute2f128_ps(_tmpm, _tmpf, _MM_SHUFFLE(0, 2, 0, 0)); _r5 = _mm256_permute2f128_ps(_tmpj, _tmpn, _MM_SHUFFLE(0, 2, 0, 0)); _r6 = _mm256_permute2f128_ps(_tmpc, _tmpg, _MM_SHUFFLE(0, 3, 0, 1)); _r7 = _mm256_permute2f128_ps(_tmpk, _tmpd, _MM_SHUFFLE(0, 3, 0, 1)); _r8 = _mm256_permute2f128_ps(_tmph, _tmpl, _MM_SHUFFLE(0, 3, 0, 1)); _r9 = _mm256_permute2f128_ps(_tmpe, _tmpi, _MM_SHUFFLE(0, 3, 0, 1)); _ra = _mm256_permute2f128_ps(_tmpm, _tmpf, _MM_SHUFFLE(0, 3, 0, 1)); _rb = _mm256_permute2f128_ps(_tmpj, _tmpn, _MM_SHUFFLE(0, 3, 0, 1)); _mm256_store_ps(tmpptr, _r0); _mm256_store_ps(tmpptr + 8, _r1); _mm256_store_ps(tmpptr + 8 * 2, _r2); _mm256_store_ps(tmpptr + 8 * 3, _r3); _mm256_store_ps(tmpptr + 8 * 4, _r4); _mm256_store_ps(tmpptr + 8 * 5, _r5); _mm256_store_ps(tmpptr + 8 * 6, _r6); _mm256_store_ps(tmpptr + 8 * 7, _r7); _mm256_store_ps(tmpptr + 8 * 8, _r8); _mm256_store_ps(tmpptr + 8 * 9, _r9); _mm256_store_ps(tmpptr + 8 * 10, _ra); _mm256_store_ps(tmpptr + 8 * 11, _rb); tmpptr += 96; r0 += bottom_blob_tm.cstep * 8; } } for (; i + 7 < tiles; i += 8) { float* tmpptr = tm2.row(i / 12 + (i % 12) / 8); const float* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 8; for (int q = 0; q < inch; q++) { // transpose 8x8 __m256 _r0 = _mm256_load_ps(r0); __m256 _r1 = _mm256_load_ps(r0 + 8); __m256 _r2 = _mm256_load_ps(r0 + 8 * 2); __m256 _r3 = _mm256_load_ps(r0 + 8 * 3); __m256 _r4 = _mm256_load_ps(r0 + 8 * 4); __m256 _r5 = _mm256_load_ps(r0 + 8 * 5); __m256 _r6 = _mm256_load_ps(r0 + 8 * 6); __m256 _r7 = _mm256_load_ps(r0 + 8 * 7); __m256 _tmp0 = _mm256_unpacklo_ps(_r0, _r1); __m256 _tmp1 = _mm256_unpackhi_ps(_r0, _r1); __m256 _tmp2 = _mm256_unpacklo_ps(_r2, _r3); __m256 _tmp3 = _mm256_unpackhi_ps(_r2, 
_r3); __m256 _tmp4 = _mm256_unpacklo_ps(_r4, _r5); __m256 _tmp5 = _mm256_unpackhi_ps(_r4, _r5); __m256 _tmp6 = _mm256_unpacklo_ps(_r6, _r7); __m256 _tmp7 = _mm256_unpackhi_ps(_r6, _r7); __m256 _tmp8 = _mm256_shuffle_ps(_tmp0, _tmp2, _MM_SHUFFLE(1, 0, 1, 0)); __m256 _tmp9 = _mm256_shuffle_ps(_tmp0, _tmp2, _MM_SHUFFLE(3, 2, 3, 2)); __m256 _tmpa = _mm256_shuffle_ps(_tmp1, _tmp3, _MM_SHUFFLE(1, 0, 1, 0)); __m256 _tmpb = _mm256_shuffle_ps(_tmp1, _tmp3, _MM_SHUFFLE(3, 2, 3, 2)); __m256 _tmpc = _mm256_shuffle_ps(_tmp4, _tmp6, _MM_SHUFFLE(1, 0, 1, 0)); __m256 _tmpd = _mm256_shuffle_ps(_tmp4, _tmp6, _MM_SHUFFLE(3, 2, 3, 2)); __m256 _tmpe = _mm256_shuffle_ps(_tmp5, _tmp7, _MM_SHUFFLE(1, 0, 1, 0)); __m256 _tmpf = _mm256_shuffle_ps(_tmp5, _tmp7, _MM_SHUFFLE(3, 2, 3, 2)); _r0 = _mm256_permute2f128_ps(_tmp8, _tmpc, _MM_SHUFFLE(0, 2, 0, 0)); _r1 = _mm256_permute2f128_ps(_tmp9, _tmpd, _MM_SHUFFLE(0, 2, 0, 0)); _r2 = _mm256_permute2f128_ps(_tmpa, _tmpe, _MM_SHUFFLE(0, 2, 0, 0)); _r3 = _mm256_permute2f128_ps(_tmpb, _tmpf, _MM_SHUFFLE(0, 2, 0, 0)); _r4 = _mm256_permute2f128_ps(_tmp8, _tmpc, _MM_SHUFFLE(0, 3, 0, 1)); _r5 = _mm256_permute2f128_ps(_tmp9, _tmpd, _MM_SHUFFLE(0, 3, 0, 1)); _r6 = _mm256_permute2f128_ps(_tmpa, _tmpe, _MM_SHUFFLE(0, 3, 0, 1)); _r7 = _mm256_permute2f128_ps(_tmpb, _tmpf, _MM_SHUFFLE(0, 3, 0, 1)); _mm256_store_ps(tmpptr, _r0); _mm256_store_ps(tmpptr + 8, _r1); _mm256_store_ps(tmpptr + 8 * 2, _r2); _mm256_store_ps(tmpptr + 8 * 3, _r3); _mm256_store_ps(tmpptr + 8 * 4, _r4); _mm256_store_ps(tmpptr + 8 * 5, _r5); _mm256_store_ps(tmpptr + 8 * 6, _r6); _mm256_store_ps(tmpptr + 8 * 7, _r7); tmpptr += 64; r0 += bottom_blob_tm.cstep * 8; } } for (; i + 3 < tiles; i += 4) { float* tmpptr = tm2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4); const float* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 8; for (int q = 0; q < inch; q++) { // transpose 8x4 __m256 _r0 = _mm256_load_ps(r0); __m256 _r1 = _mm256_load_ps(r0 + 8); __m256 _r2 = _mm256_load_ps(r0 + 8 * 2); __m256 _r3 = 
_mm256_load_ps(r0 + 8 * 3); __m256 _tmp0 = _mm256_unpacklo_ps(_r0, _r1); __m256 _tmp1 = _mm256_unpackhi_ps(_r0, _r1); __m256 _tmp2 = _mm256_unpacklo_ps(_r2, _r3); __m256 _tmp3 = _mm256_unpackhi_ps(_r2, _r3); __m256 _tmp4 = _mm256_shuffle_ps(_tmp0, _tmp2, _MM_SHUFFLE(1, 0, 1, 0)); __m256 _tmp5 = _mm256_shuffle_ps(_tmp0, _tmp2, _MM_SHUFFLE(3, 2, 3, 2)); __m256 _tmp6 = _mm256_shuffle_ps(_tmp1, _tmp3, _MM_SHUFFLE(1, 0, 1, 0)); __m256 _tmp7 = _mm256_shuffle_ps(_tmp1, _tmp3, _MM_SHUFFLE(3, 2, 3, 2)); _r0 = _mm256_permute2f128_ps(_tmp4, _tmp5, _MM_SHUFFLE(0, 2, 0, 0)); _r1 = _mm256_permute2f128_ps(_tmp6, _tmp7, _MM_SHUFFLE(0, 2, 0, 0)); _r2 = _mm256_permute2f128_ps(_tmp4, _tmp5, _MM_SHUFFLE(0, 3, 0, 1)); _r3 = _mm256_permute2f128_ps(_tmp6, _tmp7, _MM_SHUFFLE(0, 3, 0, 1)); _mm256_store_ps(tmpptr, _r0); _mm256_store_ps(tmpptr + 8, _r1); _mm256_store_ps(tmpptr + 8 * 2, _r2); _mm256_store_ps(tmpptr + 8 * 3, _r3); tmpptr += 32; r0 += bottom_blob_tm.cstep * 8; } } for (; i + 1 < tiles; i += 2) { float* tmpptr = tm2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2); const float* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 8; for (int q = 0; q < inch; q++) { // transpose 8x2 __m256 _r0 = _mm256_load_ps(r0); __m256 _r1 = _mm256_load_ps(r0 + 8); __m256 _tmp0 = _mm256_unpacklo_ps(_r0, _r1); __m256 _tmp1 = _mm256_unpackhi_ps(_r0, _r1); _r0 = _mm256_permute2f128_ps(_tmp0, _tmp1, _MM_SHUFFLE(0, 2, 0, 0)); _r1 = _mm256_permute2f128_ps(_tmp0, _tmp1, _MM_SHUFFLE(0, 3, 0, 1)); _mm256_store_ps(tmpptr, _r0); _mm256_store_ps(tmpptr + 8, _r1); tmpptr += 16; r0 += bottom_blob_tm.cstep * 8; } } for (; i < tiles; i++) { float* tmpptr = tm2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2); const float* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 8; for (int q = 0; q < inch; q++) { __m256 _val = _mm256_load_ps(r0); _mm256_store_ps(tmpptr, _val); tmpptr += 8; r0 += bottom_blob_tm.cstep * 8; } } } bottom_blob_tm = Mat(); // permute end 
top_blob_tm.create(tiles, batch, outch, 32u, 8, opt.workspace_allocator); #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { float* output0_tm = top_blob_tm.channel(p); const Mat kernel0_tm = kernel_tm.channel(p); for (int r = 0; r < batch; r++) { const Mat bb2 = bottom_blob_tm2.channel(r); int i = 0; for (; i + 11 < tiles; i += 12) { const float* r0 = bb2.row(i / 12); const float* k0 = kernel0_tm.row(r); int nn = inch * 8; // inch always > 0 __m256 _sum0 = _mm256_setzero_ps(); __m256 _sum1 = _mm256_setzero_ps(); __m256 _sum2 = _mm256_setzero_ps(); __m256 _sum3 = _mm256_setzero_ps(); __m256 _sum4 = _mm256_setzero_ps(); __m256 _sum5 = _mm256_setzero_ps(); __m256 _sum6 = _mm256_setzero_ps(); __m256 _sum7 = _mm256_setzero_ps(); __m256 _sum8 = _mm256_setzero_ps(); __m256 _sum9 = _mm256_setzero_ps(); __m256 _suma = _mm256_setzero_ps(); __m256 _sumb = _mm256_setzero_ps(); for (int j = 0; j < nn; j++) { __m256 _w0 = _mm256_load_ps(k0); __m256 _val0 = _mm256_broadcast_ss(r0); __m256 _val1 = _mm256_broadcast_ss(r0 + 1); _sum0 = _mm256_comp_fmadd_ps(_val0, _w0, _sum0); _sum1 = _mm256_comp_fmadd_ps(_val1, _w0, _sum1); __m256 _val2 = _mm256_broadcast_ss(r0 + 2); __m256 _val3 = _mm256_broadcast_ss(r0 + 3); _sum2 = _mm256_comp_fmadd_ps(_val2, _w0, _sum2); _sum3 = _mm256_comp_fmadd_ps(_val3, _w0, _sum3); __m256 _val4 = _mm256_broadcast_ss(r0 + 4); __m256 _val5 = _mm256_broadcast_ss(r0 + 5); _sum4 = _mm256_comp_fmadd_ps(_val4, _w0, _sum4); _sum5 = _mm256_comp_fmadd_ps(_val5, _w0, _sum5); __m256 _val6 = _mm256_broadcast_ss(r0 + 6); __m256 _val7 = _mm256_broadcast_ss(r0 + 7); _sum6 = _mm256_comp_fmadd_ps(_val6, _w0, _sum6); _sum7 = _mm256_comp_fmadd_ps(_val7, _w0, _sum7); __m256 _val8 = _mm256_broadcast_ss(r0 + 8); __m256 _val9 = _mm256_broadcast_ss(r0 + 9); _sum8 = _mm256_comp_fmadd_ps(_val8, _w0, _sum8); _sum9 = _mm256_comp_fmadd_ps(_val9, _w0, _sum9); __m256 _vala = _mm256_broadcast_ss(r0 + 10); __m256 _valb = _mm256_broadcast_ss(r0 + 11); 
_suma = _mm256_comp_fmadd_ps(_vala, _w0, _suma); _sumb = _mm256_comp_fmadd_ps(_valb, _w0, _sumb); r0 += 12; k0 += 8; } _mm256_store_ps(output0_tm, _sum0); _mm256_store_ps(output0_tm + 8, _sum1); _mm256_store_ps(output0_tm + 8 * 2, _sum2); _mm256_store_ps(output0_tm + 8 * 3, _sum3); _mm256_store_ps(output0_tm + 8 * 4, _sum4); _mm256_store_ps(output0_tm + 8 * 5, _sum5); _mm256_store_ps(output0_tm + 8 * 6, _sum6); _mm256_store_ps(output0_tm + 8 * 7, _sum7); _mm256_store_ps(output0_tm + 8 * 8, _sum8); _mm256_store_ps(output0_tm + 8 * 9, _sum9); _mm256_store_ps(output0_tm + 8 * 10, _suma); _mm256_store_ps(output0_tm + 8 * 11, _sumb); output0_tm += 8 * 12; } for (; i + 7 < tiles; i += 8) { const float* r0 = bb2.row(i / 12 + (i % 12) / 8); const float* k0 = kernel0_tm.row(r); int nn = inch * 8; // inch always > 0 __m256 _sum0 = _mm256_setzero_ps(); __m256 _sum1 = _mm256_setzero_ps(); __m256 _sum2 = _mm256_setzero_ps(); __m256 _sum3 = _mm256_setzero_ps(); __m256 _sum4 = _mm256_setzero_ps(); __m256 _sum5 = _mm256_setzero_ps(); __m256 _sum6 = _mm256_setzero_ps(); __m256 _sum7 = _mm256_setzero_ps(); for (int j = 0; j < nn; j++) { __m256 _w0 = _mm256_load_ps(k0); __m256 _val0 = _mm256_broadcast_ss(r0); __m256 _val1 = _mm256_broadcast_ss(r0 + 1); _sum0 = _mm256_comp_fmadd_ps(_val0, _w0, _sum0); _sum1 = _mm256_comp_fmadd_ps(_val1, _w0, _sum1); __m256 _val2 = _mm256_broadcast_ss(r0 + 2); __m256 _val3 = _mm256_broadcast_ss(r0 + 3); _sum2 = _mm256_comp_fmadd_ps(_val2, _w0, _sum2); _sum3 = _mm256_comp_fmadd_ps(_val3, _w0, _sum3); __m256 _val4 = _mm256_broadcast_ss(r0 + 4); __m256 _val5 = _mm256_broadcast_ss(r0 + 5); _sum4 = _mm256_comp_fmadd_ps(_val4, _w0, _sum4); _sum5 = _mm256_comp_fmadd_ps(_val5, _w0, _sum5); __m256 _val6 = _mm256_broadcast_ss(r0 + 6); __m256 _val7 = _mm256_broadcast_ss(r0 + 7); _sum6 = _mm256_comp_fmadd_ps(_val6, _w0, _sum6); _sum7 = _mm256_comp_fmadd_ps(_val7, _w0, _sum7); r0 += 8; k0 += 8; } _mm256_store_ps(output0_tm, _sum0); _mm256_store_ps(output0_tm + 8, 
_sum1); _mm256_store_ps(output0_tm + 8 * 2, _sum2); _mm256_store_ps(output0_tm + 8 * 3, _sum3); _mm256_store_ps(output0_tm + 8 * 4, _sum4); _mm256_store_ps(output0_tm + 8 * 5, _sum5); _mm256_store_ps(output0_tm + 8 * 6, _sum6); _mm256_store_ps(output0_tm + 8 * 7, _sum7); output0_tm += 8 * 8; } for (; i + 3 < tiles; i += 4) { const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4); const float* k0 = kernel0_tm.row(r); int nn = inch * 8; // inch always > 0 __m256 _sum0 = _mm256_setzero_ps(); __m256 _sum1 = _mm256_setzero_ps(); __m256 _sum2 = _mm256_setzero_ps(); __m256 _sum3 = _mm256_setzero_ps(); for (int j = 0; j < nn; j++) { __m256 _w0 = _mm256_load_ps(k0); __m256 _val0 = _mm256_broadcast_ss(r0); __m256 _val1 = _mm256_broadcast_ss(r0 + 1); _sum0 = _mm256_comp_fmadd_ps(_val0, _w0, _sum0); _sum1 = _mm256_comp_fmadd_ps(_val1, _w0, _sum1); __m256 _val2 = _mm256_broadcast_ss(r0 + 2); __m256 _val3 = _mm256_broadcast_ss(r0 + 3); _sum2 = _mm256_comp_fmadd_ps(_val2, _w0, _sum2); _sum3 = _mm256_comp_fmadd_ps(_val3, _w0, _sum3); r0 += 4; k0 += 8; } _mm256_store_ps(output0_tm, _sum0); _mm256_store_ps(output0_tm + 8, _sum1); _mm256_store_ps(output0_tm + 8 * 2, _sum2); _mm256_store_ps(output0_tm + 8 * 3, _sum3); output0_tm += 8 * 4; } for (; i + 1 < tiles; i += 2) { const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2); const float* k0 = kernel0_tm.row(r); int nn = inch * 8; // inch always > 0 __m256 _sum0 = _mm256_setzero_ps(); __m256 _sum1 = _mm256_setzero_ps(); for (int j = 0; j < nn; j++) { __m256 _w0 = _mm256_load_ps(k0); __m256 _val0 = _mm256_broadcast_ss(r0); __m256 _val1 = _mm256_broadcast_ss(r0 + 1); _sum0 = _mm256_comp_fmadd_ps(_val0, _w0, _sum0); _sum1 = _mm256_comp_fmadd_ps(_val1, _w0, _sum1); r0 += 2; k0 += 8; } _mm256_store_ps(output0_tm, _sum0); _mm256_store_ps(output0_tm + 8, _sum1); output0_tm += 8 * 2; } for (; i < tiles; i++) { const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + 
i % 12 % 2); const float* k0 = kernel0_tm.row(r); int nn = inch * 8; // inch always > 0 __m256 _sum0 = _mm256_setzero_ps(); for (int j = 0; j < nn; j++) { __m256 _w0 = _mm256_load_ps(k0); __m256 _val0 = _mm256_broadcast_ss(r0); _sum0 = _mm256_comp_fmadd_ps(_val0, _w0, _sum0); r0 += 1; k0 += 8; } _mm256_store_ps(output0_tm, _sum0); output0_tm += 8; } } } }
omp_get_num_procs.c
// omp_get_num_procs.c
// compile with: /openmp
/* #############################################################################
## DESCRIPTION: Demo of omp_get_num_procs() showing the available CPUs.
## NAME:        omp_get_num_procs.c
## AUTHOR:      Lucca Pessoa da Silva Matos
## DATE:        10.04.2020
## VERSION:     1.0
## EXAMPLE:
##    PS C:\> gcc -fopenmp -o omp_get_num_procs omp_get_num_procs.c
##############################################################################*/

// =============================================================================
// LIBRARIES
// =============================================================================
#include <omp.h>
#include <stdio.h>
#include <locale.h>
#include <stdlib.h>

// =============================================================================
// MACROS
// =============================================================================
#define NUM_THREADS 4

// =============================================================================
// FUNCTIONS
// =============================================================================

// Switch to the Portuguese locale so accented console output renders correctly.
void set_portuguese(){
  setlocale(LC_ALL, "Portuguese");
}

// Print the program banner.
void cabecalho(){
  printf("\n**************************************************");
  printf("\n* *");
  printf("\n* *");
  printf("\n* PROGRAMACAO PARALELA COM OPENMP - LUCCA PESSOA *");
  printf("\n* *");
  printf("\n* *");
  printf("\n**************************************************\n");
}

// =============================================================================
// MAIN
// =============================================================================
int main(int argc, char const *argv[]){
  set_portuguese();
  cabecalho();

  // Request a fixed-size team for the parallel region below.
  omp_set_num_threads(NUM_THREADS);

  printf("\nQuantidade de CPU(s) disponíveis no momento: %d...\n", omp_get_num_procs());
  printf("\n1 - Estamos fora do contexto paralelo...\n\n");

  // Fork: each thread reports its own id and the team size.
  #pragma omp parallel
  {
    int team_size = omp_get_num_threads();
    int tid = omp_get_thread_num();
    printf("Eu sou a Thread %d de um total de %d\n", tid, team_size);
  }
  // Join

  printf("\n2 - Estamos fora do contexto paralelo...\n\n");
  return 0;
}
bezier_post_utility.h
// // Project Name: Kratos // Last Modified by: $Author: hbui $ // Date: $Date: 2013-10-12 $ // Revision: $Revision: 1.0 $ // // #if !defined(KRATOS_BEZIER_POST_UTILITY_H_INCLUDED ) #define KRATOS_BEZIER_POST_UTILITY_H_INCLUDED // System includes #include <string> #include <vector> #include <iostream> // External includes #include <omp.h> #include "boost/progress.hpp" #ifdef ISOGEOMETRIC_USE_MPI #include "mpi.h" #endif // Project includes #include "includes/define.h" #include "includes/model_part.h" #include "includes/node.h" #include "includes/element.h" #include "includes/properties.h" #include "includes/ublas_interface.h" #include "includes/legacy_structural_app_vars.h" #include "spaces/ublas_space.h" #include "linear_solvers/linear_solver.h" #include "utilities/openmp_utils.h" #include "utilities/auto_collapse_spatial_binning.h" #include "custom_geometries/isogeometric_geometry.h" #include "custom_utilities/isogeometric_post_utility.h" #include "isogeometric_application/isogeometric_application.h" // #define DEBUG_LEVEL1 //#define DEBUG_LEVEL2 //#define DEBUG_MULTISOLVE //#define DEBUG_GENERATE_MESH // #define ENABLE_PROFILING namespace Kratos { ///@addtogroup IsogeometricApplication ///@{ ///@name Kratos Globals ///@{ ///@} ///@name Type Definitions ///@{ ///@} ///@name Enum's ///@{ ///@} ///@name Functions ///@{ ///@} ///@name Kratos Classes ///@{ template<typename TDataType> struct BezierPostUtility_Helper { typedef typename Element::GeometryType GeometryType; typedef typename GeometryType::CoordinatesArrayType CoordinatesArrayType; /// Interpolation on element static TDataType& CalculateOnPoint(const Variable<TDataType>& rVariable, TDataType& rResult, Element::Pointer& pElement, const CoordinatesArrayType& rCoordinates) { KRATOS_THROW_ERROR(std::logic_error, "Error calling unimplemented function", __FUNCTION__) } }; /// Short class definition. /** An advanced utility to export directly the FEM mesh out from isogeometric Bezier mesh. 
Each Bezier element will generate its own set of FEM elements. Therefore a large
amount of nodes and elements may be generated. One shall carefully use this
utility for large problem. Previously, this class is named IsogeometricPostUtility.
*/
class BezierPostUtility : public IsogeometricPostUtility
{
public:
    ///@name Type Definitions
    ///@{

    typedef boost::numeric::ublas::vector<double> ValuesContainerType;
    typedef boost::numeric::ublas::matrix<double> ValuesArrayContainerType;
    typedef typename ModelPart::NodesContainerType NodesArrayType;
    typedef typename ModelPart::ElementsContainerType ElementsArrayType;
    typedef typename ModelPart::ConditionsContainerType ConditionsArrayType;
    typedef typename Element::GeometryType GeometryType;
    typedef typename GeometryType::PointType NodeType;
    typedef IsogeometricGeometry<NodeType> IsogeometricGeometryType;
    typedef typename GeometryType::IntegrationPointsArrayType IntegrationPointsArrayType;
    typedef typename GeometryType::CoordinatesArrayType CoordinatesArrayType;
    typedef typename NodeType::DofsContainerType DofsContainerType;
    // Serial ublas spaces used to assemble/solve the local L2-projection systems
    typedef UblasSpace<double, CompressedMatrix, Vector> SerialSparseSpaceType;
    typedef UblasSpace<double, Matrix, Vector> SerialDenseSpaceType;
    typedef LinearSolver<SerialSparseSpaceType, SerialDenseSpaceType> LinearSolverType;
    typedef std::size_t IndexType;

    /// Pointer definition of BezierPostUtility
    KRATOS_CLASS_POINTER_DEFINITION(BezierPostUtility);

    ///@}
    ///@name Life Cycle
    ///@{

    /// Default constructor.
    BezierPostUtility()
    {
    }

    /// Destructor.
    virtual ~BezierPostUtility()
    {
    }

    ///@}
    ///@name Operators
    ///@{

    ///@}
    ///@name Operations
    ///@{

    /// Synchronize post model_part with the reference model_part.
    /// For every node of r_model_part_post, the stored PARENT_ELEMENT_ID and
    /// LOCAL_COORDINATES identify where in r_model_part the nodal value must be
    /// interpolated from; the interpolated value is written back to the node.
    template<class TVariableType>
    void TransferNodalResults(
        const TVariableType& rThisVariable,
        ModelPart& r_model_part,
        ModelPart& r_model_part_post) const
    {
#ifdef ENABLE_PROFILING
        double start_compute = OpenMPUtils::GetCurrentTime();
#endif

        NodesArrayType& pTargetNodes = r_model_part_post.Nodes();
        ElementsArrayType& pElements = r_model_part.Elements();

        typename TVariableType::Type Results;
        CoordinatesArrayType LocalPos;
        int ElementId;

        // #pragma omp parallel for
        //TODO: check this. This is not parallelized.
        for(typename NodesArrayType::ptr_iterator it = pTargetNodes.ptr_begin(); it != pTargetNodes.ptr_end(); ++it)
        {
            // Look up the source element and the local point stored on the post node
            ElementId = (*it)->GetSolutionStepValue(PARENT_ELEMENT_ID);
            noalias(LocalPos) = (*it)->GetSolutionStepValue(LOCAL_COORDINATES);
            Results = BezierPostUtility_Helper<typename TVariableType::Type>::CalculateOnPoint(rThisVariable, Results, pElements(ElementId), LocalPos);
            (*it)->GetSolutionStepValue(rThisVariable) = Results;
        }

#ifdef ENABLE_PROFILING
        double end_compute = OpenMPUtils::GetCurrentTime();
        std::cout << "Transfer nodal point results for " << rThisVariable.Name() << " completed: " << end_compute - start_compute << " s" << std::endl;
#else
        std::cout << "Transfer nodal point results for " << rThisVariable.Name() << " completed" << std::endl;
#endif
    }

    /// Synchronize post model_part with the reference model_part.
    /// First projects the integration point values of rThisVariable onto the
    /// nodes of r_model_part (L2 projection via pSolver), then interpolates
    /// those nodal values onto r_model_part_post.
    template<class TVariableType>
    void TransferIntegrationPointResults(
        const TVariableType& rThisVariable,
        ModelPart& r_model_part,
        ModelPart& r_model_part_post,
        LinearSolverType::Pointer pSolver) const
    {
#ifdef ENABLE_PROFILING
        double start_compute = OpenMPUtils::GetCurrentTime();
        std::cout << "########################################" << std::endl;
        std::cout << "Transfer integration point results for " << rThisVariable.Name() << " starts" << std::endl;
#endif

        // firstly transfer rThisVariable from integration points of reference model_part to its nodes
        TransferVariablesToNodes(pSolver, r_model_part, rThisVariable);

        // secondly transfer new nodal variables results to the post model_part
        TransferNodalResults(rThisVariable, r_model_part, r_model_part_post);

#ifdef ENABLE_PROFILING
        double end_compute = OpenMPUtils::GetCurrentTime();
        std::cout << "Transfer integration point results for " << rThisVariable.Name() << " completed: " << end_compute - start_compute << "s" << std::endl;
        std::cout << "########################################" << std::endl;
#endif
    }

    /// Transfer the variable to nodes for model_part.
    /// Thin public wrapper around the private overload taking the whole model_part.
    template<class TVariableType>
    void TransferVariablesToNodes(
        const TVariableType& rThisVariable,
        ModelPart& r_model_part,
        LinearSolverType::Pointer pSolver) const
    {
#ifdef ENABLE_PROFILING
        double start_compute = OpenMPUtils::GetCurrentTime();
        std::cout << "########################################" << std::endl;
        std::cout << "Transfer integration point results to nodes for " << rThisVariable.Name() << " starts" << std::endl;
#endif

        TransferVariablesToNodes(pSolver, r_model_part, rThisVariable);

#ifdef ENABLE_PROFILING
        double end_compute = OpenMPUtils::GetCurrentTime();
        std::cout << "Transfer integration point results to nodes for " << rThisVariable.Name() << " completed: " << end_compute - start_compute << "s" << std::endl;
        std::cout << "########################################" << std::endl;
#endif
    }

    /// Transfer the variable to nodes for model_part.
    /// Thin public wrapper around the private overload restricted to ElementsArray.
    template<class TVariableType>
    void TransferVariablesToNodes(
        const TVariableType& rThisVariable,
        ModelPart& r_model_part,
        ElementsArrayType& ElementsArray,
        LinearSolverType::Pointer pSolver) const
    {
#ifdef ENABLE_PROFILING
        double start_compute = OpenMPUtils::GetCurrentTime();
        std::cout << "########################################" << std::endl;
        std::cout << "Transfer integration point results to nodes for " << rThisVariable.Name() << " starts" << std::endl;
#endif

        TransferVariablesToNodes(pSolver, r_model_part, ElementsArray, rThisVariable);

#ifdef ENABLE_PROFILING
        double end_compute = OpenMPUtils::GetCurrentTime();
        std::cout << "Transfer integration point results to nodes for " << rThisVariable.Name() << " completed: " << end_compute - start_compute << "s" << std::endl;
        std::cout << "########################################" << std::endl;
#endif
    }

    ///@}
    ///@name Access
    ///@{

    ///@}
    ///@name Inquiry
    ///@{

    ///@}
    ///@name Input and output
    ///@{

    /// Turn back information as a string.
    virtual std::string Info() const
    {
        std::stringstream buffer;
        buffer << "BezierPostUtility";
        return buffer.str();
    }

    /// Print information about this object.
    virtual void PrintInfo(std::ostream& rOStream) const
    {
        rOStream << "BezierPostUtility";
    }

    /// Print object's data.
    virtual void PrintData(std::ostream& rOStream) const
    {}

    ///@}
    ///@name Friends
    ///@{

    ///@}

protected:
    ///@name Protected static Member Variables
    ///@{

    ///@}
    ///@name Protected member Variables
    ///@{

    ///@}
    ///@name Protected Operators
    ///@{

    ///@}
    ///@name Protected Operations
    ///@{

    ///@}
    ///@name Protected Access
    ///@{

    ///@}
    ///@name Protected Inquiry
    ///@{

    ///@}
    ///@name Protected LifeCycle
    ///@{

    ///@}

private:
    ///@name Static Member Variables
    ///@{

    ///@}
    ///@name Member Variables
    ///@{

    ///@}
    ///@name Private Operators
    ///@{

    ///@}
    ///@name Private Operations
    ///@{

    /**
     * Transfer of rThisVariable defined on integration points to corresponding
     * nodal values. The transformation is done in a form that ensures a minimization
     * of L_2-norm error (/sum{rThisVariable - f(x)}) whereas
     * f(x) = /sum{shape_func_i * rThisVariable_i}
     * @ref Jiao&Heath: "Common-refinement-based data transfer...", Int.
     * Journal for numer. meth. in eng. 61 (2004) 2402--2427
     * WARNING: this may cause segmentation faults as the respective variables
     * will be created on nodal level while they are originally intended to be
     * stored on integration points!
     * @param pSolver       the solver used for solving the local system matrix
     * @param r_model_part  model_part that we wish to transfer the result from its integration points to its nodes
     * @param ElementsArray subset of elements defining the mesh the transfer is restricted to
     * @param rThisVariable the variable needed to transfer the respective values
     * @param check_active  if false the activeness of the elements will not be checked; true otherwise
     * REMARKS: this subroutine will only transfer the variables to nodes connecting with the mesh defined by ElementsArray
     */
    void TransferVariablesToNodes(LinearSolverType::Pointer& pSolver,
                                  ModelPart& r_model_part,
                                  ElementsArrayType& ElementsArray,
                                  const Variable<double>& rThisVariable,
                                  const bool& check_active = false) const;

    /**
     * Same L2-projection transfer as above, applied to the whole model_part.
     * @ref Jiao&Heath: "Common-refinement-based data transfer...", Int.
     * Journal for numer. meth. in eng. 61 (2004) 2402--2427
     * WARNING: this may cause segmentation faults as the respective variables
     * will be created on nodal level while they are originally intended to be
     * stored on integration points!
     * @param pSolver       the solver used for solving the local system matrix
     * @param r_model_part  model_part that we wish to transfer the result from its integration points to its nodes
     * @param rThisVariable the variable needed to transfer the respective values
     * REMARKS: + this subroutine will transfer the variables to nodes connecting with the model_part.
     *            Shall not use this subroutine if there are many types of element in the model_part.
     *          + the activeness of the element will not be checked
     */
    void TransferVariablesToNodes(LinearSolverType::Pointer& pSolver,
                                  ModelPart& r_model_part,
                                  const Variable<double>& rThisVariable,
                                  const bool& check_active = false) const;

    /**
     * L2-projection transfer for Vector variables, restricted to ElementsArray.
     * @ref Jiao&Heath: "Common-refinement-based data transfer...", Int.
     * Journal for numer. meth. in eng. 61 (2004) 2402--2427
     * WARNING: this may cause segmentation faults as the respective variables
     * will be created on nodal level while they are originally intended to be
     * stored on integration points!
     * @param pSolver       the solver used for solving the local system matrix
     * @param r_model_part  model_part that we wish to transfer the result from its integration points to its nodes
     * @param ElementsArray subset of elements defining the mesh the transfer is restricted to
     * @param rThisVariable the variable needed to transfer the respective values
     * @param ncomponents   number of components of the nodal vector
     * @param check_active  if false the activeness of the elements will not be checked; true otherwise
     * REMARKS: this subroutine will only transfer the variables to nodes connecting with the mesh defined by ElementsArray
     */
    void TransferVariablesToNodes(LinearSolverType::Pointer& pSolver,
                                  ModelPart& r_model_part,
                                  ElementsArrayType& ElementsArray,
                                  const Variable<Vector>& rThisVariable,
                                  const std::size_t& ncomponents = 6,
                                  const bool& check_active = false) const;

    /**
     * L2-projection transfer for Vector variables, applied to the whole model_part.
     * @ref Jiao&Heath: "Common-refinement-based data transfer...", Int.
     * Journal for numer. meth. in eng. 61 (2004) 2402--2427
     * WARNING: this may cause segmentation faults as the respective variables
     * will be created on nodal level while they are originally intended to be
     * stored on integration points!
     * REMARKS: currently this method only works with 6-components variable like STRESSES, PRESTRESS, etc
     * @param pSolver       the solver used for solving the local system matrix
     * @param r_model_part  model_part that we wish to transfer the result from its integration points to its nodes
     * @param rThisVariable the variable needed to transfer the respective values
     * @param ncomponents   number of components of the nodal vector
     * REMARKS: + this subroutine will transfer the variables to nodes connecting with the model_part.
     *            Shall not use this subroutine if there are many types of element in the model_part.
     *          + the activeness of the element will not be checked
     */
    void TransferVariablesToNodes(LinearSolverType::Pointer& pSolver,
                                  ModelPart& r_model_part,
                                  const Variable<Vector>& rThisVariable,
                                  const std::size_t& ncomponents = 6,
                                  const bool& check_active = false) const;

    /**
     * L2-projection transfer for array_1d<double, 3> variables, restricted to ElementsArray.
     * @ref Jiao&Heath: "Common-refinement-based data transfer...", Int.
     * Journal for numer. meth. in eng. 61 (2004) 2402--2427
     * WARNING: this may cause segmentation faults as the respective variables
     * will be created on nodal level while they are originally intended to be
     * stored on integration points!
     * @param pSolver       the solver used for solving the local system matrix
     * @param r_model_part  model_part that we wish to transfer the result from its integration points to its nodes
     * @param ElementsArray subset of elements defining the mesh the transfer is restricted to
     * @param rThisVariable the variable needed to transfer the respective values
     * @param ncomponents   number of components of the nodal vector
     * @param check_active  if false the activeness of the elements will not be checked; true otherwise
     * REMARKS: this subroutine will only transfer the variables to nodes connecting with the mesh defined by ElementsArray
     */
    void TransferVariablesToNodes(LinearSolverType::Pointer& pSolver,
                                  ModelPart& r_model_part,
                                  ElementsArrayType& ElementsArray,
                                  const Variable<array_1d<double, 3>>& rThisVariable,
                                  const std::size_t& ncomponents = 3,
                                  const bool& check_active = false) const;

    /**
     * L2-projection transfer for array_1d<double, 3> variables, applied to the whole model_part.
     * @ref Jiao&Heath: "Common-refinement-based data transfer...", Int.
     * Journal for numer. meth. in eng. 61 (2004) 2402--2427
     * WARNING: this may cause segmentation faults as the respective variables
     * will be created on nodal level while they are originally intended to be
     * stored on integration points!
     * REMARKS: currently this method only works with 3-components variable like WATER_FLOW, HEAT_FLOW, etc
     * @param pSolver       the solver used for solving the local system matrix
     * @param r_model_part  model_part that we wish to transfer the result from its integration points to its nodes
     * @param rThisVariable the variable needed to transfer the respective values
     * @param ncomponents   number of components of the nodal vector
     * REMARKS: + this subroutine will transfer the variables to nodes connecting with the model_part.
     *            Shall not use this subroutine if there are many types of element in the model_part.
     *          + the activeness of the element will not be checked
     */
    void TransferVariablesToNodes(LinearSolverType::Pointer& pSolver,
                                  ModelPart& r_model_part,
                                  const Variable<array_1d<double, 3>>& rThisVariable,
                                  const std::size_t& ncomponents = 3,
                                  const bool& check_active = false) const;

    ///@}
    ///@name Private Access
    ///@{

    ///@}
    ///@name Private Inquiry
    ///@{

    ///@}
    ///@name Un accessible methods
    ///@{

    /// Assignment operator (private: utility is not meant to be copied).
    BezierPostUtility& operator=(BezierPostUtility const& rOther)
    {
        return *this;
    }

    /// Copy constructor (private: utility is not meant to be copied).
    BezierPostUtility(BezierPostUtility const& rOther)
    {
    }

    ///@}
}; // Class BezierPostUtility

///@}

/// Specialization: scalar interpolation (implemented in the source file).
template<>
struct BezierPostUtility_Helper<double>
{
    typedef typename Element::GeometryType GeometryType;
    typedef typename GeometryType::CoordinatesArrayType CoordinatesArrayType;

    /// Interpolation on element
    static double& CalculateOnPoint(const Variable<double>& rVariable,
            double& rResult, Element::Pointer& pElement, const CoordinatesArrayType& rCoordinates);
};

/// Specialization: Vector interpolation (implemented in the source file).
template<>
struct BezierPostUtility_Helper<Vector>
{
    typedef typename Element::GeometryType GeometryType;
    typedef typename GeometryType::CoordinatesArrayType CoordinatesArrayType;

    /// Interpolation on element
    static Vector& CalculateOnPoint(const Variable<Vector>& rVariable,
            Vector& rResult, Element::Pointer& pElement, const CoordinatesArrayType& rCoordinates);
};

/// Specialization: 3-component array interpolation (implemented in the source file).
template<>
struct BezierPostUtility_Helper<array_1d<double, 3> >
{
    typedef typename Element::GeometryType GeometryType;
    typedef typename GeometryType::CoordinatesArrayType CoordinatesArrayType;

    /// Interpolation on element
    static array_1d<double, 3>& CalculateOnPoint(const Variable<array_1d<double, 3> >& rVariable,
            array_1d<double, 3>& rResult, Element::Pointer& pElement, const CoordinatesArrayType& rCoordinates);
};

///@name Type Definitions
///@{

///@}
///@name Input and output
///@{

/// input stream function
inline std::istream& operator >>(std::istream& rIStream, BezierPostUtility& rThis)
{
    return rIStream;
}

/// output stream function
inline std::ostream& operator <<(std::ostream& rOStream, const BezierPostUtility& rThis)
{
    rThis.PrintInfo(rOStream);
    rOStream << std::endl;
    rThis.PrintData(rOStream);
    return rOStream;
}
///@}

///@} addtogroup block

}// namespace Kratos.

#undef DEBUG_LEVEL1
#undef DEBUG_LEVEL2
#undef DEBUG_MULTISOLVE
#undef DEBUG_GENERATE_MESH
#undef ENABLE_PROFILING

#endif
GB_unaryop__minv_uint32_uint8.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__minv_uint32_uint8
// op(A') function:  GB_tran__minv_uint32_uint8

// C type:   uint32_t
// A type:   uint8_t
// cast:     uint32_t cij = (uint32_t) aij
// unaryop:  cij = GB_IMINV_UNSIGNED (aij, 32)

// A (input) entry type
#define GB_ATYPE \
    uint8_t

// C (output) entry type
#define GB_CTYPE \
    uint32_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint8_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = GB_IMINV_UNSIGNED (x, 32) ;

// casting
#define GB_CASTING(z, x) \
    uint32_t z = (uint32_t) x ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)  \
{                          \
    /* aij = Ax [pA] */    \
    GB_GETA (aij, Ax, pA) ;  \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ;  \
    GB_OP (GB_CX (pC), x) ;  \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_MINV || GxB_NO_UINT32 || GxB_NO_UINT8)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Element-wise apply over anz entries, parallelized with a static OpenMP
// schedule since every iteration does the same constant amount of work.
GrB_Info GB_unop__minv_uint32_uint8
(
    uint32_t *restrict Cx,
    const uint8_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// Body is generated by the shared transpose template, parameterized via the
// GB_* macros defined above.
GrB_Info GB_tran__minv_uint32_uint8
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
blake2bp.c
/*
   BLAKE2 reference source code package - optimized C implementations

   Copyright 2012, Samuel Neves <sneves@dei.uc.pt>. You may use this under the
   terms of the CC0, the OpenSSL Licence, or the Apache Public License 2.0, at
   your option. The terms of these licenses can be found at:

   - CC0 1.0 Universal : http://creativecommons.org/publicdomain/zero/1.0
   - OpenSSL license   : https://www.openssl.org/source/license.html
   - Apache 2.0        : http://www.apache.org/licenses/LICENSE-2.0

   More information about the BLAKE2 hash function can be found at
   https://blake2.net.
*/

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>

#if defined(_OPENMP)
#include <omp.h>
#endif

#include "blake2.h"
#include "blake2-impl.h"

/* BLAKE2bp hashes the input in 4 interleaved lanes (leaves) and then hashes
   the 4 leaf digests with a root instance (a 2-level tree). */
#define PARALLELISM_DEGREE 4

/* Initialize one leaf instance of the tree.
   `offset` is the leaf index (0..PARALLELISM_DEGREE-1), stored in the
   node_offset parameter field so each lane produces a distinct digest. */
static inline int blake2bp_init_leaf( blake2b_state *S, uint8_t outlen, uint8_t keylen, uint64_t offset )
{
  blake2b_param P[1];
  P->digest_length = outlen;
  P->key_length = keylen;
  P->fanout = PARALLELISM_DEGREE;
  P->depth = 2;
  P->leaf_length = 0;
  P->node_offset = offset;
  P->node_depth = 0; /* leaves live at depth 0 */
  P->inner_length = BLAKE2B_OUTBYTES;
  memset( P->reserved, 0, sizeof( P->reserved ) );
  memset( P->salt, 0, sizeof( P->salt ) );
  memset( P->personal, 0, sizeof( P->personal ) );
  return blake2b_init_param( S, P );
}

/* Initialize the root instance that combines the four leaf digests. */
static inline int blake2bp_init_root( blake2b_state *S, uint8_t outlen, uint8_t keylen )
{
  blake2b_param P[1];
  P->digest_length = outlen;
  P->key_length = keylen;
  P->fanout = PARALLELISM_DEGREE;
  P->depth = 2;
  P->leaf_length = 0;
  P->node_offset = 0;
  P->node_depth = 1; /* root lives at depth 1 */
  P->inner_length = BLAKE2B_OUTBYTES;
  memset( P->reserved, 0, sizeof( P->reserved ) );
  memset( P->salt, 0, sizeof( P->salt ) );
  memset( P->personal, 0, sizeof( P->personal ) );
  return blake2b_init_param( S, P );
}

/* Initialize an unkeyed BLAKE2bp state producing `outlen` bytes.
   Returns 0 on success, -1 on invalid parameters. */
int blake2bp_init( blake2bp_state *S, const uint8_t outlen )
{
  if( !outlen || outlen > BLAKE2B_OUTBYTES ) return -1;

  memset( S->buf, 0, sizeof( S->buf ) );
  S->buflen = 0;

  if( blake2bp_init_root( S->R, outlen, 0 ) < 0 )
    return -1;

  for( size_t i = 0; i < PARALLELISM_DEGREE; ++i )
    if( blake2bp_init_leaf( S->S[i], outlen, 0, i ) < 0 ) return -1;

  /* The root and the final leaf are flagged as "last node" per the tree mode. */
  S->R->last_node = 1;
  S->S[PARALLELISM_DEGREE - 1]->last_node = 1;
  return 0;
}

/* Initialize a keyed BLAKE2bp state. The key is zero-padded to one full
   block and absorbed by every leaf. Returns 0 on success, -1 on bad args. */
int blake2bp_init_key( blake2bp_state *S, const uint8_t outlen, const void *key, const uint8_t keylen )
{
  if( !outlen || outlen > BLAKE2B_OUTBYTES ) return -1;

  if( !key || !keylen || keylen > BLAKE2B_KEYBYTES ) return -1;

  memset( S->buf, 0, sizeof( S->buf ) );
  S->buflen = 0;

  if( blake2bp_init_root( S->R, outlen, keylen ) < 0 )
    return -1;

  for( size_t i = 0; i < PARALLELISM_DEGREE; ++i )
    if( blake2bp_init_leaf( S->S[i], outlen, keylen, i ) < 0 ) return -1;

  S->R->last_node = 1;
  S->S[PARALLELISM_DEGREE - 1]->last_node = 1;
  {
    uint8_t block[BLAKE2B_BLOCKBYTES];
    memset( block, 0, BLAKE2B_BLOCKBYTES );
    memcpy( block, key, keylen );

    for( size_t i = 0; i < PARALLELISM_DEGREE; ++i )
      blake2b_update( S->S[i], block, BLAKE2B_BLOCKBYTES );

    secure_zero_memory( block, BLAKE2B_BLOCKBYTES ); /* Burn the key from stack */
  }
  return 0;
}

/* Absorb `inlen` bytes. Input is striped across the 4 leaves in
   BLAKE2B_BLOCKBYTES units; a partial stripe is buffered in S->buf for the
   next call (or for blake2bp_final). Always returns 0. */
int blake2bp_update( blake2bp_state *S, const uint8_t *in, uint64_t inlen )
{
  size_t left = S->buflen;
  size_t fill = sizeof( S->buf ) - left;

  /* Complete and flush a previously buffered partial stripe first. */
  if( left && inlen >= fill )
  {
    memcpy( S->buf + left, in, fill );

    for( size_t i = 0; i < PARALLELISM_DEGREE; ++i )
      blake2b_update( S->S[i], S->buf + i * BLAKE2B_BLOCKBYTES, BLAKE2B_BLOCKBYTES );

    in += fill;
    inlen -= fill;
    left = 0;
  }

  /* Each lane id__ consumes block id__, id__+4, id__+8, ... of the input.
     NOTE(review): the OpenMP path presumably relies on the runtime granting
     exactly PARALLELISM_DEGREE threads — confirm for the target runtime. */
#if defined(_OPENMP)
#pragma omp parallel shared(S), num_threads(PARALLELISM_DEGREE)
#else
  for( size_t id__ = 0; id__ < PARALLELISM_DEGREE; ++id__ )
#endif
  {
#if defined(_OPENMP)
    size_t id__ = omp_get_thread_num();
#endif
    uint64_t inlen__ = inlen;
    const uint8_t *in__ = ( const uint8_t * )in;
    in__ += id__ * BLAKE2B_BLOCKBYTES;

    while( inlen__ >= PARALLELISM_DEGREE * BLAKE2B_BLOCKBYTES )
    {
      blake2b_update( S->S[id__], in__, BLAKE2B_BLOCKBYTES );
      in__ += PARALLELISM_DEGREE * BLAKE2B_BLOCKBYTES;
      inlen__ -= PARALLELISM_DEGREE * BLAKE2B_BLOCKBYTES;
    }
  }

  /* Buffer whatever did not form a full 4-block stripe. */
  in += inlen - inlen % ( PARALLELISM_DEGREE * BLAKE2B_BLOCKBYTES );
  inlen %= PARALLELISM_DEGREE * BLAKE2B_BLOCKBYTES;

  if( inlen > 0 )
    memcpy( S->buf + left, in, inlen );

  S->buflen = left + inlen;
  return 0;
}

/* Flush the buffered tail into the leaves, finalize each leaf, and hash the
   four leaf digests with the root. Returns blake2b_final's status. */
int blake2bp_final( blake2bp_state *S, uint8_t *out, const uint8_t outlen )
{
  uint8_t hash[PARALLELISM_DEGREE][BLAKE2B_OUTBYTES];

  for( size_t i = 0; i < PARALLELISM_DEGREE; ++i )
  {
    if( S->buflen > i * BLAKE2B_BLOCKBYTES )
    {
      size_t left = S->buflen - i * BLAKE2B_BLOCKBYTES;

      if( left > BLAKE2B_BLOCKBYTES ) left = BLAKE2B_BLOCKBYTES;

      blake2b_update( S->S[i], S->buf + i * BLAKE2B_BLOCKBYTES, left );
    }

    blake2b_final( S->S[i], hash[i], BLAKE2B_OUTBYTES );
  }

  for( size_t i = 0; i < PARALLELISM_DEGREE; ++i )
    blake2b_update( S->R, hash[i], BLAKE2B_OUTBYTES );

  return blake2b_final( S->R, out, outlen );
}

/* One-shot BLAKE2bp: hash `inlen` bytes of `in` (optionally keyed) into
   `out`. Returns 0 on success, -1 on invalid parameters. */
int blake2bp( uint8_t *out, const void *in, const void *key, uint8_t outlen, uint64_t inlen, uint8_t keylen )
{
  uint8_t hash[PARALLELISM_DEGREE][BLAKE2B_OUTBYTES];
  blake2b_state S[PARALLELISM_DEGREE][1];
  blake2b_state FS[1];

  /* Verify parameters */
  if ( NULL == in && inlen > 0 ) return -1;

  if ( NULL == out ) return -1;

  if( NULL == key && keylen > 0 ) return -1;

  if( !outlen || outlen > BLAKE2B_OUTBYTES ) return -1;

  if( keylen > BLAKE2B_KEYBYTES ) return -1;

  for( size_t i = 0; i < PARALLELISM_DEGREE; ++i )
    if( blake2bp_init_leaf( S[i], outlen, keylen, i ) < 0 ) return -1;

  S[PARALLELISM_DEGREE - 1]->last_node = 1; // mark last node

  if( keylen > 0 )
  {
    uint8_t block[BLAKE2B_BLOCKBYTES];
    memset( block, 0, BLAKE2B_BLOCKBYTES );
    memcpy( block, key, keylen );

    for( size_t i = 0; i < PARALLELISM_DEGREE; ++i )
      blake2b_update( S[i], block, BLAKE2B_BLOCKBYTES );

    secure_zero_memory( block, BLAKE2B_BLOCKBYTES ); /* Burn the key from stack */
  }

  /* Each lane hashes its stripe of the input, including its partial tail. */
#if defined(_OPENMP)
#pragma omp parallel shared(S,hash), num_threads(PARALLELISM_DEGREE)
#else
  for( size_t id__ = 0; id__ < PARALLELISM_DEGREE; ++id__ )
#endif
  {
#if defined(_OPENMP)
    size_t id__ = omp_get_thread_num();
#endif
    uint64_t inlen__ = inlen;
    const uint8_t *in__ = ( const uint8_t * )in;
    in__ += id__ * BLAKE2B_BLOCKBYTES;

    while( inlen__ >= PARALLELISM_DEGREE * BLAKE2B_BLOCKBYTES )
    {
      blake2b_update( S[id__], in__, BLAKE2B_BLOCKBYTES );
      in__ += PARALLELISM_DEGREE * BLAKE2B_BLOCKBYTES;
      inlen__ -= PARALLELISM_DEGREE * BLAKE2B_BLOCKBYTES;
    }

    if( inlen__ > id__ * BLAKE2B_BLOCKBYTES )
    {
      const size_t left = inlen__ - id__ * BLAKE2B_BLOCKBYTES;
      const size_t len = left <= BLAKE2B_BLOCKBYTES ? left : BLAKE2B_BLOCKBYTES;
      blake2b_update( S[id__], in__, len );
    }

    blake2b_final( S[id__], hash[id__], BLAKE2B_OUTBYTES );
  }

  if( blake2bp_init_root( FS, outlen, keylen ) < 0 )
    return -1;

  FS->last_node = 1; // Mark as last node

  for( size_t i = 0; i < PARALLELISM_DEGREE; ++i )
    blake2b_update( FS, hash[i], BLAKE2B_OUTBYTES );

  return blake2b_final( FS, out, outlen );
}

#if defined(BLAKE2BP_SELFTEST)
#include <string.h>
#include "blake2-kat.h"
/* Self-test: check the streaming API against the keyed known-answer vectors. */
int main( int argc, char **argv )
{
  uint8_t key[BLAKE2B_KEYBYTES];
  uint8_t buf[KAT_LENGTH];

  for( size_t i = 0; i < BLAKE2B_KEYBYTES; ++i )
    key[i] = ( uint8_t )i;

  for( size_t i = 0; i < KAT_LENGTH; ++i )
    buf[i] = ( uint8_t )i;

  for( size_t i = 0; i < KAT_LENGTH; ++i )
  {
    uint8_t hash[BLAKE2B_OUTBYTES];
    //blake2bp( hash, buf, key, BLAKE2B_OUTBYTES, i, BLAKE2B_KEYBYTES );
    blake2bp_state S[1];
    blake2bp_init_key( S, BLAKE2B_OUTBYTES, key, BLAKE2B_KEYBYTES );
    blake2bp_update( S, buf, i );
    blake2bp_final( S, hash, BLAKE2B_OUTBYTES );

    if( 0 != memcmp( hash, blake2bp_keyed_kat[i], BLAKE2B_OUTBYTES ) )
    {
      puts( "error" );
      return -1;
    }
  }

  puts( "ok" );
  return 0;
}
#endif
prod-cons.c
#include <omp.h>
#include <stdio.h>
#define SIZE 100000

/* Producer/consumer handshake: 0 = data not ready, 1 = data published. */
int flag = 0;

/* Producer: fill A[0..N-1] with 1.0, then publish the array by raising `flag`.
   The first flush makes the array writes visible before the flag is set;
   the second makes the flag itself visible to the consumer. */
void fill_rand(int N,double A[])
{
  for(int i=0;i<N;++i)
    A[i] = 1;
  printf("Producer populated data\n");
  #pragma omp flush
  flag = 1;
  #pragma omp flush(flag)
}

/* Consumer: spin until the producer raises `flag`, then sum A[0..N-1].
   Returns the sum (N * 1.0 for this producer). */
double Sum_array(int N,double A[])
{
  double sum = 0.0;
  int p_flag;
  while(1)
  {
    p_flag = 0;
    #pragma omp flush(flag)   /* re-read the shared flag each iteration */
    p_flag = flag;
    if(p_flag) break;
  }
  #pragma omp flush           /* make the producer's array writes visible */
  for(int i=0;i<N;++i)
    sum = sum + A[i];
  printf("Consumer calculated Array sum\n" );
  return sum;
}

/* Run producer then consumer back-to-back on one thread. */
double seq_prod_cons()
{
  double A[SIZE];
  fill_rand(SIZE,A);
  double sum = Sum_array(SIZE,A);
  return sum;
}

/* Run producer and consumer concurrently, one per parallel section. */
double parallel_prod_cons()
{
  double A[SIZE];
  double sum = 0.0;
  /* BUGFIX: reset the handshake flag. seq_prod_cons() left `flag` at 1, so
     without this the consumer section would break out of its wait loop
     immediately and sum A before the producer filled it. */
  flag = 0;
  #pragma omp flush(flag)
  omp_set_num_threads(2);
  #pragma omp parallel sections
  {
    #pragma omp section
    fill_rand(SIZE,A);
    #pragma omp section
    sum = Sum_array(SIZE,A);
  }
  return sum;
}

/* Time the sequential and the parallel producer/consumer and report speedup. */
int main()
{
  double time_taken_seq,time_taken_parallel,sum=0.0;

  //Sequential Producer-Consumer
  time_taken_seq = omp_get_wtime();
  sum = seq_prod_cons();
  time_taken_seq = omp_get_wtime() - time_taken_seq;
  printf("In %lf seconds, Sequential code gives sum : %lf \n",time_taken_seq,sum);

  //Parallel Producer-Consumer
  time_taken_parallel = omp_get_wtime();
  sum = parallel_prod_cons();
  time_taken_parallel = omp_get_wtime() - time_taken_parallel;
  printf("In %lf seconds, Parallel code gives sum : %lf \n",time_taken_parallel,sum);

  /* BUGFIX: speedup = T_sequential / T_parallel; the original printed the
     inverse (parallel/seq), i.e. the slowdown. */
  printf("Speed up : %lf\n", time_taken_seq/time_taken_parallel);
}
reduction_teams.c
#include <stdio.h>
#include <omp.h>

#define N 1000000ll
#define SUM (N * (N-1)/2)

/* Verify the result copied back from the device against the closed-form sum.
   Increments *errors on mismatch and prints overall Success/Failure, taking
   the device-side error flag into account. */
void checkHost(int gpu_error, int* errors, long long a){
  int host_error = 0;
  if (a != SUM){
    printf ("Host - Incorrect result = %lld, expected = %lld!\n", a, SUM);
    host_error = 1;
    (*errors)++;
  }
  if(!host_error && !gpu_error){
    printf("-----> Success\n");
  }
  else{
    printf("-----> Failure\n");
  }
}

/* Offload sum(0..N-1) with a parallel-for reduction inside each team of the
   requested league, then check the result on the host. */
void reduction(int num_teams, int num_threads, int* errors){
  long long result = 0;
  int gpu_error = 0;
  /* BUGFIX: `gpu_error` must be mapped tofrom. Scalars referenced in a
     target region are firstprivate by default (OpenMP 4.5+), so the
     device-side write `gpu_error = 1` was silently lost and checkHost
     always saw 0. */
  #pragma omp target teams num_teams(num_teams) thread_limit(num_threads) map(tofrom: result) map(tofrom: gpu_error)
  {
    long long a, i;
    a = 0;
    #pragma omp parallel for reduction(+:a)
    for (i = 0; i < N; i++) {
      a += i;
    }
    /* Every team writes the same (correct) value; the last write wins. */
    result = a;
    if (a != SUM && omp_get_team_num() <= 50){ //limit teams that print
      printf ("GPU - Incorrect result = %lld, expected = %lld!\n", a, SUM);
      gpu_error = 1;
    }
  } //end of target
  checkHost(gpu_error, errors, result);
}

int main (void)
{
  int errors = 0;

  printf("\n---------- Multiple Teams ----------\n");

  printf("\nRunning 2 Teams with 64 thread per team\n");
  reduction(2, 64, &errors);

  printf("\nRunning 2 Teams with 128 threads per team\n");
  reduction(2, 128, &errors);

  printf("\nRunning 2 Teams with 256 threads per team\n");
  reduction(2, 256, &errors);

  printf("\nRunning 256 Teams with 256 threads per team (Limited to print first 50 teams)\n");
  reduction(256, 256, &errors);

  printf("\nRunning 4096 Teams with 64 threads per team (Limited to print first 50 teams)\n");
  reduction(4096, 64, &errors);

  printf("\nRunning 4096 Teams with 256 threads per team (Limited to print first 50 teams)\n");
  reduction(4096, 256, &errors);

  if(!errors){
    printf("\nRESULT: ALL RUNS SUCCESSFUL!\n");
    return 0;
  }
  else{
    printf("\nRESULT: FAILURES OCCURED!\n");
    return -1;
  }
}
pr86025.c
/* PR c++/86025 */
/* { dg-do compile } */
/* { dg-additional-options "-Wduplicated-branches" } */

/* Compiler regression test: both branches increment `i` but are guarded by
   different OpenMP critical directives (named vs. unnamed). Presumably the
   PR is that -Wduplicated-branches must not treat them as duplicates (or
   crash) — the test only requires a clean compile. */

int i;

void
foo (int x)
{
  if (x)
    {
#pragma omp critical (foo)
      i++;
    }
  else
    {
#pragma omp critical
      i++;
    }
}
ams.c
/*BHEADER**********************************************************************
 * Copyright (c) 2008,  Lawrence Livermore National Security, LLC.
 * Produced at the Lawrence Livermore National Laboratory.
 * This file is part of HYPRE.  See file COPYRIGHT for details.
 *
 * HYPRE is free software; you can redistribute it and/or modify it under the
 * terms of the GNU Lesser General Public License (as published by the Free
 * Software Foundation) version 2.1 dated February 1999.
 *
 * $Revision$
 ***********************************************************************EHEADER*/

#include "_hypre_parcsr_ls.h"
#include "float.h"
#include "ams.h"

/*--------------------------------------------------------------------------
 * hypre_ParCSRRelax
 *
 * Relaxation on the ParCSR matrix A with right-hand side f and
 * initial guess u. Possible values for relax_type are:
 *
 * 1 = l1-scaled Jacobi
 * 2 = l1-scaled block Gauss-Seidel/SSOR
 * 3 = Kaczmarz
 * 4 = truncated version of 2 (Remark 6.2 in smoothers paper)
 * x = BoomerAMG relaxation with relax_type = |x|
 * (16 = Cheby)
 *
 * The default value of relax_type is 2.
 *--------------------------------------------------------------------------*/

HYPRE_Int hypre_ParCSRRelax(/* matrix to relax with */
                            hypre_ParCSRMatrix *A,
                            /* right-hand side */
                            hypre_ParVector *f,
                            /* relaxation type */
                            HYPRE_Int relax_type,
                            /* number of sweeps */
                            HYPRE_Int relax_times,
                            /* l1 norms of the rows of A */
                            HYPRE_Real *l1_norms,
                            /* damping coefficient (usually <= 1) */
                            HYPRE_Real relax_weight,
                            /* SOR parameter (usually in (0,2) */
                            HYPRE_Real omega,
                            /* for cheby smoothers */
                            HYPRE_Real max_eig_est,
                            HYPRE_Real min_eig_est,
                            HYPRE_Int cheby_order,
                            HYPRE_Real cheby_fraction,
                            /* initial/updated approximation */
                            hypre_ParVector *u,
                            /* temporary vector */
                            hypre_ParVector *v,
                            /* temporary vector */
                            hypre_ParVector *z)
{
   HYPRE_Int sweep;

   HYPRE_Real *u_data = hypre_VectorData(hypre_ParVectorLocalVector(u));
   HYPRE_Real *f_data = hypre_VectorData(hypre_ParVectorLocalVector(f));
   HYPRE_Real *v_data = hypre_VectorData(hypre_ParVectorLocalVector(v));

   for (sweep = 0; sweep < relax_times; sweep++)
   {
      if (relax_type == 1) /* l1-scaled Jacobi */
      {
         HYPRE_Int i, num_rows = hypre_ParCSRMatrixNumRows(A);

         /* v = w (f - A u) via one residual matvec */
         hypre_ParVectorCopy(f,v);
         hypre_ParCSRMatrixMatvec(-relax_weight, A, u, relax_weight, v);

         /* u += w D^{-1}(f - A u), where D_ii = ||A(i,:)||_1 */
         for (i = 0; i < num_rows; i++)
            u_data[i] += v_data[i] / l1_norms[i];
      }
      else if (relax_type == 2 || relax_type == 4) /* offd-l1-scaled block GS */
      {
         hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
         HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
         HYPRE_Int *A_diag_I = hypre_CSRMatrixI(A_diag);
         HYPRE_Int *A_diag_J = hypre_CSRMatrixJ(A_diag);

         hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
         HYPRE_Int *A_offd_I = hypre_CSRMatrixI(A_offd);
         HYPRE_Int *A_offd_J = hypre_CSRMatrixJ(A_offd);
         HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);

         HYPRE_Int i, j;
         HYPRE_Int num_rows = hypre_CSRMatrixNumRows(A_diag);
         HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(A_offd);
         HYPRE_Real *u_offd_data = hypre_TAlloc(HYPRE_Real,num_cols_offd);

         HYPRE_Real res;

         HYPRE_Int num_procs;
         hypre_MPI_Comm_size(hypre_ParCSRMatrixComm(A), &num_procs);

         /* Copy off-diagonal values of u to the current processor */
         if (num_procs > 1)
         {
            hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
            HYPRE_Int num_sends;
            HYPRE_Real *u_buf_data;
            hypre_ParCSRCommHandle *comm_handle;

            HYPRE_Int index = 0, start;

            if (!comm_pkg)
            {
               hypre_MatvecCommPkgCreate(A);
               comm_pkg = hypre_ParCSRMatrixCommPkg(A);
            }

            num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
            u_buf_data = hypre_TAlloc(HYPRE_Real,
                                      hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends));

            for (i = 0; i < num_sends; i++)
            {
               start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
               for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg,i+1); j++)
                  u_buf_data[index++] = u_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
            }
            comm_handle = hypre_ParCSRCommHandleCreate(1,comm_pkg,u_buf_data,u_offd_data);
            hypre_ParCSRCommHandleDestroy(comm_handle);
            hypre_TFree(u_buf_data);
         }

         /* Off-processor u values are frozen for the whole sweep; only the
            local (diag) part is updated Gauss-Seidel style. */
         if (relax_weight == 1.0 && omega == 1.0) /* symmetric Gauss-Seidel */
         {
            /* Forward local pass */
            for (i = 0; i < num_rows; i++)
            {
               res = f_data[i];
               for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
                  res -= A_diag_data[j] * u_data[A_diag_J[j]];
               if (num_cols_offd)
                  for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
                     res -= A_offd_data[j] * u_offd_data[A_offd_J[j]];
               u_data[i] += res / l1_norms[i];
            }
            /* Backward local pass */
            for (i = num_rows-1; i > -1; i--)
            {
               res = f_data[i];
               for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
                  res -= A_diag_data[j] * u_data[A_diag_J[j]];
               if (num_cols_offd)
                  for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
                     res -= A_offd_data[j] * u_offd_data[A_offd_J[j]];
               u_data[i] += res / l1_norms[i];
            }
         }
         else if (relax_weight == 1.0) /* SSOR */
         {
            /* Forward local pass */
            for (i = 0; i < num_rows; i++)
            {
               res = f_data[i];
               for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
                  res -= A_diag_data[j] * u_data[A_diag_J[j]];
               if (num_cols_offd)
                  for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
                     res -= A_offd_data[j] * u_offd_data[A_offd_J[j]];
               u_data[i] += omega * res / l1_norms[i];
            }
            /* Backward local pass */
            for (i = num_rows-1; i > -1; i--)
            {
               res = f_data[i];
               for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
                  res -= A_diag_data[j] * u_data[A_diag_J[j]];
               if (num_cols_offd)
                  for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
                     res -= A_offd_data[j] * u_offd_data[A_offd_J[j]];
               u_data[i] += omega * res / l1_norms[i];
            }
         }
         else /* scaled SSOR */
         {
            HYPRE_Real dif;
            HYPRE_Real c1 = omega * relax_weight;
            HYPRE_Real c2 = omega * (1.0 - relax_weight);

            /* Forward local pass (save initial guess in v_data) */
            for (i = 0; i < num_rows; i++)
            {
               dif = 0.0;
               v_data[i] = u_data[i];
               res = f_data[i];
               for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
               {
                  res -= A_diag_data[j] * u_data[A_diag_J[j]];
                  /* dif accumulates the correction already applied to
                     earlier rows in this pass */
                  if (A_diag_J[j] < i)
                     dif += A_diag_data[j] * (v_data[A_diag_J[j]] - u_data[A_diag_J[j]]);
               }
               if (num_cols_offd)
                  for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
                     res -= A_offd_data[j] * u_offd_data[A_offd_J[j]];
               u_data[i] += (c1 * res + c2 * dif) / l1_norms[i];
            }
            /* Backward local pass */
            for (i = num_rows-1; i > -1; i--)
            {
               dif = 0.0;
               res = f_data[i];
               for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
               {
                  res -= A_diag_data[j] * u_data[A_diag_J[j]];
                  if (A_diag_J[j] > i)
                     dif += A_diag_data[j] * (v_data[A_diag_J[j]] - u_data[A_diag_J[j]]);
               }
               if (num_cols_offd)
                  for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
                     res -= A_offd_data[j] * u_offd_data[A_offd_J[j]];
               u_data[i] += (c1 * res + c2 * dif) / l1_norms[i];
            }
         }

         hypre_TFree(u_offd_data);
      }
      else if (relax_type == 3) /* Kaczmarz */
      {
         hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
         HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
         HYPRE_Int *A_diag_I = hypre_CSRMatrixI(A_diag);
         HYPRE_Int *A_diag_J = hypre_CSRMatrixJ(A_diag);

         hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
         HYPRE_Int *A_offd_I = hypre_CSRMatrixI(A_offd);
         HYPRE_Int *A_offd_J = hypre_CSRMatrixJ(A_offd);
         HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);

         HYPRE_Int i, j;
         HYPRE_Int num_rows = hypre_CSRMatrixNumRows(A_diag);
         HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(A_offd);
         HYPRE_Real *u_offd_data = hypre_TAlloc(HYPRE_Real,num_cols_offd);

         HYPRE_Real res;

         HYPRE_Int num_procs;
         hypre_MPI_Comm_size(hypre_ParCSRMatrixComm(A), &num_procs);

         /* Copy off-diagonal values of u to the current processor */
         if (num_procs > 1)
         {
            hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
            HYPRE_Int num_sends;
            HYPRE_Real *u_buf_data;
            hypre_ParCSRCommHandle *comm_handle;

            HYPRE_Int index = 0, start;

            if (!comm_pkg)
            {
               hypre_MatvecCommPkgCreate(A);
               comm_pkg = hypre_ParCSRMatrixCommPkg(A);
            }

            num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
            u_buf_data = hypre_TAlloc(HYPRE_Real,
                                      hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends));

            for (i = 0; i < num_sends; i++)
            {
               start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
               for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg,i+1); j++)
                  u_buf_data[index++] = u_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
            }
            comm_handle = hypre_ParCSRCommHandleCreate(1,comm_pkg,u_buf_data,u_offd_data);
            hypre_ParCSRCommHandleDestroy(comm_handle);
            hypre_TFree(u_buf_data);
         }

         /* Row projections: u += omega * (res / ||a_i||) * a_i, local part only */
         /* Forward local pass */
         for (i = 0; i < num_rows; i++)
         {
            res = f_data[i];
            for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
               res -= A_diag_data[j] * u_data[A_diag_J[j]];
            if (num_cols_offd)
               for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
                  res -= A_offd_data[j] * u_offd_data[A_offd_J[j]];
            res /= l1_norms[i];
            for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
               u_data[A_diag_J[j]] += omega * res * A_diag_data[j];
         }

         /* Backward local pass */
         for (i = num_rows-1; i > -1; i--)
         {
            res = f_data[i];
            for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
               res -= A_diag_data[j] * u_data[A_diag_J[j]];
            if (num_cols_offd)
               for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
                  res -= A_offd_data[j] * u_offd_data[A_offd_J[j]];
            res /= l1_norms[i];
            for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
               u_data[A_diag_J[j]] += omega * res * A_diag_data[j];
         }

         hypre_TFree(u_offd_data);
      }
      else /* call BoomerAMG relaxation */
      {
         if (relax_type == 16)
         {
            hypre_ParCSRRelax_Cheby(A, f, max_eig_est, min_eig_est,
                                    cheby_fraction, cheby_order, 1,
                                    0, u, v, z);
         }
         else
            hypre_BoomerAMGRelax(A, f, NULL, hypre_abs(relax_type), 0, relax_weight,
                                 omega, l1_norms, u, v, z);
      }
   }

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_ParVectorInRangeOf
 *
 * Return a vector that belongs to the range of a given matrix.
 * The caller owns the returned vector (OwnsData = 1).
 *--------------------------------------------------------------------------*/

hypre_ParVector *hypre_ParVectorInRangeOf(hypre_ParCSRMatrix *A)
{
   hypre_ParVector *x;

   x = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A),
                             hypre_ParCSRMatrixGlobalNumRows(A),
                             hypre_ParCSRMatrixRowStarts(A));
   hypre_ParVectorInitialize(x);

   hypre_ParVectorOwnsData(x) = 1;
   /* partitioning is borrowed from A's row starts */
   hypre_ParVectorOwnsPartitioning(x) = 0;

   return x;
}

/*--------------------------------------------------------------------------
 * hypre_ParVectorInDomainOf
 *
 * Return a vector that belongs to the domain of a given matrix.
 * The caller owns the returned vector (OwnsData = 1).
 *--------------------------------------------------------------------------*/

hypre_ParVector *hypre_ParVectorInDomainOf(hypre_ParCSRMatrix *A)
{
   hypre_ParVector *x;

   x = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A),
                             hypre_ParCSRMatrixGlobalNumCols(A),
                             hypre_ParCSRMatrixColStarts(A));
   hypre_ParVectorInitialize(x);

   hypre_ParVectorOwnsData(x) = 1;
   /* partitioning is borrowed from A's col starts */
   hypre_ParVectorOwnsPartitioning(x) = 0;

   return x;
}

/*--------------------------------------------------------------------------
 * hypre_ParVectorBlockSplit
 *
 * Extract the dim sub-vectors x_0,...,x_{dim-1} composing a parallel
 * block vector x. It is assumed that &x[i] = [x_0[i],...,x_{dim-1}[i]].
 *--------------------------------------------------------------------------*/

HYPRE_Int hypre_ParVectorBlockSplit(hypre_ParVector *x,
                                    hypre_ParVector *x_[3],
                                    HYPRE_Int dim)
{
   HYPRE_Int i, d, size_;
   HYPRE_Real *x_data, *x_data_[3];

   /* local size of each component vector */
   size_ = hypre_VectorSize(hypre_ParVectorLocalVector(x_[0]));

   x_data = hypre_VectorData(hypre_ParVectorLocalVector(x));
   for (d = 0; d < dim; d++)
      x_data_[d] = hypre_VectorData(hypre_ParVectorLocalVector(x_[d]));

   /* de-interleave: component d of block entry i goes to x_[d][i] */
   for (i = 0; i < size_; i++)
      for (d = 0; d < dim; d++)
         x_data_[d][i] = x_data[dim*i+d];

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_ParVectorBlockGather
 *
 * Compose a parallel block vector x from dim given sub-vectors
 * x_0,...,x_{dim-1}, such that &x[i] = [x_0[i],...,x_{dim-1}[i]].
 *--------------------------------------------------------------------------*/

HYPRE_Int hypre_ParVectorBlockGather(hypre_ParVector *x,
                                     hypre_ParVector *x_[3],
                                     HYPRE_Int dim)
{
   HYPRE_Int i, d, size_;
   HYPRE_Real *x_data, *x_data_[3];

   size_ = hypre_VectorSize(hypre_ParVectorLocalVector(x_[0]));

   x_data = hypre_VectorData(hypre_ParVectorLocalVector(x));
   for (d = 0; d < dim; d++)
      x_data_[d] = hypre_VectorData(hypre_ParVectorLocalVector(x_[d]));

   /* interleave: x_[d][i] becomes component d of block entry i */
   for (i = 0; i < size_; i++)
      for (d = 0; d < dim; d++)
         x_data[dim*i+d] = x_data_[d][i];

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_BoomerAMGBlockSolve
 *
 * Apply the block-diagonal solver diag(B) to the system diag(A) x = b.
 * Here B is a given BoomerAMG solver for A, while x and b are "block"
 * parallel vectors: dim is inferred from size(x)/rows(A).
 *--------------------------------------------------------------------------*/

HYPRE_Int hypre_BoomerAMGBlockSolve(void *B,
                                    hypre_ParCSRMatrix *A,
                                    hypre_ParVector *b,
                                    hypre_ParVector *x)
{
   HYPRE_Int d, dim = 1;

   hypre_ParVector *b_[3];
   hypre_ParVector *x_[3];

   dim = hypre_ParVectorGlobalSize(x) / hypre_ParCSRMatrixGlobalNumRows(A);

   /* scalar case: solve directly, no split/gather needed */
   if (dim == 1)
   {
      hypre_BoomerAMGSolve(B, A, b, x);
      return hypre_error_flag;
   }

   for (d = 0; d < dim; d++)
   {
      b_[d] = hypre_ParVectorInRangeOf(A);
      x_[d] = hypre_ParVectorInRangeOf(A);
   }

   hypre_ParVectorBlockSplit(b, b_, dim);
   hypre_ParVectorBlockSplit(x, x_, dim);

   /* one independent AMG solve per component, all with the same A and B */
   for (d = 0; d < dim; d++)
      hypre_BoomerAMGSolve(B, A, b_[d], x_[d]);

   hypre_ParVectorBlockGather(x, x_, dim);

   for (d = 0; d < dim; d++)
   {
      hypre_ParVectorDestroy(b_[d]);
      hypre_ParVectorDestroy(x_[d]);
   }

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_ParCSRMatrixFixZeroRows
 *
 * For every zero row in the matrix: set the diagonal element to 1.
 * All other entries of the (near-)zero row are set to 0.
 *--------------------------------------------------------------------------*/

HYPRE_Int hypre_ParCSRMatrixFixZeroRows(hypre_ParCSRMatrix *A)
{
   HYPRE_Int i, j;
   HYPRE_Real l1_norm;
   HYPRE_Int num_rows = hypre_ParCSRMatrixNumRows(A);

   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   HYPRE_Int *A_diag_I = hypre_CSRMatrixI(A_diag);
   HYPRE_Int *A_diag_J = hypre_CSRMatrixJ(A_diag);
   HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);

   hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
   HYPRE_Int *A_offd_I = hypre_CSRMatrixI(A_offd);
   HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
   HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(A_offd);

   /* a row will be considered zero if its l1 norm is less than eps */
   HYPRE_Real eps = 0.0; /* DBL_EPSILON * 1e+4; */

   for (i = 0; i < num_rows; i++)
   {
      l1_norm = 0.0;
      for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
         l1_norm += fabs(A_diag_data[j]);
      if (num_cols_offd)
         for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
            l1_norm += fabs(A_offd_data[j]);

      if (l1_norm <= eps)
      {
         for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
            if (A_diag_J[j] == i)
               A_diag_data[j] = 1.0;
            else
               A_diag_data[j] = 0.0;
         if (num_cols_offd)
            for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
               A_offd_data[j] = 0.0;
      }
   }

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_ParCSRComputeL1Norms
 *
 * Compute the l1 norms of the rows of a given matrix, depending on
 * the option parameter:
 *
 * option 1 = Compute the l1 norm of the rows
 * option 2 = Compute the l1 norm of the (processor) off-diagonal
 *            part of the rows plus the diagonal of A
 * option 3 = Compute the l2 norm^2 of the rows
 * option 4 = Truncated version of option 2 based on Remark 6.2 in "Multigrid
 *            Smoothers for Ultra-Parallel Computing"
 *
 * The above computations are done in a CF manner, whenever the provided
 * cf_marker is not NULL.
 *
 * The caller owns the returned array *l1_norm_ptr (hypre_TAlloc'd here).
 *--------------------------------------------------------------------------*/

HYPRE_Int hypre_ParCSRComputeL1Norms(hypre_ParCSRMatrix *A,
                                     HYPRE_Int option,
                                     HYPRE_Int *cf_marker,
                                     HYPRE_Real **l1_norm_ptr)
{
   HYPRE_Int i, j;
   HYPRE_Int num_rows = hypre_ParCSRMatrixNumRows(A);

   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   HYPRE_Int *A_diag_I = hypre_CSRMatrixI(A_diag);
   HYPRE_Int *A_diag_J = hypre_CSRMatrixJ(A_diag);
   HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);

   hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
   HYPRE_Int *A_offd_I = hypre_CSRMatrixI(A_offd);
   HYPRE_Int *A_offd_J = hypre_CSRMatrixJ(A_offd);
   HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
   HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(A_offd);

   HYPRE_Real diag;
   HYPRE_Real *l1_norm = hypre_TAlloc(HYPRE_Real, num_rows);
   HYPRE_Int *cf_marker_offd = NULL;
   HYPRE_Int cf_diag;

   /* collect the cf marker data from other procs */
   if (cf_marker != NULL)
   {
      HYPRE_Int index;
      HYPRE_Int num_sends;
      HYPRE_Int start;
      HYPRE_Int *int_buf_data = NULL;

      hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
      hypre_ParCSRCommHandle *comm_handle;

      if (num_cols_offd)
         cf_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd);
      num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
      if (hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends))
         int_buf_data = hypre_CTAlloc(HYPRE_Int,
                                      hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends));
      index = 0;
      for (i = 0; i < num_sends; i++)
      {
         start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
         for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
         {
            int_buf_data[index++] = cf_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
         }
      }
      comm_handle = hypre_ParCSRCommHandleCreate(11, comm_pkg, int_buf_data, cf_marker_offd);
      hypre_ParCSRCommHandleDestroy(comm_handle);
      hypre_TFree(int_buf_data);
   }

   if (option == 1)
   {
      for (i = 0; i < num_rows; i++)
      {
         l1_norm[i] = 0.0;
         if (cf_marker == NULL)
         {
            /* Add the l1 norm of the diag part of the ith row */
            for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
               l1_norm[i] += fabs(A_diag_data[j]);
            /* Add the l1 norm of the offd part of the ith row */
            if (num_cols_offd)
            {
               for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
                  l1_norm[i] += fabs(A_offd_data[j]);
            }
         }
         else
         {
            cf_diag = cf_marker[i];
            /* Add the CF l1 norm of the diag part of the ith row */
            for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
               if (cf_diag == cf_marker[A_diag_J[j]])
                  l1_norm[i] += fabs(A_diag_data[j]);
            /* Add the CF l1 norm of the offd part of the ith row */
            if (num_cols_offd)
            {
               for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
                  if (cf_diag == cf_marker_offd[A_offd_J[j]])
                     l1_norm[i] += fabs(A_offd_data[j]);
            }
         }
      }
   }
   else if (option == 2)
   {
      for (i = 0; i < num_rows; i++)
      {
         /* Add the diag element of the ith row
            (assumes the diagonal entry is stored first in each diag row —
            the usual hypre CSR convention; confirm for matrices built
            outside hypre) */
         l1_norm[i] = fabs(A_diag_data[A_diag_I[i]]);
         if (cf_marker == NULL)
         {
            /* Add the l1 norm of the offd part of the ith row */
            if (num_cols_offd)
            {
               for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
                  l1_norm[i] += fabs(A_offd_data[j]);
            }
         }
         else
         {
            cf_diag = cf_marker[i];
            /* Add the CF l1 norm of the offd part of the ith row */
            if (num_cols_offd)
            {
               for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
                  if (cf_diag == cf_marker_offd[A_offd_J[j]])
                     l1_norm[i] += fabs(A_offd_data[j]);
            }
         }
      }
   }
   else if (option == 3)
   {
      for (i = 0; i < num_rows; i++)
      {
         l1_norm[i] = 0.0;
         for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
            l1_norm[i] += A_diag_data[j] * A_diag_data[j];
         if (num_cols_offd)
            for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
               l1_norm[i] += A_offd_data[j] * A_offd_data[j];
      }
   }
   else if (option == 4)
   {
      for (i = 0; i < num_rows; i++)
      {
         /* Add the diag element of the ith row */
         diag = l1_norm[i] = fabs(A_diag_data[A_diag_I[i]]);
         if (cf_marker == NULL)
         {
            /* Add the scaled l1 norm of the offd part of the ith row */
            if (num_cols_offd)
            {
               for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
                  l1_norm[i] += 0.5*fabs(A_offd_data[j]);
            }
         }
         else
         {
            cf_diag = cf_marker[i];
            /* Add the scaled CF l1 norm of the offd part of the ith row */
            if (num_cols_offd)
            {
               for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
                  if (cf_diag == cf_marker_offd[A_offd_J[j]])
                     l1_norm[i] += 0.5*fabs(A_offd_data[j]);
            }
         }

         /* Truncate according to Remark 6.2 */
         if (l1_norm[i] <= 4.0/3.0*diag)
            l1_norm[i] = diag;
      }
   }

   /* Handle negative definite matrices */
   for (i = 0; i < num_rows; i++)
      if (A_diag_data[A_diag_I[i]] < 0)
         l1_norm[i] = -l1_norm[i];

   /* a zero row norm would make the smoothers divide by zero */
   for (i = 0; i < num_rows; i++)
      /* if (fabs(l1_norm[i]) < DBL_EPSILON) */
      if (fabs(l1_norm[i]) == 0.0)
      {
         hypre_error_in_arg(1);
         break;
      }

   hypre_TFree(cf_marker_offd);

   *l1_norm_ptr = l1_norm;

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_ParCSRMatrixSetDiagRows
 *
 * For every row containing only a diagonal element: set it to d.
*--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParCSRMatrixSetDiagRows(hypre_ParCSRMatrix *A, HYPRE_Real d) { HYPRE_Int i, j; HYPRE_Int num_rows = hypre_ParCSRMatrixNumRows(A); hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Int *A_diag_I = hypre_CSRMatrixI(A_diag); HYPRE_Int *A_diag_J = hypre_CSRMatrixJ(A_diag); HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag); hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Int *A_offd_I = hypre_CSRMatrixI(A_offd); HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(A_offd); for (i = 0; i < num_rows; i++) { j = A_diag_I[i]; if ((A_diag_I[i+1] == j+1) && (A_diag_J[j] == i) && (!num_cols_offd || (A_offd_I[i+1] == A_offd_I[i]))) { A_diag_data[j] = d; } } return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_AMSCreate * * Allocate the AMS solver structure. *--------------------------------------------------------------------------*/ void * hypre_AMSCreate() { hypre_AMSData *ams_data; ams_data = hypre_CTAlloc(hypre_AMSData, 1); /* Default parameters */ ams_data -> dim = 3; /* 3D problem */ ams_data -> maxit = 20; /* perform at most 20 iterations */ ams_data -> tol = 1e-6; /* convergence tolerance */ ams_data -> print_level = 1; /* print residual norm at each step */ ams_data -> cycle_type = 1; /* a 3-level multiplicative solver */ ams_data -> A_relax_type = 2; /* offd-l1-scaled GS */ ams_data -> A_relax_times = 1; /* one relaxation sweep */ ams_data -> A_relax_weight = 1.0; /* damping parameter */ ams_data -> A_omega = 1.0; /* SSOR coefficient */ ams_data -> A_cheby_order = 2; /* Cheby: order (1 -4 are vaild) */ ams_data -> A_cheby_fraction = .3; /* Cheby: fraction of spectrum to smooth */ ams_data -> B_G_coarsen_type = 10; /* HMIS coarsening */ ams_data -> B_G_agg_levels = 1; /* Levels of aggressive coarsening */ ams_data -> B_G_relax_type = 3; /* hybrid G-S/Jacobi */ ams_data -> B_G_theta = 0.25; /* 
strength threshold */ ams_data -> B_G_interp_type = 0; /* interpolation type */ ams_data -> B_G_Pmax = 0; /* max nonzero elements in interp. rows */ ams_data -> B_Pi_coarsen_type = 10; /* HMIS coarsening */ ams_data -> B_Pi_agg_levels = 1; /* Levels of aggressive coarsening */ ams_data -> B_Pi_relax_type = 3; /* hybrid G-S/Jacobi */ ams_data -> B_Pi_theta = 0.25; /* strength threshold */ ams_data -> B_Pi_interp_type = 0; /* interpolation type */ ams_data -> B_Pi_Pmax = 0; /* max nonzero elements in interp. rows */ ams_data -> beta_is_zero = 0; /* the problem has a mass term */ /* By default, do l1-GS smoothing on the coarsest grid */ ams_data -> B_G_coarse_relax_type = 8; ams_data -> B_Pi_coarse_relax_type = 8; /* The rest of the fields are initialized using the Set functions */ ams_data -> A = NULL; ams_data -> G = NULL; ams_data -> A_G = NULL; ams_data -> B_G = 0; ams_data -> Pi = NULL; ams_data -> A_Pi = NULL; ams_data -> B_Pi = 0; ams_data -> x = NULL; ams_data -> y = NULL; ams_data -> z = NULL; ams_data -> Gx = NULL; ams_data -> Gy = NULL; ams_data -> Gz = NULL; ams_data -> r0 = NULL; ams_data -> g0 = NULL; ams_data -> r1 = NULL; ams_data -> g1 = NULL; ams_data -> r2 = NULL; ams_data -> g2 = NULL; ams_data -> Pix = NULL; ams_data -> Piy = NULL; ams_data -> Piz = NULL; ams_data -> A_Pix = NULL; ams_data -> A_Piy = NULL; ams_data -> A_Piz = NULL; ams_data -> B_Pix = 0; ams_data -> B_Piy = 0; ams_data -> B_Piz = 0; ams_data -> interior_nodes = NULL; ams_data -> G0 = NULL; ams_data -> A_G0 = NULL; ams_data -> B_G0 = 0; ams_data -> projection_frequency = 5; ams_data -> A_l1_norms = NULL; ams_data -> A_max_eig_est = 0; ams_data -> A_min_eig_est = 0; ams_data -> owns_Pi = 1; ams_data -> owns_A_G = 0; ams_data -> owns_A_Pi = 0; return (void *) ams_data; } /*-------------------------------------------------------------------------- * hypre_AMSDestroy * * Deallocate the AMS solver structure. Note that the input data (given * through the Set functions) is not destroyed. 
 *--------------------------------------------------------------------------*/

/* Free every object owned by the AMS solver structure.  The ownership
   flags (owns_Pi, owns_A_G, owns_A_Pi) record whether a matrix was built
   internally during Setup or supplied by the user through a Set function;
   only internally built objects are destroyed here. */
HYPRE_Int hypre_AMSDestroy(void *solver)
{
   hypre_AMSData *ams_data = (hypre_AMSData *) solver;

   /* Guard against a NULL solver handle */
   if (!ams_data)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   /* Subspace matrix and AMG solver for the nodal (beta/mass) space */
   if (ams_data -> owns_A_G)
      if (ams_data -> A_G)
         hypre_ParCSRMatrixDestroy(ams_data -> A_G);
   if (!ams_data -> beta_is_zero)
      if (ams_data -> B_G)
         HYPRE_BoomerAMGDestroy(ams_data -> B_G);

   /* Monolithic Pi interpolation, its subspace matrix and AMG solver */
   if (ams_data -> owns_Pi && ams_data -> Pi)
      hypre_ParCSRMatrixDestroy(ams_data -> Pi);
   if (ams_data -> owns_A_Pi)
      if (ams_data -> A_Pi)
         hypre_ParCSRMatrixDestroy(ams_data -> A_Pi);
   if (ams_data -> B_Pi)
      HYPRE_BoomerAMGDestroy(ams_data -> B_Pi);

   /* Scalar components Pix/Piy/Piz with their matrices and solvers
      (used by the cycle_type > 10 variants) */
   if (ams_data -> owns_Pi && ams_data -> Pix)
      hypre_ParCSRMatrixDestroy(ams_data -> Pix);
   if (ams_data -> A_Pix)
      hypre_ParCSRMatrixDestroy(ams_data -> A_Pix);
   if (ams_data -> B_Pix)
      HYPRE_BoomerAMGDestroy(ams_data -> B_Pix);

   if (ams_data -> owns_Pi && ams_data -> Piy)
      hypre_ParCSRMatrixDestroy(ams_data -> Piy);
   if (ams_data -> A_Piy)
      hypre_ParCSRMatrixDestroy(ams_data -> A_Piy);
   if (ams_data -> B_Piy)
      HYPRE_BoomerAMGDestroy(ams_data -> B_Piy);

   if (ams_data -> owns_Pi && ams_data -> Piz)
      hypre_ParCSRMatrixDestroy(ams_data -> Piz);
   if (ams_data -> A_Piz)
      hypre_ParCSRMatrixDestroy(ams_data -> A_Piz);
   if (ams_data -> B_Piz)
      HYPRE_BoomerAMGDestroy(ams_data -> B_Piz);

   /* Temporary work vectors allocated during Setup */
   if (ams_data -> r0)
      hypre_ParVectorDestroy(ams_data -> r0);
   if (ams_data -> g0)
      hypre_ParVectorDestroy(ams_data -> g0);
   if (ams_data -> r1)
      hypre_ParVectorDestroy(ams_data -> r1);
   if (ams_data -> g1)
      hypre_ParVectorDestroy(ams_data -> g1);
   if (ams_data -> r2)
      hypre_ParVectorDestroy(ams_data -> r2);
   if (ams_data -> g2)
      hypre_ParVectorDestroy(ams_data -> g2);

   /* Zero-conductivity (G0) objects.  NOTE(review): when G0 exists,
      Setup appears to replace ams_data -> A with an internally built
      matrix, which is presumably why A is destroyed here only in that
      case — confirm against hypre_AMSSetup before changing. */
   if (ams_data -> G0)
      hypre_ParCSRMatrixDestroy(ams_data -> A);
   if (ams_data -> G0)
      hypre_ParCSRMatrixDestroy(ams_data -> G0);
   if (ams_data -> A_G0)
      hypre_ParCSRMatrixDestroy(ams_data -> A_G0);
   if (ams_data -> B_G0)
      HYPRE_BoomerAMGDestroy(ams_data -> B_G0);

   if (ams_data -> A_l1_norms)
      hypre_TFree(ams_data -> A_l1_norms);

   /* G, x, y ,z, Gx, Gy and Gz are not destroyed */

   if (ams_data)
      hypre_TFree(ams_data);

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_AMSSetDimension
 *
 * Set problem dimension (2 or 3). By default we assume dim = 3.
 *--------------------------------------------------------------------------*/

HYPRE_Int hypre_AMSSetDimension(void *solver, HYPRE_Int dim)
{
   hypre_AMSData *ams_data = (hypre_AMSData *) solver;

   /* Only 2 and 3 are meaningful; anything else raises an argument
      error (note: dim is still stored even after the error is raised) */
   if (dim != 2 && dim != 3)
      hypre_error_in_arg(2);

   ams_data -> dim = dim;
   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_AMSSetDiscreteGradient
 *
 * Set the discrete gradient matrix G.
 * This function should be called before hypre_AMSSetup()!
 *--------------------------------------------------------------------------*/

HYPRE_Int hypre_AMSSetDiscreteGradient(void *solver,
                                       hypre_ParCSRMatrix *G)
{
   hypre_AMSData *ams_data = (hypre_AMSData *) solver;

   /* G is borrowed, not copied: hypre_AMSDestroy() never frees it */
   ams_data -> G = G;
   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_AMSSetCoordinateVectors
 *
 * Set the x, y and z coordinates of the vertices in the mesh.
 *
 * Either SetCoordinateVectors or SetEdgeConstantVectors should be
 * called before hypre_AMSSetup()!
 *--------------------------------------------------------------------------*/

HYPRE_Int hypre_AMSSetCoordinateVectors(void *solver,
                                        hypre_ParVector *x,
                                        hypre_ParVector *y,
                                        hypre_ParVector *z)
{
   hypre_AMSData *ams_data = (hypre_AMSData *) solver;

   /* The coordinate vectors are borrowed references as well */
   ams_data -> x = x;
   ams_data -> y = y;
   ams_data -> z = z;
   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_AMSSetEdgeConstantVectors
 *
 * Set the vectors Gx, Gy and Gz which give the representations of
 * the constant vector fields (1,0,0), (0,1,0) and (0,0,1) in the
 * edge element basis.
* * Either SetCoordinateVectors or SetEdgeConstantVectors should be * called before hypre_AMSSetup()! *--------------------------------------------------------------------------*/ HYPRE_Int hypre_AMSSetEdgeConstantVectors(void *solver, hypre_ParVector *Gx, hypre_ParVector *Gy, hypre_ParVector *Gz) { hypre_AMSData *ams_data = (hypre_AMSData *) solver; ams_data -> Gx = Gx; ams_data -> Gy = Gy; ams_data -> Gz = Gz; return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_AMSSetInterpolations * * Set the (components of) the Nedelec interpolation matrix Pi=[Pix,Piy,Piz]. * * This function is generally intended to be used only for high-order Nedelec * discretizations (in the lowest order case, Pi is constructed internally in * AMS from the discreet gradient matrix and the coordinates of the vertices), * though it can also be used in the lowest-order case or for other types of * discretizations (e.g. ones based on the second family of Nedelec elements). * * By definition, Pi is the matrix representation of the linear operator that * interpolates (high-order) vector nodal finite elements into the (high-order) * Nedelec space. The component matrices are defined as Pix phi = Pi (phi,0,0) * and similarly for Piy and Piz. Note that all these operators depend on the * choice of the basis and degrees of freedom in the high-order spaces. * * The column numbering of Pi should be node-based, i.e. the x/y/z components of * the first node (vertex or high-order dof) should be listed first, followed by * the x/y/z components of the second node and so on (see the documentation of * HYPRE_BoomerAMGSetDofFunc). * * If used, this function should be called before hypre_AMSSetup() and there is * no need to provide the vertex coordinates. Furthermore, only one of the sets * {Pi} and {Pix,Piy,Piz} needs to be specified (though it is OK to provide * both). If Pix is NULL, then scalar Pi-based AMS cycles, i.e. 
those with * cycle_type > 10, will be unavailable. Similarly, AMS cycles based on * monolithic Pi (cycle_type < 10) require that Pi is not NULL. *--------------------------------------------------------------------------*/ HYPRE_Int hypre_AMSSetInterpolations(void *solver, hypre_ParCSRMatrix *Pi, hypre_ParCSRMatrix *Pix, hypre_ParCSRMatrix *Piy, hypre_ParCSRMatrix *Piz) { hypre_AMSData *ams_data = (hypre_AMSData *) solver; ams_data -> Pi = Pi; ams_data -> Pix = Pix; ams_data -> Piy = Piy; ams_data -> Piz = Piz; ams_data -> owns_Pi = 0; return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_AMSSetAlphaPoissonMatrix * * Set the matrix corresponding to the Poisson problem with coefficient * alpha (the curl-curl term coefficient in the Maxwell problem). * * If this function is called, the coarse space solver on the range * of Pi^T is a block-diagonal version of A_Pi. If this function is not * called, the coarse space solver on the range of Pi^T is constructed * as Pi^T A Pi in hypre_AMSSetup(). *--------------------------------------------------------------------------*/ HYPRE_Int hypre_AMSSetAlphaPoissonMatrix(void *solver, hypre_ParCSRMatrix *A_Pi) { hypre_AMSData *ams_data = (hypre_AMSData *) solver; ams_data -> A_Pi = A_Pi; /* Penalize the eliminated degrees of freedom */ hypre_ParCSRMatrixSetDiagRows(A_Pi, DBL_MAX); /* Make sure that the first entry in each row is the diagonal one. */ /* hypre_CSRMatrixReorder(hypre_ParCSRMatrixDiag(A_Pi)); */ return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_AMSSetBetaPoissonMatrix * * Set the matrix corresponding to the Poisson problem with coefficient * beta (the mass term coefficient in the Maxwell problem). * * This function call is optional - if not given, the Poisson matrix will * be computed in hypre_AMSSetup(). 
If the given matrix is NULL, we assume * that beta is 0 and use two-level (instead of three-level) methods. *--------------------------------------------------------------------------*/ HYPRE_Int hypre_AMSSetBetaPoissonMatrix(void *solver, hypre_ParCSRMatrix *A_G) { hypre_AMSData *ams_data = (hypre_AMSData *) solver; ams_data -> A_G = A_G; if (!A_G) ams_data -> beta_is_zero = 1; else { /* Penalize the eliminated degrees of freedom */ hypre_ParCSRMatrixSetDiagRows(A_G, DBL_MAX); /* Make sure that the first entry in each row is the diagonal one. */ /* hypre_CSRMatrixReorder(hypre_ParCSRMatrixDiag(A_G)); */ } return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_AMSSetInteriorNodes * * Set the list of nodes which are interior to the zero-conductivity region. * A node is interior if interior_nodes[i] == 1.0. * * Should be called before hypre_AMSSetup()! *--------------------------------------------------------------------------*/ HYPRE_Int hypre_AMSSetInteriorNodes(void *solver, hypre_ParVector *interior_nodes) { hypre_AMSData *ams_data = (hypre_AMSData *) solver; ams_data -> interior_nodes = interior_nodes; return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_AMSSetProjectionFrequency * * How often to project the r.h.s. onto the compatible sub-space Ker(G0^T), * when iterating with the solver. * * The default value is every 5th iteration. *--------------------------------------------------------------------------*/ HYPRE_Int hypre_AMSSetProjectionFrequency(void *solver, HYPRE_Int projection_frequency) { hypre_AMSData *ams_data = (hypre_AMSData *) solver; ams_data -> projection_frequency = projection_frequency; return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_AMSSetMaxIter * * Set the maximum number of iterations in the three-level method. * The default value is 20. 
To use the AMS solver as a preconditioner, * set maxit to 1, tol to 0.0 and print_level to 0. *--------------------------------------------------------------------------*/ HYPRE_Int hypre_AMSSetMaxIter(void *solver, HYPRE_Int maxit) { hypre_AMSData *ams_data = (hypre_AMSData *) solver; ams_data -> maxit = maxit; return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_AMSSetTol * * Set the convergence tolerance (if the method is used as a solver). * The default value is 1e-6. *--------------------------------------------------------------------------*/ HYPRE_Int hypre_AMSSetTol(void *solver, HYPRE_Real tol) { hypre_AMSData *ams_data = (hypre_AMSData *) solver; ams_data -> tol = tol; return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_AMSSetCycleType * * Choose which three-level solver to use. Possible values are: * * 1 = 3-level multipl. solver (01210) <-- small solution time * 2 = 3-level additive solver (0+1+2) * 3 = 3-level multipl. solver (02120) * 4 = 3-level additive solver (010+2) * 5 = 3-level multipl. solver (0102010) <-- small solution time * 6 = 3-level additive solver (1+020) * 7 = 3-level multipl. solver (0201020) <-- small number of iterations * 8 = 3-level additive solver (0(1+2)0) <-- small solution time * 9 = 3-level multipl. solver (01210) with discrete divergence * 11 = 5-level multipl. solver (013454310) <-- small solution time, memory * 12 = 5-level additive solver (0+1+3+4+5) * 13 = 5-level multipl. solver (034515430) <-- small solution time, memory * 14 = 5-level additive solver (01(3+4+5)10) * 20 = 2-level multipl. solver (0[12]0) * * 0 = a Hiptmair-like smoother (010) * * The default value is 1. 
*--------------------------------------------------------------------------*/ HYPRE_Int hypre_AMSSetCycleType(void *solver, HYPRE_Int cycle_type) { hypre_AMSData *ams_data = (hypre_AMSData *) solver; ams_data -> cycle_type = cycle_type; return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_AMSSetPrintLevel * * Control how much information is printed during the solution iterations. * The defaut values is 1 (print residual norm at each step). *--------------------------------------------------------------------------*/ HYPRE_Int hypre_AMSSetPrintLevel(void *solver, HYPRE_Int print_level) { hypre_AMSData *ams_data = (hypre_AMSData *) solver; ams_data -> print_level = print_level; return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_AMSSetSmoothingOptions * * Set relaxation parameters for A. Default values: 2, 1, 1.0, 1.0. *--------------------------------------------------------------------------*/ HYPRE_Int hypre_AMSSetSmoothingOptions(void *solver, HYPRE_Int A_relax_type, HYPRE_Int A_relax_times, HYPRE_Real A_relax_weight, HYPRE_Real A_omega) { hypre_AMSData *ams_data = (hypre_AMSData *) solver; ams_data -> A_relax_type = A_relax_type; ams_data -> A_relax_times = A_relax_times; ams_data -> A_relax_weight = A_relax_weight; ams_data -> A_omega = A_omega; return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_AMSSetChebySmoothingOptions * AB: note: this could be added to the above, * but I didn't want to change parameter list) * Set parameters for chebyshev smoother for A. Default values: 2,.3. 
*--------------------------------------------------------------------------*/ HYPRE_Int hypre_AMSSetChebySmoothingOptions(void *solver, HYPRE_Int A_cheby_order, HYPRE_Int A_cheby_fraction) { hypre_AMSData *ams_data = (hypre_AMSData *) solver; ams_data -> A_cheby_order = A_cheby_order; ams_data -> A_cheby_fraction = A_cheby_fraction; return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_AMSSetAlphaAMGOptions * * Set AMG parameters for B_Pi. Default values: 10, 1, 3, 0.25, 0, 0. *--------------------------------------------------------------------------*/ HYPRE_Int hypre_AMSSetAlphaAMGOptions(void *solver, HYPRE_Int B_Pi_coarsen_type, HYPRE_Int B_Pi_agg_levels, HYPRE_Int B_Pi_relax_type, HYPRE_Real B_Pi_theta, HYPRE_Int B_Pi_interp_type, HYPRE_Int B_Pi_Pmax) { hypre_AMSData *ams_data = (hypre_AMSData *) solver; ams_data -> B_Pi_coarsen_type = B_Pi_coarsen_type; ams_data -> B_Pi_agg_levels = B_Pi_agg_levels; ams_data -> B_Pi_relax_type = B_Pi_relax_type; ams_data -> B_Pi_theta = B_Pi_theta; ams_data -> B_Pi_interp_type = B_Pi_interp_type; ams_data -> B_Pi_Pmax = B_Pi_Pmax; return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_AMSSetAlphaAMGCoarseRelaxType * * Set the AMG coarsest level relaxation for B_Pi. Default value: 8. *--------------------------------------------------------------------------*/ HYPRE_Int hypre_AMSSetAlphaAMGCoarseRelaxType(void *solver, HYPRE_Int B_Pi_coarse_relax_type) { hypre_AMSData *ams_data = (hypre_AMSData *)solver; ams_data -> B_Pi_coarse_relax_type = B_Pi_coarse_relax_type; return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_AMSSetBetaAMGOptions * * Set AMG parameters for B_G. Default values: 10, 1, 3, 0.25, 0, 0. 
*--------------------------------------------------------------------------*/ HYPRE_Int hypre_AMSSetBetaAMGOptions(void *solver, HYPRE_Int B_G_coarsen_type, HYPRE_Int B_G_agg_levels, HYPRE_Int B_G_relax_type, HYPRE_Real B_G_theta, HYPRE_Int B_G_interp_type, HYPRE_Int B_G_Pmax) { hypre_AMSData *ams_data = (hypre_AMSData *) solver; ams_data -> B_G_coarsen_type = B_G_coarsen_type; ams_data -> B_G_agg_levels = B_G_agg_levels; ams_data -> B_G_relax_type = B_G_relax_type; ams_data -> B_G_theta = B_G_theta; ams_data -> B_G_interp_type = B_G_interp_type; ams_data -> B_G_Pmax = B_G_Pmax; return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_AMSSetBetaAMGCoarseRelaxType * * Set the AMG coarsest level relaxation for B_G. Default value: 8. *--------------------------------------------------------------------------*/ HYPRE_Int hypre_AMSSetBetaAMGCoarseRelaxType(void *solver, HYPRE_Int B_G_coarse_relax_type) { hypre_AMSData *ams_data = (hypre_AMSData *) solver; ams_data -> B_G_coarse_relax_type = B_G_coarse_relax_type; return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_AMSComputePi * * Construct the Pi interpolation matrix, which maps the space of vector * linear finite elements to the space of edge finite elements. * * The construction is based on the fact that Pi = [Pi_x, Pi_y, Pi_z], * where each block has the same sparsity structure as G, and the entries * can be computed from the vectors Gx, Gy, Gz. 
 *--------------------------------------------------------------------------*/

HYPRE_Int hypre_AMSComputePi(hypre_ParCSRMatrix *A,
                             hypre_ParCSRMatrix *G,
                             hypre_ParVector *Gx,
                             hypre_ParVector *Gy,
                             hypre_ParVector *Gz,
                             HYPRE_Int dim,
                             hypre_ParCSRMatrix **Pi_ptr)
{
   hypre_ParCSRMatrix *Pi;

   /* Compute Pi = [Pi_x, Pi_y, Pi_z].  Pi has the row pattern of G with
      each G entry expanded into dim consecutive columns; column count,
      offd width and nonzero counts are therefore dim times those of G. */
   {
      HYPRE_Int i, j, d;

      HYPRE_Real *Gx_data, *Gy_data, *Gz_data;

      MPI_Comm comm = hypre_ParCSRMatrixComm(G);
      HYPRE_Int global_num_rows = hypre_ParCSRMatrixGlobalNumRows(G);
      HYPRE_Int global_num_cols = dim*hypre_ParCSRMatrixGlobalNumCols(G);
      HYPRE_Int *row_starts = hypre_ParCSRMatrixRowStarts(G);
      HYPRE_Int col_starts_size, *col_starts;
      HYPRE_Int num_cols_offd = dim*hypre_CSRMatrixNumCols(hypre_ParCSRMatrixOffd(G));
      HYPRE_Int num_nonzeros_diag = dim*hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixDiag(G));
      HYPRE_Int num_nonzeros_offd = dim*hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixOffd(G));
      HYPRE_Int *col_starts_G = hypre_ParCSRMatrixColStarts(G);

      /* col_starts is scaled by dim from G's column partitioning; its
         length depends on the global-partition compile mode */
#ifdef HYPRE_NO_GLOBAL_PARTITION
      col_starts_size = 2;
#else
      HYPRE_Int num_procs;
      hypre_MPI_Comm_size(comm, &num_procs);
      col_starts_size = num_procs+1;
#endif
      col_starts = hypre_TAlloc(HYPRE_Int,col_starts_size);
      for (i = 0; i < col_starts_size; i++)
         col_starts[i] = dim * col_starts_G[i];

      Pi = hypre_ParCSRMatrixCreate(comm,
                                    global_num_rows,
                                    global_num_cols,
                                    row_starts,
                                    col_starts,
                                    num_cols_offd,
                                    num_nonzeros_diag,
                                    num_nonzeros_offd);

      /* Pi borrows G's row partitioning but owns the scaled col_starts
         allocated above, so Destroy(Pi) frees it */
      hypre_ParCSRMatrixOwnsData(Pi) = 1;
      hypre_ParCSRMatrixOwnsRowStarts(Pi) = 0;
      hypre_ParCSRMatrixOwnsColStarts(Pi) = 1;

      hypre_ParCSRMatrixInitialize(Pi);

      Gx_data = hypre_VectorData(hypre_ParVectorLocalVector(Gx));
      Gy_data = hypre_VectorData(hypre_ParVectorLocalVector(Gy));
      if (dim == 3)
         Gz_data = hypre_VectorData(hypre_ParVectorLocalVector(Gz));

      /* Fill-in the diagonal part */
      {
         hypre_CSRMatrix *G_diag = hypre_ParCSRMatrixDiag(G);
         HYPRE_Int *G_diag_I = hypre_CSRMatrixI(G_diag);
         HYPRE_Int *G_diag_J = hypre_CSRMatrixJ(G_diag);
         HYPRE_Real *G_diag_data = hypre_CSRMatrixData(G_diag);
         HYPRE_Int G_diag_nrows = hypre_CSRMatrixNumRows(G_diag);
         HYPRE_Int G_diag_nnz = hypre_CSRMatrixNumNonzeros(G_diag);

         hypre_CSRMatrix *Pi_diag = hypre_ParCSRMatrixDiag(Pi);
         HYPRE_Int *Pi_diag_I = hypre_CSRMatrixI(Pi_diag);
         HYPRE_Int *Pi_diag_J = hypre_CSRMatrixJ(Pi_diag);
         HYPRE_Real *Pi_diag_data = hypre_CSRMatrixData(Pi_diag);

         /* Row pointers and column indices: each G entry becomes dim
            consecutive Pi entries */
         for (i = 0; i < G_diag_nrows+1; i++)
            Pi_diag_I[i] = dim * G_diag_I[i];

         for (i = 0; i < G_diag_nnz; i++)
            for (d = 0; d < dim; d++)
               Pi_diag_J[dim*i+d] = dim*G_diag_J[i]+d;

         /* Values: |G_ij|/2 times the per-row Gx/Gy/Gz entries; the data
            pointer is bumped in the same interleaved x,y,(z) order the
            J array was written in above */
         for (i = 0; i < G_diag_nrows; i++)
            for (j = G_diag_I[i]; j < G_diag_I[i+1]; j++)
            {
               *Pi_diag_data++ = fabs(G_diag_data[j]) * 0.5 * Gx_data[i];
               *Pi_diag_data++ = fabs(G_diag_data[j]) * 0.5 * Gy_data[i];
               if (dim == 3)
                  *Pi_diag_data++ = fabs(G_diag_data[j]) * 0.5 * Gz_data[i];
            }
      }

      /* Fill-in the off-diagonal part */
      {
         hypre_CSRMatrix *G_offd = hypre_ParCSRMatrixOffd(G);
         HYPRE_Int *G_offd_I = hypre_CSRMatrixI(G_offd);
         HYPRE_Int *G_offd_J = hypre_CSRMatrixJ(G_offd);
         HYPRE_Real *G_offd_data = hypre_CSRMatrixData(G_offd);
         HYPRE_Int G_offd_nrows = hypre_CSRMatrixNumRows(G_offd);
         HYPRE_Int G_offd_ncols = hypre_CSRMatrixNumCols(G_offd);
         HYPRE_Int G_offd_nnz = hypre_CSRMatrixNumNonzeros(G_offd);

         hypre_CSRMatrix *Pi_offd = hypre_ParCSRMatrixOffd(Pi);
         HYPRE_Int *Pi_offd_I = hypre_CSRMatrixI(Pi_offd);
         HYPRE_Int *Pi_offd_J = hypre_CSRMatrixJ(Pi_offd);
         HYPRE_Real *Pi_offd_data = hypre_CSRMatrixData(Pi_offd);

         HYPRE_Int *G_cmap = hypre_ParCSRMatrixColMapOffd(G);
         HYPRE_Int *Pi_cmap = hypre_ParCSRMatrixColMapOffd(Pi);

         /* Row pointers written only when the offd block is nonempty */
         if (G_offd_ncols)
            for (i = 0; i < G_offd_nrows+1; i++)
               Pi_offd_I[i] = dim * G_offd_I[i];

         for (i = 0; i < G_offd_nnz; i++)
            for (d = 0; d < dim; d++)
               Pi_offd_J[dim*i+d] = dim*G_offd_J[i]+d;

         for (i = 0; i < G_offd_nrows; i++)
            for (j = G_offd_I[i]; j < G_offd_I[i+1]; j++)
            {
               *Pi_offd_data++ = fabs(G_offd_data[j]) * 0.5 * Gx_data[i];
               *Pi_offd_data++ = fabs(G_offd_data[j]) * 0.5 * Gy_data[i];
               if (dim == 3)
                  *Pi_offd_data++ = fabs(G_offd_data[j]) * 0.5 * Gz_data[i];
            }

         /* Expand the off-processor column map the same way */
         for (i = 0; i < G_offd_ncols; i++)
            for (d = 0; d < dim; d++)
               Pi_cmap[dim*i+d] = dim*G_cmap[i]+d;
      }
   }

   *Pi_ptr = Pi;

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_AMSComputePixyz
 *
 * Construct the components Pix, Piy, Piz of the interpolation matrix Pi,
 * which maps the space of vector linear finite elements to the space of
 * edge finite elements.
 *
 * The construction is based on the fact that each component has the same
 * sparsity structure as G, and the entries can be computed from the vectors
 * Gx, Gy, Gz.
 *--------------------------------------------------------------------------*/

HYPRE_Int hypre_AMSComputePixyz(hypre_ParCSRMatrix *A,
                                hypre_ParCSRMatrix *G,
                                hypre_ParVector *Gx,
                                hypre_ParVector *Gy,
                                hypre_ParVector *Gz,
                                HYPRE_Int dim,
                                hypre_ParCSRMatrix **Pix_ptr,
                                hypre_ParCSRMatrix **Piy_ptr,
                                hypre_ParCSRMatrix **Piz_ptr)
{
   hypre_ParCSRMatrix *Pix, *Piy, *Piz;

   /* Compute Pix, Piy, Piz.  Each component has exactly G's sparsity
      pattern and partitioning, so G's row/col starts are borrowed
      unscaled (Owns*Starts = 0 on all three matrices).
      Note: Piz is only created/assigned when dim == 3; *Piz_ptr is left
      untouched in 2D. */
   {
      HYPRE_Int i, j;

      HYPRE_Real *Gx_data, *Gy_data, *Gz_data;

      MPI_Comm comm = hypre_ParCSRMatrixComm(G);
      HYPRE_Int global_num_rows = hypre_ParCSRMatrixGlobalNumRows(G);
      HYPRE_Int global_num_cols = hypre_ParCSRMatrixGlobalNumCols(G);
      HYPRE_Int *row_starts = hypre_ParCSRMatrixRowStarts(G);
      HYPRE_Int *col_starts = hypre_ParCSRMatrixColStarts(G);
      HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(hypre_ParCSRMatrixOffd(G));
      HYPRE_Int num_nonzeros_diag = hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixDiag(G));
      HYPRE_Int num_nonzeros_offd = hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixOffd(G));

      Pix = hypre_ParCSRMatrixCreate(comm,
                                     global_num_rows,
                                     global_num_cols,
                                     row_starts,
                                     col_starts,
                                     num_cols_offd,
                                     num_nonzeros_diag,
                                     num_nonzeros_offd);
      hypre_ParCSRMatrixOwnsData(Pix) = 1;
      hypre_ParCSRMatrixOwnsRowStarts(Pix) = 0;
      hypre_ParCSRMatrixOwnsColStarts(Pix) = 0;
      hypre_ParCSRMatrixInitialize(Pix);

      Piy = hypre_ParCSRMatrixCreate(comm,
                                     global_num_rows,
                                     global_num_cols,
                                     row_starts,
                                     col_starts,
                                     num_cols_offd,
                                     num_nonzeros_diag,
                                     num_nonzeros_offd);
      hypre_ParCSRMatrixOwnsData(Piy) = 1;
      hypre_ParCSRMatrixOwnsRowStarts(Piy) = 0;
      hypre_ParCSRMatrixOwnsColStarts(Piy) = 0;
      hypre_ParCSRMatrixInitialize(Piy);

      if (dim == 3)
      {
         Piz = hypre_ParCSRMatrixCreate(comm,
                                        global_num_rows,
                                        global_num_cols,
                                        row_starts,
                                        col_starts,
                                        num_cols_offd,
                                        num_nonzeros_diag,
                                        num_nonzeros_offd);
         hypre_ParCSRMatrixOwnsData(Piz) = 1;
         hypre_ParCSRMatrixOwnsRowStarts(Piz) = 0;
         hypre_ParCSRMatrixOwnsColStarts(Piz) = 0;
         hypre_ParCSRMatrixInitialize(Piz);
      }

      Gx_data = hypre_VectorData(hypre_ParVectorLocalVector(Gx));
      Gy_data = hypre_VectorData(hypre_ParVectorLocalVector(Gy));
      if (dim == 3)
         Gz_data = hypre_VectorData(hypre_ParVectorLocalVector(Gz));

      /* Fill-in the diagonal part */
      if (dim == 3)
      {
         hypre_CSRMatrix *G_diag = hypre_ParCSRMatrixDiag(G);
         HYPRE_Int *G_diag_I = hypre_CSRMatrixI(G_diag);
         HYPRE_Int *G_diag_J = hypre_CSRMatrixJ(G_diag);
         HYPRE_Real *G_diag_data = hypre_CSRMatrixData(G_diag);
         HYPRE_Int G_diag_nrows = hypre_CSRMatrixNumRows(G_diag);
         HYPRE_Int G_diag_nnz = hypre_CSRMatrixNumNonzeros(G_diag);

         hypre_CSRMatrix *Pix_diag = hypre_ParCSRMatrixDiag(Pix);
         HYPRE_Int *Pix_diag_I = hypre_CSRMatrixI(Pix_diag);
         HYPRE_Int *Pix_diag_J = hypre_CSRMatrixJ(Pix_diag);
         HYPRE_Real *Pix_diag_data = hypre_CSRMatrixData(Pix_diag);

         hypre_CSRMatrix *Piy_diag = hypre_ParCSRMatrixDiag(Piy);
         HYPRE_Int *Piy_diag_I = hypre_CSRMatrixI(Piy_diag);
         HYPRE_Int *Piy_diag_J = hypre_CSRMatrixJ(Piy_diag);
         HYPRE_Real *Piy_diag_data = hypre_CSRMatrixData(Piy_diag);

         hypre_CSRMatrix *Piz_diag = hypre_ParCSRMatrixDiag(Piz);
         HYPRE_Int *Piz_diag_I = hypre_CSRMatrixI(Piz_diag);
         HYPRE_Int *Piz_diag_J = hypre_CSRMatrixJ(Piz_diag);
         HYPRE_Real *Piz_diag_data = hypre_CSRMatrixData(Piz_diag);

         /* Copy G's sparsity pattern verbatim into all three components */
         for (i = 0; i < G_diag_nrows+1; i++)
         {
            Pix_diag_I[i] = G_diag_I[i];
            Piy_diag_I[i] = G_diag_I[i];
            Piz_diag_I[i] = G_diag_I[i];
         }

         for (i = 0; i < G_diag_nnz; i++)
         {
            Pix_diag_J[i] = G_diag_J[i];
            Piy_diag_J[i] = G_diag_J[i];
            Piz_diag_J[i] = G_diag_J[i];
         }

         /* Values: |G_ij|/2 scaled by the per-row component vectors */
         for (i = 0; i < G_diag_nrows; i++)
            for (j = G_diag_I[i]; j < G_diag_I[i+1]; j++)
            {
               *Pix_diag_data++ = fabs(G_diag_data[j]) * 0.5 * Gx_data[i];
               *Piy_diag_data++ = fabs(G_diag_data[j]) * 0.5 * Gy_data[i];
               *Piz_diag_data++ = fabs(G_diag_data[j]) * 0.5 * Gz_data[i];
            }
      }
      else
      {
         /* 2D version: only Pix and Piy are filled */
         hypre_CSRMatrix *G_diag = hypre_ParCSRMatrixDiag(G);
         HYPRE_Int *G_diag_I = hypre_CSRMatrixI(G_diag);
         HYPRE_Int *G_diag_J = hypre_CSRMatrixJ(G_diag);
         HYPRE_Real *G_diag_data = hypre_CSRMatrixData(G_diag);
         HYPRE_Int G_diag_nrows = hypre_CSRMatrixNumRows(G_diag);
         HYPRE_Int G_diag_nnz = hypre_CSRMatrixNumNonzeros(G_diag);

         hypre_CSRMatrix *Pix_diag = hypre_ParCSRMatrixDiag(Pix);
         HYPRE_Int *Pix_diag_I = hypre_CSRMatrixI(Pix_diag);
         HYPRE_Int *Pix_diag_J = hypre_CSRMatrixJ(Pix_diag);
         HYPRE_Real *Pix_diag_data = hypre_CSRMatrixData(Pix_diag);

         hypre_CSRMatrix *Piy_diag = hypre_ParCSRMatrixDiag(Piy);
         HYPRE_Int *Piy_diag_I = hypre_CSRMatrixI(Piy_diag);
         HYPRE_Int *Piy_diag_J = hypre_CSRMatrixJ(Piy_diag);
         HYPRE_Real *Piy_diag_data = hypre_CSRMatrixData(Piy_diag);

         for (i = 0; i < G_diag_nrows+1; i++)
         {
            Pix_diag_I[i] = G_diag_I[i];
            Piy_diag_I[i] = G_diag_I[i];
         }

         for (i = 0; i < G_diag_nnz; i++)
         {
            Pix_diag_J[i] = G_diag_J[i];
            Piy_diag_J[i] = G_diag_J[i];
         }

         for (i = 0; i < G_diag_nrows; i++)
            for (j = G_diag_I[i]; j < G_diag_I[i+1]; j++)
            {
               *Pix_diag_data++ = fabs(G_diag_data[j]) * 0.5 * Gx_data[i];
               *Piy_diag_data++ = fabs(G_diag_data[j]) * 0.5 * Gy_data[i];
            }
      }

      /* Fill-in the off-diagonal part */
      if (dim == 3)
      {
         hypre_CSRMatrix *G_offd = hypre_ParCSRMatrixOffd(G);
         HYPRE_Int *G_offd_I = hypre_CSRMatrixI(G_offd);
         HYPRE_Int *G_offd_J = hypre_CSRMatrixJ(G_offd);
         HYPRE_Real *G_offd_data = hypre_CSRMatrixData(G_offd);
         HYPRE_Int G_offd_nrows = hypre_CSRMatrixNumRows(G_offd);
         HYPRE_Int G_offd_ncols = hypre_CSRMatrixNumCols(G_offd);
         HYPRE_Int G_offd_nnz = hypre_CSRMatrixNumNonzeros(G_offd);

         hypre_CSRMatrix *Pix_offd = hypre_ParCSRMatrixOffd(Pix);
         HYPRE_Int *Pix_offd_I = hypre_CSRMatrixI(Pix_offd);
         HYPRE_Int *Pix_offd_J = hypre_CSRMatrixJ(Pix_offd);
         HYPRE_Real *Pix_offd_data = hypre_CSRMatrixData(Pix_offd);

         hypre_CSRMatrix *Piy_offd = hypre_ParCSRMatrixOffd(Piy);
         HYPRE_Int *Piy_offd_I = hypre_CSRMatrixI(Piy_offd);
         HYPRE_Int *Piy_offd_J = hypre_CSRMatrixJ(Piy_offd);
         HYPRE_Real *Piy_offd_data = hypre_CSRMatrixData(Piy_offd);

         hypre_CSRMatrix *Piz_offd = hypre_ParCSRMatrixOffd(Piz);
         HYPRE_Int *Piz_offd_I = hypre_CSRMatrixI(Piz_offd);
         HYPRE_Int *Piz_offd_J = hypre_CSRMatrixJ(Piz_offd);
         HYPRE_Real *Piz_offd_data = hypre_CSRMatrixData(Piz_offd);

         HYPRE_Int *G_cmap = hypre_ParCSRMatrixColMapOffd(G);
         HYPRE_Int *Pix_cmap = hypre_ParCSRMatrixColMapOffd(Pix);
         HYPRE_Int *Piy_cmap = hypre_ParCSRMatrixColMapOffd(Piy);
         HYPRE_Int *Piz_cmap = hypre_ParCSRMatrixColMapOffd(Piz);

         /* Row pointers written only when the offd block is nonempty */
         if (G_offd_ncols)
            for (i = 0; i < G_offd_nrows+1; i++)
            {
               Pix_offd_I[i] = G_offd_I[i];
               Piy_offd_I[i] = G_offd_I[i];
               Piz_offd_I[i] = G_offd_I[i];
            }

         for (i = 0; i < G_offd_nnz; i++)
         {
            Pix_offd_J[i] = G_offd_J[i];
            Piy_offd_J[i] = G_offd_J[i];
            Piz_offd_J[i] = G_offd_J[i];
         }

         for (i = 0; i < G_offd_nrows; i++)
            for (j = G_offd_I[i]; j < G_offd_I[i+1]; j++)
            {
               *Pix_offd_data++ = fabs(G_offd_data[j]) * 0.5 * Gx_data[i];
               *Piy_offd_data++ = fabs(G_offd_data[j]) * 0.5 * Gy_data[i];
               *Piz_offd_data++ = fabs(G_offd_data[j]) * 0.5 * Gz_data[i];
            }

         /* Off-processor column maps coincide with G's */
         for (i = 0; i < G_offd_ncols; i++)
         {
            Pix_cmap[i] = G_cmap[i];
            Piy_cmap[i] = G_cmap[i];
            Piz_cmap[i] = G_cmap[i];
         }
      }
      else
      {
         /* 2D version: only Pix and Piy */
         hypre_CSRMatrix *G_offd = hypre_ParCSRMatrixOffd(G);
         HYPRE_Int *G_offd_I = hypre_CSRMatrixI(G_offd);
         HYPRE_Int *G_offd_J = hypre_CSRMatrixJ(G_offd);
         HYPRE_Real *G_offd_data = hypre_CSRMatrixData(G_offd);
         HYPRE_Int G_offd_nrows = hypre_CSRMatrixNumRows(G_offd);
         HYPRE_Int G_offd_ncols = hypre_CSRMatrixNumCols(G_offd);
         HYPRE_Int G_offd_nnz = hypre_CSRMatrixNumNonzeros(G_offd);

         hypre_CSRMatrix *Pix_offd = hypre_ParCSRMatrixOffd(Pix);
         HYPRE_Int *Pix_offd_I = hypre_CSRMatrixI(Pix_offd);
         HYPRE_Int *Pix_offd_J = hypre_CSRMatrixJ(Pix_offd);
         HYPRE_Real *Pix_offd_data = hypre_CSRMatrixData(Pix_offd);

         hypre_CSRMatrix *Piy_offd = hypre_ParCSRMatrixOffd(Piy);
         HYPRE_Int *Piy_offd_I = hypre_CSRMatrixI(Piy_offd);
         HYPRE_Int *Piy_offd_J = hypre_CSRMatrixJ(Piy_offd);
         HYPRE_Real *Piy_offd_data = hypre_CSRMatrixData(Piy_offd);

         HYPRE_Int *G_cmap = hypre_ParCSRMatrixColMapOffd(G);
         HYPRE_Int *Pix_cmap = hypre_ParCSRMatrixColMapOffd(Pix);
         HYPRE_Int *Piy_cmap = hypre_ParCSRMatrixColMapOffd(Piy);

         if (G_offd_ncols)
            for (i = 0; i < G_offd_nrows+1; i++)
            {
               Pix_offd_I[i] = G_offd_I[i];
               Piy_offd_I[i] = G_offd_I[i];
            }

         for (i = 0; i < G_offd_nnz; i++)
         {
            Pix_offd_J[i] = G_offd_J[i];
            Piy_offd_J[i] = G_offd_J[i];
         }

         for (i = 0; i < G_offd_nrows; i++)
            for (j = G_offd_I[i]; j < G_offd_I[i+1]; j++)
            {
               *Pix_offd_data++ = fabs(G_offd_data[j]) * 0.5 * Gx_data[i];
               *Piy_offd_data++ = fabs(G_offd_data[j]) * 0.5 * Gy_data[i];
            }

         for (i = 0; i < G_offd_ncols; i++)
         {
            Pix_cmap[i] = G_cmap[i];
            Piy_cmap[i] = G_cmap[i];
         }
      }
   }

   *Pix_ptr = Pix;
   *Piy_ptr = Piy;
   if (dim == 3)
      *Piz_ptr = Piz;

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_AMSComputeGPi
 *
 * Construct the matrix [G,Pi] which can be considered an interpolation
 * matrix from S_h^4 (4 copies of the scalar linear finite element space)
 * to the edge finite elements space.
*--------------------------------------------------------------------------*/ HYPRE_Int hypre_AMSComputeGPi(hypre_ParCSRMatrix *A, hypre_ParCSRMatrix *G, hypre_ParVector *Gx, hypre_ParVector *Gy, hypre_ParVector *Gz, HYPRE_Int dim, hypre_ParCSRMatrix **GPi_ptr) { hypre_ParCSRMatrix *GPi; /* Take into account G */ dim++; /* Compute GPi = [Pi_x, Pi_y, Pi_z, G] */ { HYPRE_Int i, j, d; HYPRE_Real *Gx_data, *Gy_data, *Gz_data; MPI_Comm comm = hypre_ParCSRMatrixComm(G); HYPRE_Int global_num_rows = hypre_ParCSRMatrixGlobalNumRows(G); HYPRE_Int global_num_cols = dim*hypre_ParCSRMatrixGlobalNumCols(G); HYPRE_Int *row_starts = hypre_ParCSRMatrixRowStarts(G); HYPRE_Int col_starts_size, *col_starts; HYPRE_Int num_cols_offd = dim*hypre_CSRMatrixNumCols(hypre_ParCSRMatrixOffd(G)); HYPRE_Int num_nonzeros_diag = dim*hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixDiag(G)); HYPRE_Int num_nonzeros_offd = dim*hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixOffd(G)); HYPRE_Int *col_starts_G = hypre_ParCSRMatrixColStarts(G); #ifdef HYPRE_NO_GLOBAL_PARTITION col_starts_size = 2; #else HYPRE_Int num_procs; hypre_MPI_Comm_size(comm, &num_procs); col_starts_size = num_procs+1; #endif col_starts = hypre_TAlloc(HYPRE_Int,col_starts_size); for (i = 0; i < col_starts_size; i++) col_starts[i] = dim * col_starts_G[i]; GPi = hypre_ParCSRMatrixCreate(comm, global_num_rows, global_num_cols, row_starts, col_starts, num_cols_offd, num_nonzeros_diag, num_nonzeros_offd); hypre_ParCSRMatrixOwnsData(GPi) = 1; hypre_ParCSRMatrixOwnsRowStarts(GPi) = 0; hypre_ParCSRMatrixOwnsColStarts(GPi) = 1; hypre_ParCSRMatrixInitialize(GPi); Gx_data = hypre_VectorData(hypre_ParVectorLocalVector(Gx)); Gy_data = hypre_VectorData(hypre_ParVectorLocalVector(Gy)); if (dim == 4) Gz_data = hypre_VectorData(hypre_ParVectorLocalVector(Gz)); /* Fill-in the diagonal part */ { hypre_CSRMatrix *G_diag = hypre_ParCSRMatrixDiag(G); HYPRE_Int *G_diag_I = hypre_CSRMatrixI(G_diag); HYPRE_Int *G_diag_J = hypre_CSRMatrixJ(G_diag); HYPRE_Real 
*G_diag_data = hypre_CSRMatrixData(G_diag); HYPRE_Int G_diag_nrows = hypre_CSRMatrixNumRows(G_diag); HYPRE_Int G_diag_nnz = hypre_CSRMatrixNumNonzeros(G_diag); hypre_CSRMatrix *GPi_diag = hypre_ParCSRMatrixDiag(GPi); HYPRE_Int *GPi_diag_I = hypre_CSRMatrixI(GPi_diag); HYPRE_Int *GPi_diag_J = hypre_CSRMatrixJ(GPi_diag); HYPRE_Real *GPi_diag_data = hypre_CSRMatrixData(GPi_diag); for (i = 0; i < G_diag_nrows+1; i++) GPi_diag_I[i] = dim * G_diag_I[i]; for (i = 0; i < G_diag_nnz; i++) for (d = 0; d < dim; d++) GPi_diag_J[dim*i+d] = dim*G_diag_J[i]+d; for (i = 0; i < G_diag_nrows; i++) for (j = G_diag_I[i]; j < G_diag_I[i+1]; j++) { *GPi_diag_data++ = G_diag_data[j]; *GPi_diag_data++ = fabs(G_diag_data[j]) * 0.5 * Gx_data[i]; *GPi_diag_data++ = fabs(G_diag_data[j]) * 0.5 * Gy_data[i]; if (dim == 4) *GPi_diag_data++ = fabs(G_diag_data[j]) * 0.5 * Gz_data[i]; } } /* Fill-in the off-diagonal part */ { hypre_CSRMatrix *G_offd = hypre_ParCSRMatrixOffd(G); HYPRE_Int *G_offd_I = hypre_CSRMatrixI(G_offd); HYPRE_Int *G_offd_J = hypre_CSRMatrixJ(G_offd); HYPRE_Real *G_offd_data = hypre_CSRMatrixData(G_offd); HYPRE_Int G_offd_nrows = hypre_CSRMatrixNumRows(G_offd); HYPRE_Int G_offd_ncols = hypre_CSRMatrixNumCols(G_offd); HYPRE_Int G_offd_nnz = hypre_CSRMatrixNumNonzeros(G_offd); hypre_CSRMatrix *GPi_offd = hypre_ParCSRMatrixOffd(GPi); HYPRE_Int *GPi_offd_I = hypre_CSRMatrixI(GPi_offd); HYPRE_Int *GPi_offd_J = hypre_CSRMatrixJ(GPi_offd); HYPRE_Real *GPi_offd_data = hypre_CSRMatrixData(GPi_offd); HYPRE_Int *G_cmap = hypre_ParCSRMatrixColMapOffd(G); HYPRE_Int *GPi_cmap = hypre_ParCSRMatrixColMapOffd(GPi); if (G_offd_ncols) for (i = 0; i < G_offd_nrows+1; i++) GPi_offd_I[i] = dim * G_offd_I[i]; for (i = 0; i < G_offd_nnz; i++) for (d = 0; d < dim; d++) GPi_offd_J[dim*i+d] = dim*G_offd_J[i]+d; for (i = 0; i < G_offd_nrows; i++) for (j = G_offd_I[i]; j < G_offd_I[i+1]; j++) { *GPi_offd_data++ = G_offd_data[j]; *GPi_offd_data++ = fabs(G_offd_data[j]) * 0.5 * Gx_data[i]; *GPi_offd_data++ = 
fabs(G_offd_data[j]) * 0.5 * Gy_data[i];
            if (dim == 4)
               *GPi_offd_data++ = fabs(G_offd_data[j]) * 0.5 * Gz_data[i];
         }

      /* each off-diag column of G expands into dim consecutive columns of GPi */
      for (i = 0; i < G_offd_ncols; i++)
         for (d = 0; d < dim; d++)
            GPi_cmap[dim*i+d] = dim*G_cmap[i]+d;
   }
   }

   *GPi_ptr = GPi;

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_AMSSetup
 *
 * Construct the AMS solver components.
 *
 * The following functions need to be called before hypre_AMSSetup():
 * - hypre_AMSSetDimension() (if solving a 2D problem)
 * - hypre_AMSSetDiscreteGradient()
 * - hypre_AMSSetCoordinateVectors() or hypre_AMSSetEdgeConstantVectors
 *--------------------------------------------------------------------------*/

HYPRE_Int hypre_AMSSetup(void *solver,
                         hypre_ParCSRMatrix *A,
                         hypre_ParVector *b,
                         hypre_ParVector *x)
{
   hypre_AMSData *ams_data = (hypre_AMSData *) solver;

   /* tracks how the G{x,y,z} vectors were obtained:
      1 = computed here from the vertex coordinates x,y,z,
      2 = supplied directly by the caller */
   HYPRE_Int input_info = 0;

   ams_data -> A = A;

   /* Modifications for problems with zero-conductivity regions */
   if (ams_data -> interior_nodes)
   {
      hypre_ParCSRMatrix *G0t, *Aorig = A;

      /* Make sure that multiple Setup()+Solve() give identical results */
      ams_data -> solve_counter = 0;

      /* Construct the discrete gradient matrix for the zero-conductivity
         region by eliminating the zero-conductivity nodes from G^t. The
         range of G0 represents the kernel of A, i.e. the gradients of nodal
         basis functions supported in zero-conductivity regions. */
      hypre_ParCSRMatrixTranspose(ams_data -> G, &G0t, 1);
      {
         HYPRE_Int i, j;
         HYPRE_Int nv = hypre_ParCSRMatrixNumCols(ams_data -> G);
         hypre_CSRMatrix *G0td = hypre_ParCSRMatrixDiag(G0t);
         HYPRE_Int *G0tdI = hypre_CSRMatrixI(G0td);
         HYPRE_Real *G0tdA = hypre_CSRMatrixData(G0td);
         hypre_CSRMatrix *G0to = hypre_ParCSRMatrixOffd(G0t);
         HYPRE_Int *G0toI = hypre_CSRMatrixI(G0to);
         HYPRE_Real *G0toA = hypre_CSRMatrixData(G0to);
         HYPRE_Real *interior_nodes_data=hypre_VectorData(
            hypre_ParVectorLocalVector((hypre_ParVector*) ams_data -> interior_nodes));
         /* zero the rows of G^t belonging to nodes NOT marked interior
            (marker value 1) -- what remains is G0^t */
         for (i = 0; i < nv; i++)
         {
            if (interior_nodes_data[i] != 1)
            {
               for (j = G0tdI[i]; j < G0tdI[i+1]; j++)
                  G0tdA[j] = 0.0;
               if (G0toI) /* offd part may be empty on one processor */
                  for (j = G0toI[i]; j < G0toI[i+1]; j++)
                     G0toA[j] = 0.0;
            }
         }
      }
      hypre_ParCSRMatrixTranspose(G0t, & ams_data -> G0, 1);

      /* Construct the subspace matrix A_G0 = G0^T G0 */
      ams_data -> A_G0 = hypre_ParMatmul(G0t, ams_data -> G0);
      hypre_ParCSRMatrixFixZeroRows(ams_data -> A_G0);

      /* Create AMG solver for A_G0 */
      HYPRE_BoomerAMGCreate(&ams_data -> B_G0);
      HYPRE_BoomerAMGSetCoarsenType(ams_data -> B_G0, ams_data -> B_G_coarsen_type);
      HYPRE_BoomerAMGSetAggNumLevels(ams_data -> B_G0, ams_data -> B_G_agg_levels);
      HYPRE_BoomerAMGSetRelaxType(ams_data -> B_G0, ams_data -> B_G_relax_type);
      HYPRE_BoomerAMGSetNumSweeps(ams_data -> B_G0, 1);
      HYPRE_BoomerAMGSetMaxLevels(ams_data -> B_G0, 25);
      HYPRE_BoomerAMGSetTol(ams_data -> B_G0, 0.0);
      HYPRE_BoomerAMGSetMaxIter(ams_data -> B_G0, 3); /* use just a few V-cycles */
      HYPRE_BoomerAMGSetStrongThreshold(ams_data -> B_G0, ams_data -> B_G_theta);
      HYPRE_BoomerAMGSetInterpType(ams_data -> B_G0, ams_data -> B_G_interp_type);
      HYPRE_BoomerAMGSetPMaxElmts(ams_data -> B_G0, ams_data -> B_G_Pmax);
      HYPRE_BoomerAMGSetMinCoarseSize(ams_data -> B_G0, 2); /* don't coarsen to 0 */
      /* Generally, don't use exact solve on the coarsest level (matrix may be singular) */
      HYPRE_BoomerAMGSetCycleRelaxType(ams_data -> B_G0,
                                       ams_data -> B_G_coarse_relax_type, 3);
      HYPRE_BoomerAMGSetup(ams_data -> B_G0,
                           (HYPRE_ParCSRMatrix)ams_data -> A_G0,
                           0, 0);

      /* Construct the preconditioner for ams_data->A = A + G0 G0^T.
         NOTE: this can be optimized significantly by taking into account that
         the sparsity pattern of A is subset of the sparsity pattern of G0 G0^T */
      {
         /* NOTE(review): this inner A shadows the function parameter A on
            purpose -- it holds the penalty term G0 G0^T, while the original
            input matrix is kept in Aorig/B. */
         hypre_ParCSRMatrix *A = hypre_ParMatmul(ams_data -> G0, G0t);
         hypre_ParCSRMatrix *B = Aorig;
         hypre_ParCSRMatrix **C_ptr = &ams_data -> A;

         hypre_ParCSRMatrix *C;
         hypre_CSRMatrix *A_local, *B_local, *C_local, *C_tmp;

         MPI_Comm comm = hypre_ParCSRMatrixComm(A);
         HYPRE_Int global_num_rows = hypre_ParCSRMatrixGlobalNumRows(A);
         HYPRE_Int global_num_cols = hypre_ParCSRMatrixGlobalNumCols(A);
         HYPRE_Int *row_starts = hypre_ParCSRMatrixRowStarts(A);
         HYPRE_Int *col_starts = hypre_ParCSRMatrixColStarts(A);
         HYPRE_Int A_num_cols_offd = hypre_CSRMatrixNumCols(hypre_ParCSRMatrixOffd(A));
         HYPRE_Int A_num_nonzeros_diag = hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixDiag(A));
         HYPRE_Int A_num_nonzeros_offd = hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixOffd(A));
         HYPRE_Int B_num_cols_offd = hypre_CSRMatrixNumCols(hypre_ParCSRMatrixOffd(B));
         HYPRE_Int B_num_nonzeros_diag = hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixDiag(B));
         HYPRE_Int B_num_nonzeros_offd = hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixOffd(B));

         A_local = hypre_MergeDiagAndOffd(A);
         B_local = hypre_MergeDiagAndOffd(B);
         /* scale (penalize) G0 G0^T before adding it to the matrix */
         {
            HYPRE_Int i, nnz = hypre_CSRMatrixNumNonzeros(A_local);
            HYPRE_Real *data = hypre_CSRMatrixData(A_local);
            HYPRE_Real *dataB = hypre_CSRMatrixData(B_local);
            HYPRE_Int nnzB = hypre_CSRMatrixNumNonzeros(B_local);
            HYPRE_Real factor, lfactor;
            /* local max |A_ij| ... */
            lfactor = -1;
            for (i = 0; i < nnzB; i++)
               if (fabs(dataB[i]) > lfactor)
                  lfactor = fabs(dataB[i]);
            lfactor *= 1e-10; /* scaling factor: max|A_ij|*1e-10 */
            /* ... reduced to the global max over all processors */
            hypre_MPI_Allreduce(&lfactor, &factor, 1, HYPRE_MPI_REAL, hypre_MPI_MAX,
                                hypre_ParCSRMatrixComm(A));
            for (i = 0; i < nnz; i++)
               data[i] *= factor;
         }
         C_tmp = hypre_CSRMatrixAdd(A_local, B_local);
         /* DeleteZeros returns NULL when there was nothing to delete;
            in that case keep the sum as-is */
         C_local = hypre_CSRMatrixDeleteZeros(C_tmp,0.0);
         if (C_local)
            hypre_CSRMatrixDestroy(C_tmp);
         else
            C_local = C_tmp;

         C = hypre_ParCSRMatrixCreate (comm,
                                       global_num_rows,
                                       global_num_cols,
                                       row_starts,
                                       col_starts,
                                       A_num_cols_offd + B_num_cols_offd,
                                       A_num_nonzeros_diag + B_num_nonzeros_diag,
                                       A_num_nonzeros_offd + B_num_nonzeros_offd);
         GenerateDiagAndOffd(C_local, C,
                             hypre_ParCSRMatrixFirstColDiag(A),
                             hypre_ParCSRMatrixLastColDiag(A));

         /* transfer ownership of col_starts from G0t's product to C, so the
            partitioning array survives the destruction of A/G0t below */
         hypre_ParCSRMatrixOwnsRowStarts(C) = 0;
         hypre_ParCSRMatrixOwnsColStarts(C) = 1;
         hypre_ParCSRMatrixOwnsColStarts(G0t) = 0;

         hypre_CSRMatrixDestroy(A_local);
         hypre_CSRMatrixDestroy(B_local);
         hypre_CSRMatrixDestroy(C_local);

         hypre_ParCSRMatrixDestroy(A);

         *C_ptr = C;
      }

      hypre_ParCSRMatrixDestroy(G0t);
   }

   /* Make sure that the first entry in each row is the diagonal one. */
   /* hypre_CSRMatrixReorder(hypre_ParCSRMatrixDiag(ams_data -> A)); */

   /* Compute the l1 norm of the rows of A (used by l1-scaled smoothers) */
   if (ams_data -> A_relax_type >= 1 && ams_data -> A_relax_type <= 4)
      hypre_ParCSRComputeL1Norms(ams_data -> A, ams_data -> A_relax_type,
                                 NULL, &ams_data -> A_l1_norms);

   /* Chebyshev? */
   if (ams_data -> A_relax_type == 16)
   {
      hypre_ParCSRMaxEigEstimateCG(ams_data->A, 1, 10,
                                   &ams_data->A_max_eig_est,
                                   &ams_data->A_min_eig_est);
   }

   /* If not given, compute Gx, Gy and Gz */
   {
      if (ams_data -> x != NULL &&
          ams_data -> y != NULL &&
          (ams_data -> dim == 2 || ams_data -> z != NULL))
         input_info = 1;

      if (ams_data -> Gx != NULL &&
          ams_data -> Gy != NULL &&
          (ams_data -> dim == 2 || ams_data -> Gz != NULL))
         input_info = 2;

      if (input_info == 1)
      {
         /* G{x,y,z} = G applied to the vertex coordinate vectors */
         ams_data -> Gx = hypre_ParVectorInRangeOf(ams_data -> G);
         hypre_ParCSRMatrixMatvec (1.0, ams_data -> G, ams_data -> x, 0.0, ams_data -> Gx);
         ams_data -> Gy = hypre_ParVectorInRangeOf(ams_data -> G);
         hypre_ParCSRMatrixMatvec (1.0, ams_data -> G, ams_data -> y, 0.0, ams_data -> Gy);
         if (ams_data -> dim == 3)
         {
            ams_data -> Gz = hypre_ParVectorInRangeOf(ams_data -> G);
            hypre_ParCSRMatrixMatvec (1.0, ams_data -> G, ams_data -> z, 0.0, ams_data -> Gz);
         }
      }
   }

   if (ams_data -> Pi == NULL && ams_data -> Pix == NULL)
   {
      if (ams_data -> cycle_type == 20)
         /* Construct the combined interpolation matrix [G,Pi] */
         hypre_AMSComputeGPi(ams_data -> A,
                             ams_data -> G,
                             ams_data -> Gx,
                             ams_data -> Gy,
                             ams_data -> Gz,
                             ams_data -> dim,
                             &ams_data -> Pi);
      else if (ams_data -> cycle_type > 10)
         /* Construct Pi{x,y,z} instead of Pi = [Pix,Piy,Piz] */
         hypre_AMSComputePixyz(ams_data -> A,
                               ams_data -> G,
                               ams_data -> Gx,
                               ams_data -> Gy,
                               ams_data -> Gz,
                               ams_data -> dim,
                               &ams_data -> Pix,
                               &ams_data -> Piy,
                               &ams_data -> Piz);
      else
         /* Construct the Pi interpolation matrix */
         hypre_AMSComputePi(ams_data -> A,
                            ams_data -> G,
                            ams_data -> Gx,
                            ams_data -> Gy,
                            ams_data -> Gz,
                            ams_data -> dim,
                            &ams_data -> Pi);
   }

   /* Keep Gx, Gy and Gz only if use the method with discrete divergence
      stabilization (where we use them to compute the local mesh size). */
   if (input_info == 1 && ams_data -> cycle_type != 9)
   {
      hypre_ParVectorDestroy(ams_data -> Gx);
      hypre_ParVectorDestroy(ams_data -> Gy);
      if (ams_data -> dim == 3)
         hypre_ParVectorDestroy(ams_data -> Gz);
   }

   /* Create the AMG solver on the range of G^T */
   if (!ams_data -> beta_is_zero && ams_data -> cycle_type != 20)
   {
      HYPRE_BoomerAMGCreate(&ams_data -> B_G);
      HYPRE_BoomerAMGSetCoarsenType(ams_data -> B_G, ams_data -> B_G_coarsen_type);
      HYPRE_BoomerAMGSetAggNumLevels(ams_data -> B_G, ams_data -> B_G_agg_levels);
      HYPRE_BoomerAMGSetRelaxType(ams_data -> B_G, ams_data -> B_G_relax_type);
      HYPRE_BoomerAMGSetNumSweeps(ams_data -> B_G, 1);
      HYPRE_BoomerAMGSetMaxLevels(ams_data -> B_G, 25);
      HYPRE_BoomerAMGSetTol(ams_data -> B_G, 0.0);
      HYPRE_BoomerAMGSetMaxIter(ams_data -> B_G, 1);
      HYPRE_BoomerAMGSetStrongThreshold(ams_data -> B_G, ams_data -> B_G_theta);
      HYPRE_BoomerAMGSetInterpType(ams_data -> B_G, ams_data -> B_G_interp_type);
      HYPRE_BoomerAMGSetPMaxElmts(ams_data -> B_G, ams_data -> B_G_Pmax);
      HYPRE_BoomerAMGSetMinCoarseSize(ams_data -> B_G, 2); /* don't coarsen to 0 */
      /* Generally, don't use exact solve on the coarsest level (matrix may be singular) */
      HYPRE_BoomerAMGSetCycleRelaxType(ams_data -> B_G,
                                       ams_data -> B_G_coarse_relax_type, 3);

      if (ams_data -> cycle_type == 0)
         HYPRE_BoomerAMGSetMaxLevels(ams_data -> B_G, 2);

      /* If not given, construct the coarse space matrix by RAP */
      if (!ams_data -> A_G)
      {
         HYPRE_Int G_owned_col_starts;
         if (!hypre_ParCSRMatrixCommPkg(ams_data -> G))
            hypre_MatvecCommPkgCreate(ams_data -> G);

         if (!hypre_ParCSRMatrixCommPkg(ams_data -> A))
            hypre_MatvecCommPkgCreate(ams_data -> A);

         /* RAP may flip the ownership flag of G's col_starts; save/restore it */
         G_owned_col_starts = hypre_ParCSRMatrixOwnsColStarts(ams_data -> G);

         hypre_BoomerAMGBuildCoarseOperator(ams_data -> G,
                                            ams_data -> A,
                                            ams_data -> G,
                                            &ams_data -> A_G);

         /* Make sure that A_G has no zero rows (this can happen
            if beta is zero in part of the domain). */
         hypre_ParCSRMatrixFixZeroRows(ams_data -> A_G);
         hypre_ParCSRMatrixOwnsColStarts(ams_data -> G) = G_owned_col_starts;
         hypre_ParCSRMatrixOwnsRowStarts(ams_data -> A_G) = 0;

         ams_data -> owns_A_G = 1;
      }

      HYPRE_BoomerAMGSetup(ams_data -> B_G,
                           (HYPRE_ParCSRMatrix)ams_data -> A_G,
                           0, 0);
   }

   if (ams_data -> cycle_type > 10 && ams_data -> cycle_type != 20)
   /* Create the AMG solvers on the range of Pi{x,y,z}^T */
   {
      HYPRE_Int P_owned_col_starts;
      HYPRE_BoomerAMGCreate(&ams_data -> B_Pix);
      HYPRE_BoomerAMGSetCoarsenType(ams_data -> B_Pix, ams_data -> B_Pi_coarsen_type);
      HYPRE_BoomerAMGSetAggNumLevels(ams_data -> B_Pix, ams_data -> B_Pi_agg_levels);
      HYPRE_BoomerAMGSetRelaxType(ams_data -> B_Pix, ams_data -> B_Pi_relax_type);
      HYPRE_BoomerAMGSetNumSweeps(ams_data -> B_Pix, 1);
      HYPRE_BoomerAMGSetMaxLevels(ams_data -> B_Pix, 25);
      HYPRE_BoomerAMGSetTol(ams_data -> B_Pix, 0.0);
      HYPRE_BoomerAMGSetMaxIter(ams_data -> B_Pix, 1);
      HYPRE_BoomerAMGSetStrongThreshold(ams_data -> B_Pix, ams_data -> B_Pi_theta);
      HYPRE_BoomerAMGSetInterpType(ams_data -> B_Pix, ams_data -> B_Pi_interp_type);
      HYPRE_BoomerAMGSetPMaxElmts(ams_data -> B_Pix, ams_data -> B_Pi_Pmax);
      HYPRE_BoomerAMGSetMinCoarseSize(ams_data -> B_Pix, 2);

      HYPRE_BoomerAMGCreate(&ams_data -> B_Piy);
      HYPRE_BoomerAMGSetCoarsenType(ams_data -> B_Piy, ams_data -> B_Pi_coarsen_type);
      HYPRE_BoomerAMGSetAggNumLevels(ams_data -> B_Piy, ams_data -> B_Pi_agg_levels);
      HYPRE_BoomerAMGSetRelaxType(ams_data -> B_Piy, ams_data -> B_Pi_relax_type);
      HYPRE_BoomerAMGSetNumSweeps(ams_data -> B_Piy, 1);
      HYPRE_BoomerAMGSetMaxLevels(ams_data -> B_Piy, 25);
      HYPRE_BoomerAMGSetTol(ams_data -> B_Piy, 0.0);
      HYPRE_BoomerAMGSetMaxIter(ams_data -> B_Piy, 1);
      HYPRE_BoomerAMGSetStrongThreshold(ams_data -> B_Piy, ams_data -> B_Pi_theta);
      HYPRE_BoomerAMGSetInterpType(ams_data -> B_Piy, ams_data -> B_Pi_interp_type);
      HYPRE_BoomerAMGSetPMaxElmts(ams_data -> B_Piy, ams_data -> B_Pi_Pmax);
      HYPRE_BoomerAMGSetMinCoarseSize(ams_data -> B_Piy, 2);

      HYPRE_BoomerAMGCreate(&ams_data -> B_Piz);
      HYPRE_BoomerAMGSetCoarsenType(ams_data -> B_Piz, ams_data -> B_Pi_coarsen_type);
      HYPRE_BoomerAMGSetAggNumLevels(ams_data -> B_Piz, ams_data -> B_Pi_agg_levels);
      HYPRE_BoomerAMGSetRelaxType(ams_data -> B_Piz, ams_data -> B_Pi_relax_type);
      HYPRE_BoomerAMGSetNumSweeps(ams_data -> B_Piz, 1);
      HYPRE_BoomerAMGSetMaxLevels(ams_data -> B_Piz, 25);
      HYPRE_BoomerAMGSetTol(ams_data -> B_Piz, 0.0);
      HYPRE_BoomerAMGSetMaxIter(ams_data -> B_Piz, 1);
      HYPRE_BoomerAMGSetStrongThreshold(ams_data -> B_Piz, ams_data -> B_Pi_theta);
      HYPRE_BoomerAMGSetInterpType(ams_data -> B_Piz, ams_data -> B_Pi_interp_type);
      HYPRE_BoomerAMGSetPMaxElmts(ams_data -> B_Piz, ams_data -> B_Pi_Pmax);
      HYPRE_BoomerAMGSetMinCoarseSize(ams_data -> B_Piz, 2);

      /* Generally, don't use exact solve on the coarsest level (matrices may be singular) */
      HYPRE_BoomerAMGSetCycleRelaxType(ams_data -> B_Pix,
                                       ams_data -> B_Pi_coarse_relax_type, 3);
      HYPRE_BoomerAMGSetCycleRelaxType(ams_data -> B_Piy,
                                       ams_data -> B_Pi_coarse_relax_type, 3);
      HYPRE_BoomerAMGSetCycleRelaxType(ams_data -> B_Piz,
                                       ams_data -> B_Pi_coarse_relax_type, 3);

      if (ams_data -> cycle_type == 0)
      {
         HYPRE_BoomerAMGSetMaxLevels(ams_data -> B_Pix, 2);
         HYPRE_BoomerAMGSetMaxLevels(ams_data -> B_Piy, 2);
         HYPRE_BoomerAMGSetMaxLevels(ams_data -> B_Piz, 2);
      }

      /* Construct the coarse space matrices by RAP */
      if (!hypre_ParCSRMatrixCommPkg(ams_data -> Pix))
         hypre_MatvecCommPkgCreate(ams_data -> Pix);
      P_owned_col_starts = hypre_ParCSRMatrixOwnsColStarts(ams_data -> Pix);
      hypre_BoomerAMGBuildCoarseOperator(ams_data -> Pix,
                                         ams_data -> A,
                                         ams_data -> Pix,
                                         &ams_data -> A_Pix);
      if (!P_owned_col_starts)
      {
         hypre_ParCSRMatrixOwnsRowStarts(ams_data -> A_Pix) = 0;
         hypre_ParCSRMatrixOwnsColStarts(ams_data -> A_Pix) = 0;
      }

      /* Make sure that A_Pix has no zero rows (this can happen
         for some kinds of boundary conditions with contact). */
      hypre_ParCSRMatrixFixZeroRows(ams_data -> A_Pix);

      HYPRE_BoomerAMGSetup(ams_data -> B_Pix,
                           (HYPRE_ParCSRMatrix)ams_data -> A_Pix,
                           0, 0);

      if (!hypre_ParCSRMatrixCommPkg(ams_data -> Piy))
         hypre_MatvecCommPkgCreate(ams_data -> Piy);
      P_owned_col_starts = hypre_ParCSRMatrixOwnsColStarts(ams_data -> Piy);
      hypre_BoomerAMGBuildCoarseOperator(ams_data -> Piy,
                                         ams_data -> A,
                                         ams_data -> Piy,
                                         &ams_data -> A_Piy);
      if (!P_owned_col_starts)
      {
         hypre_ParCSRMatrixOwnsRowStarts(ams_data -> A_Piy) = 0;
         hypre_ParCSRMatrixOwnsColStarts(ams_data -> A_Piy) = 0;
      }

      /* Make sure that A_Piy has no zero rows (this can happen
         for some kinds of boundary conditions with contact). */
      hypre_ParCSRMatrixFixZeroRows(ams_data -> A_Piy);

      HYPRE_BoomerAMGSetup(ams_data -> B_Piy,
                           (HYPRE_ParCSRMatrix)ams_data -> A_Piy,
                           0, 0);

      if (ams_data -> Piz) /* absent in 2D problems */
      {
         if (!hypre_ParCSRMatrixCommPkg(ams_data -> Piz))
            hypre_MatvecCommPkgCreate(ams_data -> Piz);
         P_owned_col_starts = hypre_ParCSRMatrixOwnsColStarts(ams_data -> Piz);
         hypre_BoomerAMGBuildCoarseOperator(ams_data -> Piz,
                                            ams_data -> A,
                                            ams_data -> Piz,
                                            &ams_data -> A_Piz);
         if (!P_owned_col_starts)
         {
            hypre_ParCSRMatrixOwnsRowStarts(ams_data -> A_Piz) = 0;
            hypre_ParCSRMatrixOwnsColStarts(ams_data -> A_Piz) = 0;
         }

         /* Make sure that A_Piz has no zero rows (this can happen
            for some kinds of boundary conditions with contact). */
         hypre_ParCSRMatrixFixZeroRows(ams_data -> A_Piz);

         HYPRE_BoomerAMGSetup(ams_data -> B_Piz,
                              (HYPRE_ParCSRMatrix)ams_data -> A_Piz,
                              0, 0);
      }
   }
   else
   /* Create the AMG solver on the range of Pi^T */
   {
      HYPRE_BoomerAMGCreate(&ams_data -> B_Pi);
      HYPRE_BoomerAMGSetCoarsenType(ams_data -> B_Pi, ams_data -> B_Pi_coarsen_type);
      HYPRE_BoomerAMGSetAggNumLevels(ams_data -> B_Pi, ams_data -> B_Pi_agg_levels);
      HYPRE_BoomerAMGSetRelaxType(ams_data -> B_Pi, ams_data -> B_Pi_relax_type);
      HYPRE_BoomerAMGSetNumSweeps(ams_data -> B_Pi, 1);
      HYPRE_BoomerAMGSetMaxLevels(ams_data -> B_Pi, 25);
      HYPRE_BoomerAMGSetTol(ams_data -> B_Pi, 0.0);
      HYPRE_BoomerAMGSetMaxIter(ams_data -> B_Pi, 1);
      HYPRE_BoomerAMGSetStrongThreshold(ams_data -> B_Pi, ams_data -> B_Pi_theta);
      HYPRE_BoomerAMGSetInterpType(ams_data -> B_Pi, ams_data -> B_Pi_interp_type);
      HYPRE_BoomerAMGSetPMaxElmts(ams_data -> B_Pi, ams_data -> B_Pi_Pmax);
      HYPRE_BoomerAMGSetMinCoarseSize(ams_data -> B_Pi, 2); /* don't coarsen to 0 */
      /* Generally, don't use exact solve on the coarsest level (matrix may be singular) */
      HYPRE_BoomerAMGSetCycleRelaxType(ams_data -> B_Pi,
                                       ams_data -> B_Pi_coarse_relax_type, 3);

      if (ams_data -> cycle_type == 0)
         HYPRE_BoomerAMGSetMaxLevels(ams_data -> B_Pi, 2);

      /* If not given, construct the coarse space matrix by RAP and
         notify BoomerAMG that this is a dim x dim block system. */
      if (!ams_data -> A_Pi)
      {
         HYPRE_Int P_owned_col_starts =
            hypre_ParCSRMatrixOwnsColStarts(ams_data -> Pi);
         if (!hypre_ParCSRMatrixCommPkg(ams_data -> Pi))
            hypre_MatvecCommPkgCreate(ams_data -> Pi);

         if (!hypre_ParCSRMatrixCommPkg(ams_data -> A))
            hypre_MatvecCommPkgCreate(ams_data -> A);

         if (ams_data -> cycle_type == 9)
         {
            /* Add a discrete divergence term to A before computing  Pi^t A Pi */
            {
               hypre_ParCSRMatrix *Gt, *GGt, *ApGGt;
               hypre_ParCSRMatrixTranspose(ams_data -> G, &Gt, 1);
               hypre_ParCSRMatrixOwnsColStarts(Gt) = 0;
               hypre_ParCSRMatrixOwnsRowStarts(Gt) = 0;

               /* scale GGt by h^2 */
               {
                  HYPRE_Real h2;
                  HYPRE_Int i, j, k, ne;

                  hypre_CSRMatrix *Gt_diag = hypre_ParCSRMatrixDiag(Gt);
                  HYPRE_Int Gt_num_rows = hypre_CSRMatrixNumRows(Gt_diag);
                  HYPRE_Int *Gt_diag_I = hypre_CSRMatrixI(Gt_diag);
                  HYPRE_Int *Gt_diag_J = hypre_CSRMatrixJ(Gt_diag);
                  HYPRE_Real *Gt_diag_data = hypre_CSRMatrixData(Gt_diag);

                  hypre_CSRMatrix *Gt_offd = hypre_ParCSRMatrixOffd(Gt);
                  HYPRE_Int *Gt_offd_I = hypre_CSRMatrixI(Gt_offd);
                  HYPRE_Real *Gt_offd_data = hypre_CSRMatrixData(Gt_offd);

                  HYPRE_Real *Gx_data = hypre_VectorData(hypre_ParVectorLocalVector(ams_data -> Gx));
                  HYPRE_Real *Gy_data = hypre_VectorData(hypre_ParVectorLocalVector(ams_data -> Gy));
                  HYPRE_Real *Gz_data = hypre_VectorData(hypre_ParVectorLocalVector(ams_data -> Gz));

                  for (i = 0; i < Gt_num_rows; i++)
                  {
                     /* determine the characteristic mesh size for vertex i
                        as the average squared length of its local edges */
                     h2 = 0.0;
                     ne = 0;
                     for (j = Gt_diag_I[i]; j < Gt_diag_I[i+1]; j++)
                     {
                        k = Gt_diag_J[j];
                        h2 += Gx_data[k]*Gx_data[k]+Gy_data[k]*Gy_data[k]+Gz_data[k]*Gz_data[k];
                        ne++;
                     }

                     if (ne != 0)
                     {
                        h2 /= ne;
                        for (j = Gt_diag_I[i]; j < Gt_diag_I[i+1]; j++)
                           Gt_diag_data[j] *= h2;
                        for (j = Gt_offd_I[i]; j < Gt_offd_I[i+1]; j++)
                           Gt_offd_data[j] *= h2;
                     }
                  }
               }

               /* we only needed Gx, Gy and Gz to compute the local mesh size */
               if (input_info == 1)
               {
                  hypre_ParVectorDestroy(ams_data -> Gx);
                  hypre_ParVectorDestroy(ams_data -> Gy);
                  if (ams_data -> dim == 3)
                     hypre_ParVectorDestroy(ams_data -> Gz);
               }

               GGt = hypre_ParMatmul(ams_data -> G, Gt);
               hypre_ParCSRMatrixDestroy(Gt);

               /* hypre_ParCSRMatrixAdd(GGt, A, &ams_data -> A); */
               {
                  /* local shadows again: A = GGt (stabilization term),
                     B = the current system matrix, C = their sum ApGGt */
                  hypre_ParCSRMatrix *A = GGt;
                  hypre_ParCSRMatrix *B = ams_data -> A;
                  hypre_ParCSRMatrix **C_ptr = &ApGGt;

                  hypre_ParCSRMatrix *C;
                  hypre_CSRMatrix *A_local, *B_local, *C_local;

                  MPI_Comm comm = hypre_ParCSRMatrixComm(A);
                  HYPRE_Int global_num_rows = hypre_ParCSRMatrixGlobalNumRows(A);
                  HYPRE_Int global_num_cols = hypre_ParCSRMatrixGlobalNumCols(A);
                  HYPRE_Int *row_starts = hypre_ParCSRMatrixRowStarts(A);
                  HYPRE_Int *col_starts = hypre_ParCSRMatrixColStarts(A);
                  HYPRE_Int A_num_cols_offd = hypre_CSRMatrixNumCols(hypre_ParCSRMatrixOffd(A));
                  HYPRE_Int A_num_nonzeros_diag = hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixDiag(A));
                  HYPRE_Int A_num_nonzeros_offd = hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixOffd(A));
                  HYPRE_Int B_num_cols_offd = hypre_CSRMatrixNumCols(hypre_ParCSRMatrixOffd(B));
                  HYPRE_Int B_num_nonzeros_diag = hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixDiag(B));
                  HYPRE_Int B_num_nonzeros_offd = hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixOffd(B));

                  A_local = hypre_MergeDiagAndOffd(A);
                  B_local = hypre_MergeDiagAndOffd(B);
                  C_local = hypre_CSRMatrixAdd(A_local, B_local);

                  C = hypre_ParCSRMatrixCreate (comm,
                                                global_num_rows,
                                                global_num_cols,
                                                row_starts,
                                                col_starts,
                                                A_num_cols_offd + B_num_cols_offd,
                                                A_num_nonzeros_diag + B_num_nonzeros_diag,
                                                A_num_nonzeros_offd + B_num_nonzeros_offd);
                  GenerateDiagAndOffd(C_local, C,
                                      hypre_ParCSRMatrixFirstColDiag(A),
                                      hypre_ParCSRMatrixLastColDiag(A));

                  hypre_ParCSRMatrixOwnsRowStarts(C) = 0;
                  hypre_ParCSRMatrixOwnsColStarts(C) = 0;

                  hypre_CSRMatrixDestroy(A_local);
                  hypre_CSRMatrixDestroy(B_local);
                  hypre_CSRMatrixDestroy(C_local);

                  *C_ptr = C;
               }

               hypre_ParCSRMatrixDestroy(GGt);

               hypre_BoomerAMGBuildCoarseOperator(ams_data -> Pi,
                                                  ApGGt,
                                                  ams_data -> Pi,
                                                  &ams_data -> A_Pi);
            }
         }
         else
         {
            hypre_BoomerAMGBuildCoarseOperator(ams_data -> Pi,
                                               ams_data -> A,
                                               ams_data -> Pi,
                                               &ams_data -> A_Pi);
         }

         if (!P_owned_col_starts)
         {
            hypre_ParCSRMatrixOwnsRowStarts(ams_data -> A_Pi) = 0;
            hypre_ParCSRMatrixOwnsColStarts(ams_data -> A_Pi) = 0;
         }

         ams_data -> owns_A_Pi = 1;

         /* for cycle 20 the interpolation is [G,Pi], i.e. dim+1 components */
         if (ams_data -> cycle_type != 20)
            HYPRE_BoomerAMGSetNumFunctions(ams_data -> B_Pi, ams_data -> dim);
         else
            HYPRE_BoomerAMGSetNumFunctions(ams_data -> B_Pi, ams_data -> dim + 1);
         /* HYPRE_BoomerAMGSetNodal(ams_data -> B_Pi, 1); */
      }

      /* Make sure that A_Pi has no zero rows (this can happen for
         some kinds of boundary conditions with contact). */
      hypre_ParCSRMatrixFixZeroRows(ams_data -> A_Pi);

      HYPRE_BoomerAMGSetup(ams_data -> B_Pi,
                           (HYPRE_ParCSRMatrix)ams_data -> A_Pi,
                           0, 0);
   }

   /* Allocate temporary vectors */
   ams_data -> r0 = hypre_ParVectorInRangeOf(ams_data -> A);
   ams_data -> g0 = hypre_ParVectorInRangeOf(ams_data -> A);
   if (ams_data -> A_G)
   {
      ams_data -> r1 = hypre_ParVectorInRangeOf(ams_data -> A_G);
      ams_data -> g1 = hypre_ParVectorInRangeOf(ams_data -> A_G);
   }
   if (ams_data -> r1 == NULL && ams_data -> A_Pix)
   {
      ams_data -> r1 = hypre_ParVectorInRangeOf(ams_data -> A_Pix);
      ams_data -> g1 = hypre_ParVectorInRangeOf(ams_data -> A_Pix);
   }
   if (ams_data -> Pi)
   {
      ams_data -> r2 = hypre_ParVectorInDomainOf(ams_data -> Pi);
      ams_data -> g2 = hypre_ParVectorInDomainOf(ams_data -> Pi);
   }

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_AMSSolve
 *
 * Solve the system A x = b.
*--------------------------------------------------------------------------*/

HYPRE_Int hypre_AMSSolve(void *solver,
                         hypre_ParCSRMatrix *A,
                         hypre_ParVector *b,
                         hypre_ParVector *x)
{
   hypre_AMSData *ams_data = (hypre_AMSData *) solver;

   HYPRE_Int i, my_id = -1;
   HYPRE_Real r0_norm, r_norm, b_norm, relative_resid = 0, old_resid;

   /* textual description of the multilevel cycle, interpreted by
      hypre_ParCSRSubspacePrec: '0' = smooth, '1'-'5' = subspace corrections,
      '(' / '+' / ')' = additive grouping */
   char cycle[30];
   hypre_ParCSRMatrix *Ai[5], *Pi[5];
   HYPRE_Solver Bi[5];
   HYPRE_PtrToSolverFcn HBi[5];
   hypre_ParVector *ri[5], *gi[5];

   hypre_ParVector *z = NULL;

   /* subspace 0: range of G^T; 1: range of Pi^T (block system);
      2-4: ranges of Pix^T, Piy^T, Piz^T */
   Ai[0] = ams_data -> A_G;   Pi[0] = ams_data -> G;
   Ai[1] = ams_data -> A_Pi;  Pi[1] = ams_data -> Pi;
   Ai[2] = ams_data -> A_Pix; Pi[2] = ams_data -> Pix;
   Ai[3] = ams_data -> A_Piy; Pi[3] = ams_data -> Piy;
   Ai[4] = ams_data -> A_Piz; Pi[4] = ams_data -> Piz;

   /* A_Pi is a dim x dim block system, hence the block solver for Bi[1] */
   Bi[0] = ams_data -> B_G;   HBi[0] = (HYPRE_PtrToSolverFcn) hypre_BoomerAMGSolve;
   Bi[1] = ams_data -> B_Pi;  HBi[1] = (HYPRE_PtrToSolverFcn) hypre_BoomerAMGBlockSolve;
   Bi[2] = ams_data -> B_Pix; HBi[2] = (HYPRE_PtrToSolverFcn) hypre_BoomerAMGSolve;
   Bi[3] = ams_data -> B_Piy; HBi[3] = (HYPRE_PtrToSolverFcn) hypre_BoomerAMGSolve;
   Bi[4] = ams_data -> B_Piz; HBi[4] = (HYPRE_PtrToSolverFcn) hypre_BoomerAMGSolve;

   /* r1/g1 are shared by all scalar subspaces; r2/g2 belong to Pi */
   ri[0] = ams_data -> r1; gi[0] = ams_data -> g1;
   ri[1] = ams_data -> r2; gi[1] = ams_data -> g2;
   ri[2] = ams_data -> r1; gi[2] = ams_data -> g1;
   ri[3] = ams_data -> r1; gi[3] = ams_data -> g1;
   ri[4] = ams_data -> r1; gi[4] = ams_data -> g1;

   /* may need to create an additional temporary vector for relaxation */
   if (hypre_NumThreads() > 1 ||  ams_data -> A_relax_type == 16)
   {
      z = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A),
                                hypre_ParCSRMatrixGlobalNumRows(A),
                                hypre_ParCSRMatrixRowStarts(A));
      hypre_ParVectorInitialize(z);
      hypre_ParVectorSetPartitioningOwner(z,0);
   }

   if (ams_data -> print_level > 0)
      hypre_MPI_Comm_rank(hypre_ParCSRMatrixComm(A), &my_id);

   /* Compatible subspace projection for problems with zero-conductivity
      regions. Note that this modifies the input (r.h.s.) vector b! */
   if ( (ams_data -> B_G0) &&
        (++ams_data->solve_counter % ( ams_data -> projection_frequency ) == 0) )
   {
      /* hypre_printf("Projecting onto the compatible subspace...\n"); */
      hypre_AMSProjectOutGradients(ams_data, b);
   }

   /* select the cycle string according to cycle_type; the beta==0 variants
      skip the (absent) G^T correction */
   if (ams_data -> beta_is_zero)
   {
      switch (ams_data -> cycle_type)
      {
         case 0:
            hypre_sprintf(cycle,"%s","0");
            break;
         case 1: case 3: case 5: case 7: default:
            hypre_sprintf(cycle,"%s","020");
            break;
         case 2: case 4: case 6: case 8:
            hypre_sprintf(cycle,"%s","(0+2)");
            break;
         case 11: case 13:
            hypre_sprintf(cycle,"%s","0345430");
            break;
         case 12:
            hypre_sprintf(cycle,"%s","(0+3+4+5)");
            break;
         case 14:
            hypre_sprintf(cycle,"%s","0(+3+4+5)0");
            break;
      }
   }
   else
   {
      switch (ams_data -> cycle_type)
      {
         case 0:
            hypre_sprintf(cycle,"%s","010");
            break;
         case 1: default:
            hypre_sprintf(cycle,"%s","01210");
            break;
         case 2:
            hypre_sprintf(cycle,"%s","(0+1+2)");
            break;
         case 3:
            hypre_sprintf(cycle,"%s","02120");
            break;
         case 4:
            hypre_sprintf(cycle,"%s","(010+2)");
            break;
         case 5:
            hypre_sprintf(cycle,"%s","0102010");
            break;
         case 6:
            hypre_sprintf(cycle,"%s","(020+1)");
            break;
         case 7:
            hypre_sprintf(cycle,"%s","0201020");
            break;
         case 8:
            hypre_sprintf(cycle,"%s","0(+1+2)0");
            break;
         case 9:
            hypre_sprintf(cycle,"%s","01210");
            break;
         case 11:
            hypre_sprintf(cycle,"%s","013454310");
            break;
         case 12:
            hypre_sprintf(cycle,"%s","(0+1+3+4+5)");
            break;
         case 13:
            hypre_sprintf(cycle,"%s","034515430");
            break;
         case 14:
            hypre_sprintf(cycle,"%s","01(+3+4+5)10");
            break;
         case 20:
            hypre_sprintf(cycle,"%s","020");
            break;
      }
   }

   for (i = 0; i < ams_data -> maxit; i++)
   {
      /* Compute initial residual norms */
      if (ams_data -> maxit > 1 && i == 0)
      {
         hypre_ParVectorCopy(b, ams_data -> r0);
         hypre_ParCSRMatrixMatvec(-1.0, ams_data -> A, x, 1.0, ams_data -> r0);
         r_norm = sqrt(hypre_ParVectorInnerProd(ams_data -> r0,ams_data -> r0));
         r0_norm = r_norm;
         b_norm = sqrt(hypre_ParVectorInnerProd(b, b));
         if (b_norm)
            relative_resid = r_norm / b_norm;
         else
            relative_resid = r_norm;
         if (my_id == 0 && ams_data -> print_level > 0)
         {
            hypre_printf(" relative\n");
            hypre_printf(" residual factor residual\n");
            hypre_printf(" -------- ------ --------\n");
            hypre_printf(" Initial %e %e\n",
                         r_norm, relative_resid);
         }
      }

      /* Apply the preconditioner */
      hypre_ParCSRSubspacePrec(ams_data -> A,
                               ams_data -> A_relax_type,
                               ams_data -> A_relax_times,
                               ams_data -> A_l1_norms,
                               ams_data -> A_relax_weight,
                               ams_data -> A_omega,
                               ams_data -> A_max_eig_est,
                               ams_data -> A_min_eig_est,
                               ams_data -> A_cheby_order,
                               ams_data -> A_cheby_fraction,
                               Ai, Bi, HBi, Pi, ri, gi,
                               b, x,
                               ams_data -> r0,
                               ams_data -> g0,
                               cycle,
                               z);

      /* Compute new residual norms */
      if (ams_data -> maxit > 1)
      {
         old_resid = r_norm;
         hypre_ParVectorCopy(b, ams_data -> r0);
         hypre_ParCSRMatrixMatvec(-1.0, ams_data -> A, x, 1.0, ams_data -> r0);
         r_norm = sqrt(hypre_ParVectorInnerProd(ams_data -> r0,ams_data -> r0));
         if (b_norm)
            relative_resid = r_norm / b_norm;
         else
            relative_resid = r_norm;
         if (my_id == 0 && ams_data -> print_level > 0)
            hypre_printf(" Cycle %2d %e %f %e \n",
                         i+1, r_norm, r_norm / old_resid, relative_resid);
      }

      if (relative_resid < ams_data -> tol)
      {
         i++; /* so num_iterations below counts the final cycle */
         break;
      }
   }

   if (my_id == 0 && ams_data -> print_level > 0 && ams_data -> maxit > 1)
      hypre_printf("\n\n Average Convergence Factor = %f\n\n",
                   pow((r_norm/r0_norm),(1.0/(HYPRE_Real) i)));

   ams_data -> num_iterations = i;
   ams_data -> rel_resid_norm = relative_resid;

   if (ams_data -> num_iterations == ams_data -> maxit && ams_data -> tol > 0.0)
      hypre_error(HYPRE_ERROR_CONV);

   if (z)
      hypre_ParVectorDestroy(z);

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_ParCSRSubspacePrec
 *
 * General subspace preconditioner for A0 y = x, based on ParCSR storage.
 *
 * P[i] and A[i] are the interpolation and coarse grid matrices for
 * the (i+1)'th subspace. B[i] is an AMG solver for A[i]. r[i] and g[i]
 * are temporary vectors. A0_* are the fine grid smoothing parameters.
*
 * The default mode is multiplicative, '+' changes the next correction
 * to additive, based on residual computed at '('.
 *--------------------------------------------------------------------------*/

HYPRE_Int hypre_ParCSRSubspacePrec(/* fine space matrix */
                                   hypre_ParCSRMatrix *A0,
                                   /* relaxation parameters */
                                   HYPRE_Int A0_relax_type,
                                   HYPRE_Int A0_relax_times,
                                   HYPRE_Real *A0_l1_norms,
                                   HYPRE_Real A0_relax_weight,
                                   HYPRE_Real A0_omega,
                                   HYPRE_Real A0_max_eig_est,
                                   HYPRE_Real A0_min_eig_est,
                                   HYPRE_Int A0_cheby_order,
                                   HYPRE_Real A0_cheby_fraction,
                                   /* subspace matrices */
                                   hypre_ParCSRMatrix **A,
                                   /* subspace preconditioners */
                                   HYPRE_Solver *B,
                                   /* hypre solver functions for B */
                                   HYPRE_PtrToSolverFcn *HB,
                                   /* subspace interpolations */
                                   hypre_ParCSRMatrix **P,
                                   /* temporary subspace vectors */
                                   hypre_ParVector **r,
                                   hypre_ParVector **g,
                                   /* right-hand side */
                                   hypre_ParVector *x,
                                   /* current approximation */
                                   hypre_ParVector *y,
                                   /* current residual */
                                   hypre_ParVector *r0,
                                   /* temporary vector */
                                   hypre_ParVector *g0,
                                   char *cycle,
                                   /* temporary vector */
                                   hypre_ParVector *z)
{
   char *op;
   HYPRE_Int use_saved_residual = 0;

   /* interpret the cycle string one character at a time */
   for (op = cycle; *op != '\0'; op++)
   {
      /* do nothing */
      if (*op == ')')
         continue;

      /* compute the residual: r = x - Ay */
      else if (*op == '(')
      {
         hypre_ParVectorCopy(x,r0);
         hypre_ParCSRMatrixMatvec(-1.0, A0, y, 1.0, r0);
      }

      /* switch to additive correction */
      else if (*op == '+')
      {
         use_saved_residual = 1;
         continue;
      }

      /* smooth: y += S (x - Ay) */
      else if (*op == '0')
      {
         hypre_ParCSRRelax(A0, x,
                           A0_relax_type,
                           A0_relax_times,
                           A0_l1_norms,
                           A0_relax_weight,
                           A0_omega,
                           A0_max_eig_est,
                           A0_min_eig_est,
                           A0_cheby_order,
                           A0_cheby_fraction,
                           y, g0, z);
      }

      /* subspace correction: y += P B^{-1} P^t r */
      else
      {
         /* digits '1'..'5' select the subspace index 0..4 */
         HYPRE_Int i = *op - '1';
         if (i < 0)
            hypre_error_in_arg(16);

         /* skip empty subspaces */
         if (!A[i]) continue;

         /* compute the residual? */
         if (use_saved_residual)
         {
            /* additive mode: reuse the residual saved at '(' */
            use_saved_residual = 0;
            hypre_ParCSRMatrixMatvecT(1.0, P[i], r0, 0.0, r[i]);
         }
         else
         {
            /* multiplicative mode: fresh residual restricted to subspace i */
            hypre_ParVectorCopy(x,g0);
            hypre_ParCSRMatrixMatvec(-1.0, A0, y, 1.0, g0);
            hypre_ParCSRMatrixMatvecT(1.0, P[i], g0, 0.0, r[i]);
         }

         hypre_ParVectorSetConstantValues(g[i], 0.0);

         (*HB[i]) (B[i], (HYPRE_Matrix)A[i],
                   (HYPRE_Vector)r[i], (HYPRE_Vector)g[i]);

         hypre_ParCSRMatrixMatvec(1.0, P[i], g[i], 0.0, g0);
         hypre_ParVectorAxpy(1.0, g0, y);
      }
   }

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_AMSGetNumIterations
 *
 * Get the number of AMS iterations.
 *--------------------------------------------------------------------------*/

HYPRE_Int hypre_AMSGetNumIterations(void *solver,
                                    HYPRE_Int *num_iterations)
{
   hypre_AMSData *ams_data = (hypre_AMSData *) solver;
   *num_iterations = ams_data -> num_iterations;
   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_AMSGetFinalRelativeResidualNorm
 *
 * Get the final relative residual norm in AMS.
 *--------------------------------------------------------------------------*/

HYPRE_Int hypre_AMSGetFinalRelativeResidualNorm(void *solver,
                                                HYPRE_Real *rel_resid_norm)
{
   hypre_AMSData *ams_data = (hypre_AMSData *) solver;
   *rel_resid_norm = ams_data -> rel_resid_norm;
   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_AMSProjectOutGradients
 *
 * For problems with zero-conductivity regions, project the vector onto the
 * compatible subspace: x = (I - G0 (G0^t G0)^{-1} G0^T) x, where G0 is the
 * discrete gradient restricted to the interior nodes of the regions with
 * zero conductivity. This ensures that x is orthogonal to the gradients in
 * the range of G0.
 *
 * This function is typically called after the solution iteration is complete,
 * in order to facilitate the visualization of the computed field.
Without it
 * the values in the zero-conductivity regions contain kernel components.
 *--------------------------------------------------------------------------*/

HYPRE_Int hypre_AMSProjectOutGradients(void *solver,
                                       hypre_ParVector *x)
{
   hypre_AMSData *ams_data = (hypre_AMSData *) solver;

   /* only meaningful when the zero-conductivity setup created B_G0 */
   if (ams_data -> B_G0)
   {
      /* r1 = G0^T x */
      hypre_ParCSRMatrixMatvecT(1.0, ams_data -> G0, x, 0.0, ams_data -> r1);
      /* g1 ~ (G0^T G0)^{-1} r1, approximated by a few AMG V-cycles */
      hypre_ParVectorSetConstantValues(ams_data -> g1, 0.0);
      hypre_BoomerAMGSolve(ams_data -> B_G0, ams_data -> A_G0, ams_data -> r1, ams_data -> g1);
      /* x -= G0 g1 */
      hypre_ParCSRMatrixMatvec(1.0, ams_data -> G0, ams_data -> g1, 0.0, ams_data -> g0);
      hypre_ParVectorAxpy(-1.0, ams_data -> g0, x);
   }

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_AMSConstructDiscreteGradient
 *
 * Construct and return the lowest-order discrete gradient matrix G, based on:
 * - a matrix on the egdes (e.g. the stiffness matrix A)
 * - a vector on the vertices (e.g. the x coordinates)
 * - the array edge_vertex, which lists the global indexes of the
 *   vertices of the local edges.
 *
 * We assume that edge_vertex lists the edge vertices consecutively,
 * and that the orientation of all edges is consistent. More specificaly:
 * If edge_orientation = 1, the edges are already oriented.
 * If edge_orientation = 2, the orientation of edge i depends only on the
 * sign of edge_vertex[2*i+1] - edge_vertex[2*i].
 *--------------------------------------------------------------------------*/

HYPRE_Int hypre_AMSConstructDiscreteGradient(hypre_ParCSRMatrix *A,
                                             hypre_ParVector *x_coord,
                                             HYPRE_Int *edge_vertex,
                                             HYPRE_Int edge_orientation,
                                             hypre_ParCSRMatrix **G_ptr)
{
   hypre_ParCSRMatrix *G;
   HYPRE_Int nedges;

   nedges = hypre_ParCSRMatrixNumRows(A);

   /* Construct the local part of G based on edge_vertex and the edge
      and vertex partitionings from A and x_coord */
   {
      HYPRE_Int i, *I = hypre_CTAlloc(HYPRE_Int, nedges+1);
      HYPRE_Int part_size, *row_starts, *col_starts;
      HYPRE_Real *data = hypre_CTAlloc(HYPRE_Real, 2*nedges);
      hypre_CSRMatrix *local = hypre_CSRMatrixCreate (nedges,
                                                      hypre_ParVectorGlobalSize(x_coord),
                                                      2*nedges);

      /* Each edge row has exactly two entries (its two vertices) */
      for (i = 0; i <= nedges; i++)
         I[i] = 2*i;

      if (edge_orientation == 1)
      {
         /* Assume that the edges are already oriented */
         for (i = 0; i < 2*nedges; i+=2)
         {
            data[i] = -1.0;
            data[i+1] = 1.0;
         }
      }
      else if (edge_orientation == 2)
      {
         /* Assume that the edge orientation is based on the vertex indexes */
         for (i = 0; i < 2*nedges; i+=2)
         {
            if (edge_vertex[i] < edge_vertex[i+1])
            {
               data[i] = -1.0;
               data[i+1] = 1.0;
            }
            else
            {
               data[i] = 1.0;
               data[i+1] = -1.0;
            }
         }
      }
      else
         hypre_error_in_arg(4);

      hypre_CSRMatrixI(local) = I;
      /* J borrows the caller's edge_vertex array; it is detached below
         before the destroy so the caller keeps ownership */
      hypre_CSRMatrixJ(local) = edge_vertex;
      hypre_CSRMatrixData(local) = data;
      hypre_CSRMatrixRownnz(local) = NULL;
      hypre_CSRMatrixOwnsData(local) = 1;
      hypre_CSRMatrixNumRownnz(local) = nedges;

      /* Copy partitioning from A and x_coord (previously they were re-used) */
#ifdef HYPRE_NO_GLOBAL_PARTITION
      part_size = 2;
#else
      hypre_MPI_Comm_size(hypre_ParCSRMatrixComm(A), &part_size);
      part_size++;
#endif
      row_starts = hypre_TAlloc(HYPRE_Int,part_size);
      col_starts = hypre_TAlloc(HYPRE_Int,part_size);
      for (i = 0; i < part_size; i++)
      {
         row_starts[i] = hypre_ParCSRMatrixRowStarts(A)[i];
         col_starts[i] = hypre_ParVectorPartitioning(x_coord)[i];
      }

      /* Generate the discrete gradient matrix */
      G = hypre_ParCSRMatrixCreate(hypre_ParCSRMatrixComm(A),
                                   hypre_ParCSRMatrixGlobalNumRows(A),
                                   hypre_ParVectorGlobalSize(x_coord),
                                   row_starts,
                                   col_starts,
                                   0, 0, 0);
      /* G owns the partition copies made above */
      hypre_ParCSRMatrixOwnsRowStarts(G) = 1;
      hypre_ParCSRMatrixOwnsColStarts(G) = 1;
      GenerateDiagAndOffd(local, G,
                          hypre_ParVectorFirstIndex(x_coord),
                          hypre_ParVectorLastIndex(x_coord));

      /* Account for empty rows in G. These may appear when A includes only
         the interior (non-Dirichlet b.c.) edges. */
      {
         hypre_CSRMatrix *G_diag = hypre_ParCSRMatrixDiag(G);
         G_diag->num_cols = hypre_VectorSize(hypre_ParVectorLocalVector(x_coord));
      }

      /* Free the local matrix (J was borrowed, so detach it first) */
      hypre_CSRMatrixJ(local) = NULL;
      hypre_CSRMatrixDestroy(local);
   }

   *G_ptr = G;

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_AMSFEISetup
 *
 * Construct an AMS solver object based on the following data:
 *
 * A - the edge element stiffness matrix
 * num_vert - number of vertices (nodes) in the processor
 * num_local_vert - number of vertices owned by the processor
 * vert_number - global indexes of the vertices in the processor
 * vert_coord - coordinates of the vertices in the processor
 * num_edges - number of edges owned by the processor
 * edge_vertex - the vertices of the edges owned by the processor.
 *               Vertices are in local numbering (the same as in
 *               vert_number), and edge orientation is always from
 *               the first to the second vertex.
 *
 * Here we distinguish between vertices that belong to elements in the
 * current processor, and the subset of these vertices that is owned by
 * the processor.
 *
 * This function is written specifically for input from the FEI and should
 * be called before hypre_AMSSetup().
 *--------------------------------------------------------------------------*/

HYPRE_Int hypre_AMSFEISetup(void *solver,
                            hypre_ParCSRMatrix *A,
                            hypre_ParVector *b,
                            hypre_ParVector *x,
                            HYPRE_Int num_vert,
                            HYPRE_Int num_local_vert,
                            HYPRE_Int *vert_number,
                            HYPRE_Real *vert_coord,
                            HYPRE_Int num_edges,
                            HYPRE_Int *edge_vertex)
{
   hypre_AMSData *ams_data = (hypre_AMSData *) solver;

   HYPRE_Int i, j;

   hypre_ParCSRMatrix *G;
   hypre_ParVector *x_coord, *y_coord, *z_coord;
   HYPRE_Real *x_data, *y_data, *z_data;

   MPI_Comm comm = hypre_ParCSRMatrixComm(A);
   HYPRE_Int *vert_part, num_global_vert;
   HYPRE_Int vert_start, vert_end;

   /* Find the processor partitioning of the vertices */
#ifdef HYPRE_NO_GLOBAL_PARTITION
   /* Two-entry partition: [first owned index, one past last owned index] */
   vert_part = hypre_TAlloc(HYPRE_Int,2);
   hypre_MPI_Scan(&num_local_vert, &vert_part[1], 1, HYPRE_MPI_INT, hypre_MPI_SUM, comm);
   vert_part[0] = vert_part[1] - num_local_vert;
   hypre_MPI_Allreduce(&num_local_vert, &num_global_vert, 1, HYPRE_MPI_INT, hypre_MPI_SUM, comm);
#else
   /* Full partition array: prefix sums of per-processor vertex counts */
   HYPRE_Int num_procs;
   hypre_MPI_Comm_size(comm, &num_procs);
   vert_part = hypre_TAlloc(HYPRE_Int,num_procs+1);
   hypre_MPI_Allgather(&num_local_vert, 1, HYPRE_MPI_INT, &vert_part[1], 1, HYPRE_MPI_INT, comm);
   vert_part[0] = 0;
   for (i = 0; i < num_procs; i++)
      vert_part[i+1] += vert_part[i];
   num_global_vert = vert_part[num_procs];
#endif

   /* Construct hypre parallel vectors for the vertex coordinates.
      Note: all three vectors share vert_part, so none of them owns it. */
   x_coord = hypre_ParVectorCreate(comm, num_global_vert, vert_part);
   hypre_ParVectorInitialize(x_coord);
   hypre_ParVectorOwnsData(x_coord) = 1;
   hypre_ParVectorOwnsPartitioning(x_coord) = 0;
   x_data = hypre_VectorData(hypre_ParVectorLocalVector(x_coord));

   y_coord = hypre_ParVectorCreate(comm, num_global_vert, vert_part);
   hypre_ParVectorInitialize(y_coord);
   hypre_ParVectorOwnsData(y_coord) = 1;
   hypre_ParVectorOwnsPartitioning(y_coord) = 0;
   y_data = hypre_VectorData(hypre_ParVectorLocalVector(y_coord));

   z_coord = hypre_ParVectorCreate(comm, num_global_vert, vert_part);
   hypre_ParVectorInitialize(z_coord);
   hypre_ParVectorOwnsData(z_coord) = 1;
   hypre_ParVectorOwnsPartitioning(z_coord) = 0;
   z_data = hypre_VectorData(hypre_ParVectorLocalVector(z_coord));

   vert_start = hypre_ParVectorFirstIndex(x_coord);
   vert_end = hypre_ParVectorLastIndex(x_coord);

   /* Save coordinates of locally owned vertices (vert_coord is packed
      as x,y,z triples per vertex) */
   for (i = 0; i < num_vert; i++)
   {
      if (vert_number[i] >= vert_start && vert_number[i] <= vert_end)
      {
         j = vert_number[i] - vert_start;
         x_data[j] = vert_coord[3*i];
         y_data[j] = vert_coord[3*i+1];
         z_data[j] = vert_coord[3*i+2];
      }
   }

   /* Change vertex numbers from local to global (modifies the caller's
      edge_vertex array in place) */
   for (i = 0; i < 2*num_edges; i++)
      edge_vertex[i] = vert_number[edge_vertex[i]];

   /* Construct the local part of G based on edge_vertex */
   {
      /* HYPRE_Int num_edges = hypre_ParCSRMatrixNumRows(A); */
      HYPRE_Int *I = hypre_CTAlloc(HYPRE_Int, num_edges+1);
      HYPRE_Real *data = hypre_CTAlloc(HYPRE_Real, 2*num_edges);
      hypre_CSRMatrix *local = hypre_CSRMatrixCreate (num_edges,
                                                      num_global_vert,
                                                      2*num_edges);

      for (i = 0; i <= num_edges; i++)
         I[i] = 2*i;

      /* Assume that the edge orientation is based on the vertex indexes */
      for (i = 0; i < 2*num_edges; i+=2)
      {
         data[i] = 1.0;
         data[i+1] = -1.0;
      }

      hypre_CSRMatrixI(local) = I;
      /* J borrows edge_vertex; detached below before destroy */
      hypre_CSRMatrixJ(local) = edge_vertex;
      hypre_CSRMatrixData(local) = data;
      hypre_CSRMatrixRownnz(local) = NULL;
      hypre_CSRMatrixOwnsData(local) = 1;
      hypre_CSRMatrixNumRownnz(local) = num_edges;

      G = hypre_ParCSRMatrixCreate(comm,
                                   hypre_ParCSRMatrixGlobalNumRows(A),
                                   num_global_vert,
                                   hypre_ParCSRMatrixRowStarts(A),
                                   vert_part,
                                   0, 0, 0);
      /* Row starts stay owned by A; G takes ownership of vert_part */
      hypre_ParCSRMatrixOwnsRowStarts(G) = 0;
      hypre_ParCSRMatrixOwnsColStarts(G) = 1;

      GenerateDiagAndOffd(local, G, vert_start, vert_end);

      hypre_CSRMatrixJ(local) = NULL;
      hypre_CSRMatrixDestroy(local);
   }

   ams_data -> G = G;

   ams_data -> x = x_coord;
   ams_data -> y = y_coord;
   ams_data -> z = z_coord;

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_AMSFEIDestroy
 *
 * Free the additional memory allocated in hypre_AMSFEISetup().
* * This function is written specifically for input from the FEI and should * be called before hypre_AMSDestroy(). *--------------------------------------------------------------------------*/ HYPRE_Int hypre_AMSFEIDestroy(void *solver) { hypre_AMSData *ams_data = (hypre_AMSData *) solver; if (ams_data -> G) hypre_ParCSRMatrixDestroy(ams_data -> G); if (ams_data -> x) hypre_ParVectorDestroy(ams_data -> x); if (ams_data -> y) hypre_ParVectorDestroy(ams_data -> y); if (ams_data -> z) hypre_ParVectorDestroy(ams_data -> z); return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_ParCSRComputeL1Norms Threads * * Compute the l1 norms of the rows of a given matrix, depending on * the option parameter: * * option 1 = Compute the l1 norm of the rows * option 2 = Compute the l1 norm of the (processor) off-diagonal * part of the rows plus the diagonal of A * option 3 = Compute the l2 norm^2 of the rows * option 4 = Truncated version of option 2 based on Remark 6.2 in "Multigrid * Smoothers for Ultra-Parallel Computing" * * The above computations are done in a CF manner, whenever the provided * cf_marker is not NULL. 
*--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParCSRComputeL1NormsThreads(hypre_ParCSRMatrix *A, HYPRE_Int option, HYPRE_Int num_threads, HYPRE_Int *cf_marker, HYPRE_Real **l1_norm_ptr) { HYPRE_Int i, j, k; HYPRE_Int num_rows = hypre_ParCSRMatrixNumRows(A); hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Int *A_diag_I = hypre_CSRMatrixI(A_diag); HYPRE_Int *A_diag_J = hypre_CSRMatrixJ(A_diag); HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag); hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Int *A_offd_I = hypre_CSRMatrixI(A_offd); HYPRE_Int *A_offd_J = hypre_CSRMatrixJ(A_offd); HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd); HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(A_offd); HYPRE_Real diag; HYPRE_Real *l1_norm = hypre_CTAlloc(HYPRE_Real, num_rows); HYPRE_Int ii, ns, ne, rest, size; HYPRE_Int *cf_marker_offd = NULL; HYPRE_Int cf_diag; /* collect the cf marker data from other procs */ if (cf_marker != NULL) { HYPRE_Int index; HYPRE_Int num_sends; HYPRE_Int start; HYPRE_Int *int_buf_data = NULL; hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A); hypre_ParCSRCommHandle *comm_handle; if (num_cols_offd) cf_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd); num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); if (hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends)) int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends)); index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++) { int_buf_data[index++] = cf_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)]; } } comm_handle = hypre_ParCSRCommHandleCreate(11, comm_pkg, int_buf_data, cf_marker_offd); hypre_ParCSRCommHandleDestroy(comm_handle); hypre_TFree(int_buf_data); } #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,ii,j,k,ns,ne,rest,size,diag,cf_diag) 
HYPRE_SMP_SCHEDULE #endif for (k = 0; k < num_threads; k++) { size = num_rows/num_threads; rest = num_rows - size*num_threads; if (k < rest) { ns = k*size+k; ne = (k+1)*size+k+1; } else { ns = k*size+rest; ne = (k+1)*size+rest; } if (option == 1) { for (i = ns; i < ne; i++) { l1_norm[i] = 0.0; if (cf_marker == NULL) { /* Add the l1 norm of the diag part of the ith row */ for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++) l1_norm[i] += fabs(A_diag_data[j]); /* Add the l1 norm of the offd part of the ith row */ if (num_cols_offd) { for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++) l1_norm[i] += fabs(A_offd_data[j]); } } else { cf_diag = cf_marker[i]; /* Add the CF l1 norm of the diag part of the ith row */ for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++) if (cf_diag == cf_marker[A_diag_J[j]]) l1_norm[i] += fabs(A_diag_data[j]); /* Add the CF l1 norm of the offd part of the ith row */ if (num_cols_offd) { for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++) if (cf_diag == cf_marker_offd[A_offd_J[j]]) l1_norm[i] += fabs(A_offd_data[j]); } } } } else if (option == 2) { for (i = ns; i < ne; i++) { l1_norm[i] = 0.0; if (cf_marker == NULL) { /* Add the diagonal and the local off-thread part of the ith row */ for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++) { ii = A_diag_J[j]; if (ii == i || ii < ns || ii >= ne) l1_norm[i] += fabs(A_diag_data[j]); } /* Add the l1 norm of the offd part of the ith row */ if (num_cols_offd) { for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++) l1_norm[i] += fabs(A_offd_data[j]); } } else { cf_diag = cf_marker[i]; /* Add the diagonal and the local off-thread part of the ith row */ for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++) { ii = A_diag_J[j]; if ((ii == i || ii < ns || ii >= ne) && (cf_diag == cf_marker[A_diag_J[j]])) l1_norm[i] += fabs(A_diag_data[j]); } /* Add the CF l1 norm of the offd part of the ith row */ if (num_cols_offd) { for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++) if (cf_diag == cf_marker_offd[A_offd_J[j]]) l1_norm[i] += fabs(A_offd_data[j]); } } } } else 
if (option == 3) { for (i = ns; i < ne; i++) { l1_norm[i] = 0.0; for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++) l1_norm[i] += A_diag_data[j] * A_diag_data[j]; if (num_cols_offd) for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++) l1_norm[i] += A_offd_data[j] * A_offd_data[j]; } } else if (option == 4) { for (i = ns; i < ne; i++) { l1_norm[i] = 0.0; if (cf_marker == NULL) { /* Add the diagonal and the local off-thread part of the ith row */ for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++) { ii = A_diag_J[j]; if (ii == i || ii < ns || ii >= ne) { if (ii == i) { diag = fabs(A_diag_data[j]); l1_norm[i] += fabs(A_diag_data[j]); } else l1_norm[i] += 0.5*fabs(A_diag_data[j]); } } /* Add the l1 norm of the offd part of the ith row */ if (num_cols_offd) { for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++) l1_norm[i] += 0.5*fabs(A_offd_data[j]); } } else { cf_diag = cf_marker[i]; /* Add the diagonal and the local off-thread part of the ith row */ for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++) { ii = A_diag_J[j]; if ((ii == i || ii < ns || ii >= ne) && (cf_diag == cf_marker[A_diag_J[j]])) { if (ii == i) { diag = fabs(A_diag_data[j]); l1_norm[i] += fabs(A_diag_data[j]); } else l1_norm[i] += 0.5*fabs(A_diag_data[j]); } } /* Add the CF l1 norm of the offd part of the ith row */ if (num_cols_offd) { for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++) if (cf_diag == cf_marker_offd[A_offd_J[j]]) l1_norm[i] += 0.5*fabs(A_offd_data[j]); } } /* Truncate according to Remark 6.2 */ if (l1_norm[i] <= 4.0/3.0*diag) l1_norm[i] = diag; } } /* Handle negative definite matrices */ for (i = ns; i < ne; i++) if (A_diag_data[A_diag_I[i]] < 0) l1_norm[i] = -l1_norm[i]; for (i = ns; i < ne; i++) /* if (fabs(l1_norm[i]) < DBL_EPSILON) */ if (fabs(l1_norm[i]) == 0.0) { hypre_error_in_arg(1); break; } } hypre_TFree(cf_marker_offd); *l1_norm_ptr = l1_norm; return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_ParCSRRelaxThreads * 1 = l1-scaled Jacobi * 2 = l1-scaled 
block Gauss-Seidel/SSOR
 *--------------------------------------------------------------------------*/

HYPRE_Int hypre_ParCSRRelaxThreads(hypre_ParCSRMatrix *A,
                                   hypre_ParVector *f,
                                   HYPRE_Int relax_type,
                                   HYPRE_Int relax_times,
                                   HYPRE_Real *l1_norms,
                                   HYPRE_Real relax_weight,
                                   HYPRE_Real omega,
                                   hypre_ParVector *u,
                                   hypre_ParVector *Vtemp,
                                   hypre_ParVector *z)
{
   MPI_Comm comm = hypre_ParCSRMatrixComm(A);
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
   HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
   HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
   hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
   HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
   HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
   HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
   hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   hypre_ParCSRCommHandle *comm_handle;
   HYPRE_Int n = hypre_CSRMatrixNumRows(A_diag);
   HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(A_offd);
   hypre_Vector *u_local = hypre_ParVectorLocalVector(u);
   HYPRE_Real *u_data = hypre_VectorData(u_local);
   hypre_Vector *f_local = hypre_ParVectorLocalVector(f);
   HYPRE_Real *f_data = hypre_VectorData(f_local);
   hypre_Vector *Vtemp_local = hypre_ParVectorLocalVector(Vtemp);
   HYPRE_Real *Vtemp_data = hypre_VectorData(Vtemp_local);
   HYPRE_Real *Vext_data;     /* off-processor u values (only when num_procs > 1) */
   HYPRE_Real *v_buf_data;    /* send buffer for u values */
   HYPRE_Real *tmp_data;      /* snapshot of u before a GS sweep */
   HYPRE_Int i, j;
   HYPRE_Int ii, jj;
   HYPRE_Int ns, ne, size, rest;
   HYPRE_Int relax_error = 0;
   HYPRE_Int num_sends;
   HYPRE_Int index, start;
   HYPRE_Int num_procs, num_threads, my_id;
   HYPRE_Real zero = 0.0;
   HYPRE_Real res, res2;

   hypre_MPI_Comm_size(comm,&num_procs);
   hypre_MPI_Comm_rank(comm,&my_id);
   num_threads = hypre_NumThreads();

   /* only allow jacobi and GS */
   if (relax_type > 2)
      relax_type = 2;

   /*-----------------------------------------------------------------
    * Copy current approximation into temporary vector.
    *-----------------------------------------------------------------*/
   if (num_procs > 1)
   {
      /* Exchange boundary u values with neighboring processors so that
         Vext_data holds the off-processor columns needed by A_offd */
      num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);

      v_buf_data = hypre_CTAlloc(HYPRE_Real,
                                 hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends));

      Vext_data = hypre_CTAlloc(HYPRE_Real,num_cols_offd);

      if (num_cols_offd)
      {
         A_offd_j = hypre_CSRMatrixJ(A_offd);
         A_offd_data = hypre_CSRMatrixData(A_offd);
      }

      index = 0;
      for (i = 0; i < num_sends; i++)
      {
         start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
         for (j=start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg,i+1); j++)
            v_buf_data[index++]
               = u_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
      }

      comm_handle = hypre_ParCSRCommHandleCreate(1, comm_pkg, v_buf_data,
                                                 Vext_data);

      /*-----------------------------------------------------------------
       * Copy current approximation into temporary vector.
       *-----------------------------------------------------------------*/
      hypre_ParCSRCommHandleDestroy(comm_handle);
      comm_handle = NULL;
   }

   if (relax_type == 1) /* Jacobi */
   {
      /* Snapshot u so every point relaxes against the old iterate */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
      for (i = 0; i < n; i++)
      {
         Vtemp_data[i] = u_data[i];
      }

#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,ii,jj,res) HYPRE_SMP_SCHEDULE
#endif
      for (i = 0; i < n; i++)
      {
         /*-----------------------------------------------------------
          * If diagonal is nonzero, relax point i; otherwise, skip it.
          *-----------------------------------------------------------*/
         if (A_diag_data[A_diag_i[i]] != zero)
         {
            res = f_data[i];
            for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++)
            {
               ii = A_diag_j[jj];
               res -= A_diag_data[jj] * Vtemp_data[ii];
            }
            for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
            {
               ii = A_offd_j[jj];
               res -= A_offd_data[jj] * Vext_data[ii];
            }
            /* l1_norms replaces the diagonal as the Jacobi scaling */
            u_data[i] += (relax_weight*res)/l1_norms[i];
         }
      }
   }
   else if (relax_type == 2) /* GS */
   {
      if (relax_weight == 1 && omega == 1)
      {
         /* Unweighted hybrid symmetric Gauss-Seidel: each thread does a
            forward then a backward sweep over its own block [ns,ne);
            couplings to other blocks use the snapshot tmp_data (Jacobi) */
         tmp_data = hypre_CTAlloc(HYPRE_Real,n);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
         for (i = 0; i < n; i++)
            tmp_data[i] = u_data[i];
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE
#endif
         for (j = 0; j < num_threads; j++)
         {
            size = n/num_threads;
            rest = n - size*num_threads;
            if (j < rest)
            {
               ns = j*size+j;
               ne = (j+1)*size+j+1;
            }
            else
            {
               ns = j*size+rest;
               ne = (j+1)*size+rest;
            }
            for (i = ns; i < ne; i++)   /* interior points first */
            {
               /*-----------------------------------------------------------
                * If diagonal is nonzero, relax point i; otherwise, skip it.
                *-----------------------------------------------------------*/
               if (A_diag_data[A_diag_i[i]] != zero)
               {
                  res = f_data[i];
                  for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++)
                  {
                     ii = A_diag_j[jj];
                     if (ii >= ns && ii < ne)
                     {
                        /* in-block coupling: use the updated value (GS) */
                        res -= A_diag_data[jj] * u_data[ii];
                     }
                     else
                        res -= A_diag_data[jj] * tmp_data[ii];
                  }
                  for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
                  {
                     ii = A_offd_j[jj];
                     res -= A_offd_data[jj] * Vext_data[ii];
                  }
                  u_data[i] += res / l1_norms[i];
               }
            }
            for (i = ne-1; i > ns-1; i--)   /* backward sweep */
            {
               /*-----------------------------------------------------------
                * If diagonal is nonzero, relax point i; otherwise, skip it.
                *-----------------------------------------------------------*/
               if (A_diag_data[A_diag_i[i]] != zero)
               {
                  res = f_data[i];
                  for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++)
                  {
                     ii = A_diag_j[jj];
                     if (ii >= ns && ii < ne)
                     {
                        res -= A_diag_data[jj] * u_data[ii];
                     }
                     else
                        res -= A_diag_data[jj] * tmp_data[ii];
                  }
                  for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
                  {
                     ii = A_offd_j[jj];
                     res -= A_offd_data[jj] * Vext_data[ii];
                  }
                  u_data[i] += res / l1_norms[i];
               }
            }
         }
         hypre_TFree(tmp_data);
      }
      else
      {
         /* Weighted SSOR variant: res2 accumulates the correction from
            already-updated in-block points (ii < i forward, ii > i backward),
            combined as c1*res + c2*res2 with c1 = omega*relax_weight and
            c2 = omega*(1-relax_weight) */
         HYPRE_Real c1 = omega*relax_weight;
         HYPRE_Real c2 = omega*(1.0-relax_weight);
         tmp_data = hypre_CTAlloc(HYPRE_Real,n);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
         for (i = 0; i < n; i++)
         {
            tmp_data[i] = u_data[i];
         }
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE
#endif
         for (j = 0; j < num_threads; j++)
         {
            size = n/num_threads;
            rest = n - size*num_threads;
            if (j < rest)
            {
               ns = j*size+j;
               ne = (j+1)*size+j+1;
            }
            else
            {
               ns = j*size+rest;
               ne = (j+1)*size+rest;
            }
            for (i = ns; i < ne; i++)   /* forward sweep */
            {
               /*-----------------------------------------------------------
                * If diagonal is nonzero, relax point i; otherwise, skip it.
                *-----------------------------------------------------------*/
               if (A_diag_data[A_diag_i[i]] != zero)
               {
                  res2 = 0.0;
                  res = f_data[i];
                  /* Vtemp records the pre-update value for the backward sweep */
                  Vtemp_data[i] = u_data[i];
                  for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++)
                  {
                     ii = A_diag_j[jj];
                     if (ii >= ns && ii < ne)
                     {
                        res -= A_diag_data[jj] * u_data[ii];
                        if (ii < i)
                           res2 += A_diag_data[jj] * (Vtemp_data[ii] - u_data[ii]);
                     }
                     else
                        res -= A_diag_data[jj] * tmp_data[ii];
                  }
                  for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
                  {
                     ii = A_offd_j[jj];
                     res -= A_offd_data[jj] * Vext_data[ii];
                  }
                  u_data[i] += (c1*res + c2*res2) / l1_norms[i];
               }
            }
            for (i = ne-1; i > ns-1; i--)   /* backward sweep */
            {
               /*-----------------------------------------------------------
                * If diagonal is nonzero, relax point i; otherwise, skip it.
                *-----------------------------------------------------------*/
               if (A_diag_data[A_diag_i[i]] != zero)
               {
                  res2 = 0.0;
                  res = f_data[i];
                  for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++)
                  {
                     ii = A_diag_j[jj];
                     if (ii >= ns && ii < ne)
                     {
                        res -= A_diag_data[jj] * u_data[ii];
                        if (ii > i)
                           res2 += A_diag_data[jj] * (Vtemp_data[ii] - u_data[ii]);
                     }
                     else
                        res -= A_diag_data[jj] * tmp_data[ii];
                  }
                  for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
                  {
                     ii = A_offd_j[jj];
                     res -= A_offd_data[jj] * Vext_data[ii];
                  }
                  u_data[i] += (c1*res + c2*res2) / l1_norms[i];
               }
            }
         }
         hypre_TFree(tmp_data);
      }
   } /* end of Jacobi or G.S. */

   if (num_procs > 1)
   {
      hypre_TFree(Vext_data);
      hypre_TFree(v_buf_data);
   }

   return(relax_error);
}
/* ==== begin concatenated file: pyscf pbc/gto/grid_ao.c ==== */
/* Copyright 2014-2018 The PySCF Developers. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. * * Author: Qiming Sun <osirpt.sun@gmail.com> */ #include <stdlib.h> #include <math.h> #include <complex.h> #include "config.h" #include "cint.h" #include "vhf/fblas.h" #include "gto/grid_ao_drv.h" #include "np_helper/np_helper.h" #define ALL_IMAGES 255 #define IMGBLK 40 #define OF_CMPLX 2 double CINTcommon_fac_sp(int l); void GTOshell_eval_grid_cart(double *gto, double *ri, double *exps, double *coord, double *alpha, double *coeff, double *env, int l, int np, int nc, size_t nao, size_t ngrids, size_t bgrids); void GTOshell_eval_grid_cart_deriv1(double *gto, double *ri, double *exps, double *coord, double *alpha, double *coeff, double *env, int l, int np, int nc, size_t nao, size_t ngrids, size_t bgrids); void GTOshell_eval_grid_cart_deriv2(double *cgto, double *ri, double *exps, double *coord, double *alpha, double *coeff, double *env, int l, int np, int nc, size_t nao, size_t ngrids, size_t bgrids); void GTOshell_eval_grid_cart_deriv3(double *cgto, double *ri, double *exps, double *coord, double *alpha, double *coeff, double *env, int l, int np, int nc, size_t nao, size_t ngrids, size_t bgrids); void GTOshell_eval_grid_cart_deriv4(double *cgto, double *ri, double *exps, double *coord, double *alpha, double *coeff, double *env, int l, int np, int nc, size_t nao, size_t ngrids, size_t bgrids); void GTOshell_eval_grid_cart(double *gto, double *ri, double *exps, double *coord, double *alpha, 
                             double *coeff, double *env,
                             int l, int np, int nc,
                             size_t nao, size_t ngrids, size_t bgrids);
void GTOshell_eval_grid_ip_cart(double *gto, double *ri, double *exps,
                                double *coord, double *alpha,
                                double *coeff, double *env,
                                int l, int np, int nc,
                                size_t nao, size_t ngrids, size_t bgrids);

/*
 * Extend the meaning of non0table: given shell ID and block ID,
 * non0table is the number of images in Ls that does not vanish.
 * Ls should be sorted based on the distance to center cell.
 */
void PBCnr_ao_screen(unsigned char *non0table, double *coords, int ngrids,
                     double *Ls, int nimgs,
                     int *atm, int natm, int *bas, int nbas, double *env)
{
        const int nblk = (ngrids+BLKSIZE-1) / BLKSIZE;
#pragma omp parallel
{
        int i, j, m;
        int np, nc, atm_id;
        size_t bas_id, ib;
        double rr, arr, maxc;
        double logcoeff[NPRIMAX];
        double dr[3];
        double rL[3];
        double *p_exp, *pcoeff, *ratm;
#pragma omp for nowait schedule(dynamic)
        for (bas_id = 0; bas_id < nbas; bas_id++) {
                np = bas[NPRIM_OF+bas_id*BAS_SLOTS];
                nc = bas[NCTR_OF +bas_id*BAS_SLOTS];
                p_exp = env + bas[PTR_EXP+bas_id*BAS_SLOTS];
                pcoeff = env + bas[PTR_COEFF+bas_id*BAS_SLOTS];
                atm_id = bas[ATOM_OF+bas_id*BAS_SLOTS];
                ratm = env + atm[atm_id*ATM_SLOTS+PTR_COORD];

                /* log of the largest contraction coefficient per primitive,
                 * used to make the exponential cutoff coefficient-aware */
                for (j = 0; j < np; j++) {
                        maxc = 0;
                        for (i = 0; i < nc; i++) {
                                maxc = MAX(maxc, fabs(pcoeff[i*np+j]));
                        }
                        logcoeff[j] = log(maxc);
                }

                for (ib = 0; ib < nblk; ib++) {
                        /* Scan images from farthest to nearest; the first
                         * image whose Gaussian survives the cutoff on any
                         * grid point of this block fixes the image count */
                        for (m = nimgs-1; m >= 0; m--) {
                                rL[0] = ratm[0] + Ls[m*3+0];
                                rL[1] = ratm[1] + Ls[m*3+1];
                                rL[2] = ratm[2] + Ls[m*3+2];
                                for (i = ib*BLKSIZE; i < MIN(ngrids, (ib+1)*BLKSIZE); i++) {
                                        dr[0] = coords[0*ngrids+i] - rL[0];
                                        dr[1] = coords[1*ngrids+i] - rL[1];
                                        dr[2] = coords[2*ngrids+i] - rL[2];
                                        rr = dr[0]*dr[0] + dr[1]*dr[1] + dr[2]*dr[2];
                                        for (j = 0; j < np; j++) {
                                                arr = p_exp[j] * rr;
                                                if (arr-logcoeff[j] < EXPCUTOFF) {
                                                        /* counts are capped at ALL_IMAGES,
                                                         * which means "all images" */
                                                        non0table[ib*nbas+bas_id] = MIN(ALL_IMAGES, m+1);
                                                        goto next_blk;
                                                }
                                        }
                                }
                        }
                        non0table[ib*nbas+bas_id] = 0;
next_blk:;
                }
        }
}
}

/* Unpack the k-point AO buffer: ao_k stores real and imaginary parts as
 * separate real blocks per k-point; combine them into the complex output
 * array ao[k,comp,nao,ngrids] at the current grid block. */
static void _copy(double complex *out, double *ao_k,
                  size_t ngrids, size_t bgrids, int nkpts, int ncomp, int nao, int ncol)
{
        int i, j, k, ic;
        double complex *pout;
        double *ao_r, *ao_i;
        size_t blksize = ncomp * ncol * bgrids;
        for (k = 0; k < nkpts; k++) {
                ao_r = ao_k + k*2 * blksize;
                ao_i = ao_k +(k*2+1) * blksize;
                for (ic = 0; ic < ncomp; ic++) {
                        pout = out + (k * ncomp + ic) * nao * ngrids;
                        for (j = 0; j < ncol; j++) {
                        for (i = 0; i < bgrids; i++) {
                                pout[j*ngrids+i] = (ao_r[j*bgrids+i] + ao_i[j*bgrids+i]*_Complex_I);
                        } }
                        ao_r += ncol * bgrids;
                        ao_i += ncol * bgrids;
                }
        }
}

// grid2atm[nimgs,xyz,grid_id]
/* Fill grid-to-atom displacement vectors for every lattice image of one
 * atom, and record the minimum grid-atom distance per image.  Images with
 * m >= atm_imag_max are skipped entirely, so min_grid2atm[m] is only
 * meaningful for images the screening allows (callers must check the
 * non0table condition before reading it). */
static void _fill_grid2atm(double *grid2atm, double *min_grid2atm,
                           double *coord, double *Ls, double *r_atm,
                           int atm_imag_max,
                           size_t bgrids, size_t ngrids, int nimgs)
{
        size_t ig, m;
        double rL[3];
        double dist;
        double dist_min;
        for (m = 0; m < nimgs; m++) {
                if ((m < atm_imag_max || atm_imag_max == ALL_IMAGES)) {
                        rL[0] = r_atm[0] + Ls[m*3+0];
                        rL[1] = r_atm[1] + Ls[m*3+1];
                        rL[2] = r_atm[2] + Ls[m*3+2];
                        dist_min = 1e9;
                        for (ig = 0; ig < bgrids; ig++) {
                                grid2atm[0*BLKSIZE+ig] = coord[0*ngrids+ig] - rL[0];
                                grid2atm[1*BLKSIZE+ig] = coord[1*ngrids+ig] - rL[1];
                                grid2atm[2*BLKSIZE+ig] = coord[2*ngrids+ig] - rL[2];
                                dist = (grid2atm[0*BLKSIZE+ig]*grid2atm[0*BLKSIZE+ig]
                                      + grid2atm[1*BLKSIZE+ig]*grid2atm[1*BLKSIZE+ig]
                                      + grid2atm[2*BLKSIZE+ig]*grid2atm[2*BLKSIZE+ig]);
                                dist_min = MIN(dist, dist_min);
                        }
                        min_grid2atm[m] = sqrt(dist_min);
                }
                grid2atm += 3*BLKSIZE;
        }
}

/* Evaluate Cartesian AOs on one grid block for all shells in shls_slice,
 * summing lattice images into k-point AOs via dgemm with exp(i k.L)
 * phase factors.  buf is a scratch area carved into sub-buffers below. */
void PBCeval_cart_iter(FPtr_eval feval, FPtr_exp fexp,
                       size_t nao, size_t ngrids, size_t bgrids, size_t offao,
                       int param[], int *shls_slice, int *ao_loc, double *buf,
                       double *Ls, double complex *expLk, int nimgs, int nkpts,
                       int di_max, double complex *ao, double *coord,
                       double *rcut, unsigned char *non0table,
                       int *atm, int natm, int *bas, int nbas, double *env)
{
        const int ncomp = param[TENSOR];
        const int sh0 = shls_slice[0];
        const int sh1 = shls_slice[1];
        const char TRANS_N = 'N';
        const char TRANS_T = 'T';
        const double D1 = 1;
        const int nkpts2 = nkpts * OF_CMPLX;
        int i, j, k, l, np, nc, atm_id, bas_id, deg, ao_id;
        int iL, iL0, iLcount, dimc;
        int grid2atm_atm_id, count;
        double fac;
        double *p_exp, *pcoeff, *pcoord, *pao, *ri;
        /* Carve buf into: grid2atm | eprim | aobuf | aobufk | Lk_buf | min_grid2atm */
        double *grid2atm = buf; // shape [nimgs,3,bgrids]
        double *eprim = grid2atm + nimgs*3*BLKSIZE;
        double *aobuf = eprim + NPRIMAX*BLKSIZE*2;
        double *aobufk = aobuf + IMGBLK*ncomp*di_max*bgrids;
        double *Lk_buf = aobufk + nkpts*ncomp*di_max*bgrids * OF_CMPLX;
        double complex *zLk_buf = (double complex *)Lk_buf;
        double *min_grid2atm = Lk_buf + IMGBLK * nkpts * OF_CMPLX;
        double *pexpLk;
        int img_idx[nimgs];
        int atm_imag_max[natm];
        /* atm_imag_max[ia]: max image count over this atom's shells */
        for (i = 0; i < natm; i++) {
                atm_imag_max[i] = 0;
        }
        for (bas_id = sh0; bas_id < sh1; bas_id++) {
                atm_id = bas[bas_id*BAS_SLOTS+ATOM_OF];
                atm_imag_max[atm_id] = MAX(atm_imag_max[atm_id], non0table[bas_id]);
        }

        grid2atm_atm_id = -1;
        for (bas_id = sh0; bas_id < sh1; bas_id++) {
                np = bas[bas_id*BAS_SLOTS+NPRIM_OF];
                nc = bas[bas_id*BAS_SLOTS+NCTR_OF ];
                l = bas[bas_id*BAS_SLOTS+ANG_OF ];
                deg = (l+1)*(l+2)/2;
                dimc = nc*deg * ncomp * bgrids;
                fac = CINTcommon_fac_sp(l);
                p_exp = env + bas[bas_id*BAS_SLOTS+PTR_EXP];
                pcoeff = env + bas[bas_id*BAS_SLOTS+PTR_COEFF];
                atm_id = bas[bas_id*BAS_SLOTS+ATOM_OF];
                ri = env + atm[PTR_COORD+atm_id*ATM_SLOTS];
                ao_id = ao_loc[bas_id] - ao_loc[sh0];
                /* Displacements are per atom; reuse for consecutive shells
                 * on the same center */
                if (grid2atm_atm_id != atm_id) {
                        _fill_grid2atm(grid2atm, min_grid2atm, coord, Ls, ri,
                                       atm_imag_max[atm_id],
                                       bgrids, ngrids, nimgs);
                        grid2atm_atm_id = atm_id;
                }

                for (i = 0; i < nkpts2*dimc; i++) {
                        aobufk[i] = 0;
                }
                /* Process images in chunks of IMGBLK, contracting each chunk
                 * against the phase factors with one dgemm */
                for (iL0 = 0; iL0 < nimgs; iL0+=IMGBLK) {
                        iLcount = MIN(IMGBLK, nimgs - iL0);
                        count = 0;
                        for (iL = iL0; iL < iL0+iLcount; iL++) {
                                pcoord = grid2atm + iL * 3*BLKSIZE;
                                /* image survives screening, distance cutoff
                                 * and primitive-exponential prescreen */
                                if ((iL < non0table[bas_id] || non0table[bas_id] == ALL_IMAGES) &&
                                    (min_grid2atm[iL] < rcut[bas_id]) &&
                                    (*fexp)(eprim, pcoord, p_exp, pcoeff, l, np, nc, bgrids, fac)) {
                                        pao = aobuf + count * dimc;
                                        (*feval)(pao, ri, eprim, pcoord, p_exp, pcoeff, env,
                                                 l, np, nc, nc*deg, bgrids, bgrids);
                                        img_idx[count] = iL;
                                        count += 1;
                                }
                        }
                        if (count > 0) {
                                if (img_idx[count-1] != iL0 + count-1) {
                                        // some images are skipped; gather their
                                        // phase factors into a compact buffer
                                        for (i = 0; i < count; i++) {
                                                j = img_idx[i];
                                                for (k = 0; k < nkpts; k++) {
                                                        zLk_buf[i*nkpts+k] = expLk[j*nkpts+k];
                                                }
                                        }
                                        pexpLk = Lk_buf;
                                } else {
                                        pexpLk = (double *)(expLk + nkpts * iL0);
                                }
                                /* aobufk[k] += sum_L aobuf[L] * expLk[L,k]
                                 * (real and imaginary parts as 2*nkpts rows) */
                                dgemm_(&TRANS_N, &TRANS_T, &dimc, &nkpts2, &count,
                                       &D1, aobuf, &dimc, pexpLk, &nkpts2,
                                       &D1, aobufk, &dimc);
                        }
                }
                _copy(ao+ao_id*ngrids+offao, aobufk, ngrids, bgrids, nkpts, ncomp, nao, nc*deg);
        }
}

/* Spherical-harmonic variant of PBCeval_cart_iter: evaluates Cartesian
 * AOs and transforms to the spherical basis with CINTc2s_ket_sph1 for
 * l > 1 (s and p shells are identical in both bases). */
void PBCeval_sph_iter(FPtr_eval feval, FPtr_exp fexp,
                      size_t nao, size_t ngrids, size_t bgrids, size_t offao,
                      int param[], int *shls_slice, int *ao_loc, double *buf,
                      double *Ls, double complex *expLk, int nimgs, int nkpts,
                      int di_max, double complex *ao, double *coord,
                      double *rcut, unsigned char *non0table,
                      int *atm, int natm, int *bas, int nbas, double *env)
{
        const int ncomp = param[TENSOR];
        const int sh0 = shls_slice[0];
        const int sh1 = shls_slice[1];
        const char TRANS_N = 'N';
        const char TRANS_T = 'T';
        const double D1 = 1;
        const int nkpts2 = nkpts * OF_CMPLX;
        int i, j, k, l, np, nc, atm_id, bas_id, deg, dcart, ao_id;
        int iL, iL0, iLcount, dimc;
        int grid2atm_atm_id, count;
        double fac;
        double *p_exp, *pcoeff, *pcoord, *pcart, *pao, *ri;
        /* Same scratch layout as the cart version, plus cart_gto for the
         * Cartesian intermediate of the cart->sph transformation */
        double *grid2atm = buf; // shape [nimgs,3,bgrids]
        double *eprim = grid2atm + nimgs*3*BLKSIZE;
        double *aobuf = eprim + NPRIMAX*BLKSIZE*2;
        double *aobufk = aobuf + IMGBLK*ncomp*di_max*bgrids;
        double *Lk_buf = aobufk + nkpts*ncomp*di_max*bgrids * OF_CMPLX;
        double complex *zLk_buf = (double complex *)Lk_buf;
        double *cart_gto = Lk_buf + IMGBLK * nkpts * OF_CMPLX;
        double *min_grid2atm = cart_gto + ncomp*NCTR_CART*bgrids;
        double *pexpLk;
        int img_idx[nimgs];
        int atm_imag_max[natm];
        for (i = 0; i < natm; i++) {
                atm_imag_max[i] = 0;
        }
        for (bas_id = sh0; bas_id < sh1; bas_id++) {
                atm_id = bas[bas_id*BAS_SLOTS+ATOM_OF];
                atm_imag_max[atm_id] = MAX(atm_imag_max[atm_id], non0table[bas_id]);
        }

        grid2atm_atm_id = -1;
        for (bas_id = sh0; bas_id < sh1; bas_id++) {
                np = bas[bas_id*BAS_SLOTS+NPRIM_OF];
                nc = bas[bas_id*BAS_SLOTS+NCTR_OF ];
                l = bas[bas_id*BAS_SLOTS+ANG_OF ];
                deg = l * 2 + 1;
                dcart = (l+1)*(l+2)/2;
                dimc = nc*deg * ncomp * bgrids;
                fac = CINTcommon_fac_sp(l);
                p_exp = env + bas[bas_id*BAS_SLOTS+PTR_EXP];
                pcoeff = env + bas[bas_id*BAS_SLOTS+PTR_COEFF];
                atm_id = bas[bas_id*BAS_SLOTS+ATOM_OF];
                ri = env + atm[PTR_COORD+atm_id*ATM_SLOTS];
                ao_id = ao_loc[bas_id] - ao_loc[sh0];
                if (grid2atm_atm_id != atm_id) {
                        _fill_grid2atm(grid2atm, min_grid2atm, coord, Ls, ri,
                                       atm_imag_max[atm_id],
                                       bgrids, ngrids, nimgs);
                        grid2atm_atm_id = atm_id;
                }

                NPdset0(aobufk, ((size_t)nkpts2) * dimc);
                for (iL0 = 0; iL0 < nimgs; iL0+=IMGBLK) {
                        iLcount = MIN(IMGBLK, nimgs - iL0);
                        count = 0;
                        for (iL = iL0; iL < iL0+iLcount; iL++) {
                                pcoord = grid2atm + iL * 3*BLKSIZE;
                                if ((iL < non0table[bas_id] || non0table[bas_id] == ALL_IMAGES) &&
                                    (min_grid2atm[iL] < rcut[bas_id]) &&
                                    (*fexp)(eprim, pcoord, p_exp, pcoeff, l, np, nc, bgrids, fac)) {
                                        pao = aobuf + ((size_t)count) * dimc;
                                        if (l <= 1) {
                                                // s, p functions: cart == sph
                                                (*feval)(pao, ri, eprim, pcoord, p_exp, pcoeff, env,
                                                         l, np, nc, nc*dcart, bgrids, bgrids);
                                        } else {
                                                (*feval)(cart_gto, ri, eprim, pcoord, p_exp, pcoeff, env,
                                                         l, np, nc, nc*dcart, bgrids, bgrids);
                                                pcart = cart_gto;
                                                for (i = 0; i < ncomp * nc; i++) {
                                                        CINTc2s_ket_sph1(pao, pcart, bgrids, bgrids, l);
                                                        pao += deg * bgrids;
                                                        pcart += dcart * bgrids;
                                                }
                                        }
                                        img_idx[count] = iL;
                                        count++;
                                }
                        }
                        if (count > 0) {
                                if (img_idx[count-1] != iL0 + count-1) {
                                        // some images are skipped
                                        for (i = 0; i < count; i++) {
                                                j = img_idx[i];
                                                for (k = 0; k < nkpts; k++) {
                                                        zLk_buf[i*nkpts+k] = expLk[j*nkpts+k];
                                                }
                                        }
                                        pexpLk = Lk_buf;
                                } else {
                                        pexpLk = (double *)(expLk + nkpts * iL0);
                                }
                                dgemm_(&TRANS_N, &TRANS_T, &dimc, &nkpts2, &count,
                                       &D1, aobuf, &dimc, pexpLk, &nkpts2,
                                       &D1, aobufk, &dimc);
                        }
                }
                _copy(ao+ao_id*ngrids+offao, aobufk, ngrids, bgrids, nkpts, ncomp, nao, nc*deg);
        }
}

int GTOshloc_by_atom(int *shloc, int *shls_slice, int *ao_loc, int *atm, int *bas);

/*
 * blksize <= 1024 to avoid stack overflow
 *
 * non0table[ngrids/blksize,natm] is the T/F table for ao
 * values to screen the ao evaluation for each shell
 */
/* Top-level parallel driver: splits the work into (grid block) x (shell
 * block) tasks, gives each OpenMP thread one scratch buffer, and dispatches
 * every task to the iterator `fiter` (cart or sph variant). */
void PBCeval_loop(void (*fiter)(), FPtr_eval feval, FPtr_exp fexp,
                  int ngrids, int param[], int *shls_slice, int *ao_loc,
                  double *Ls, int nimgs, double complex *expLk, int nkpts,
                  double complex *ao, double *coord, double *rcut,
                  unsigned char *non0table,
                  int *atm, int natm, int *bas, int nbas, double *env)
{
        // Shell ranges grouped by atom; nshblk groups in total.
        int shloc[shls_slice[1]-shls_slice[0]+1];
        const int nshblk = GTOshloc_by_atom(shloc, shls_slice, ao_loc, atm, bas);
        const int nblk = (ngrids+BLKSIZE-1) / BLKSIZE;
        const size_t Ngrids = ngrids;
        int i;
        // Widest shell (in AO functions) — used to size the scratch buffer.
        int di_max = 0;
        for (i = shls_slice[0]; i < shls_slice[1]; i++) {
                di_max = MAX(di_max, ao_loc[i+1] - ao_loc[i]);
        }
#pragma omp parallel
{
        const int sh0 = shls_slice[0];
        const int sh1 = shls_slice[1];
        const size_t nao = ao_loc[sh1] - ao_loc[sh0];
        int ip, ib, k, iloc, ish;
        size_t aoff, bgrids;
        // Mirrors the scratch partitioning inside the *_iter routines:
        // grid2atm + eprim + aobufk + aobuf + cart_gto, then Lk_buf and
        // min_grid2atm.
        size_t bufsize =((nimgs*3 + NPRIMAX*2 +
                          nkpts *param[POS_E1]*param[TENSOR]*di_max * OF_CMPLX +
                          IMGBLK*param[POS_E1]*param[TENSOR]*di_max +
                          param[POS_E1]*param[TENSOR]*NCTR_CART) * BLKSIZE
                         + nkpts * IMGBLK * OF_CMPLX + nimgs);
        // NOTE(review): malloc return value is not checked.
        double *buf = malloc(sizeof(double) * bufsize);
#pragma omp for nowait schedule(dynamic, 1)
        for (k = 0; k < nblk*nshblk; k++) {
                // Decode the flat task id into (shell group, grid block).
                iloc = k / nblk;
                ish = shloc[iloc];
                ib = k - iloc * nblk;
                ip = ib * BLKSIZE;
                aoff = (ao_loc[ish] - ao_loc[sh0]) * Ngrids + ip;
                bgrids = MIN(ngrids-ip, BLKSIZE);  // last block may be short
                (*fiter)(feval, fexp, nao, Ngrids, bgrids, aoff,
                         param, shloc+iloc, ao_loc, buf, Ls, expLk,
                         nimgs, nkpts, di_max, ao, coord+ip,
                         rcut, non0table+ib*nbas, atm, natm, bas, nbas, env);
        }
        free(buf);
}
}

/* Cartesian driver: routes through PBCeval_loop with the cart iterator. */
void PBCeval_cart_drv(FPtr_eval feval, FPtr_exp fexp,
                      int ngrids, int param[], int *shls_slice, int *ao_loc,
                      double *Ls, int nimgs, double complex *expLk, int nkpts,
                      double complex *ao, double *coord, double *rcut,
                      unsigned char *non0table,
                      int *atm, int natm, int *bas, int nbas, double *env)
{
        PBCeval_loop(PBCeval_cart_iter, feval, fexp,
                     ngrids, param, shls_slice, ao_loc, Ls, nimgs, expLk, nkpts,
                     ao, coord, rcut, non0table,
atm, natm, bas, nbas, env);
}

/* Spherical driver: routes through PBCeval_loop with the sph iterator,
 * which adds the cartesian->spherical transformation. */
void PBCeval_sph_drv(FPtr_eval feval, FPtr_exp fexp,
                     int ngrids, int param[], int *shls_slice, int *ao_loc,
                     double *Ls, int nimgs, double complex *expLk, int nkpts,
                     double complex *ao, double *coord, double *rcut,
                     unsigned char *non0table,
                     int *atm, int natm, int *bas, int nbas, double *env)
{
        PBCeval_loop(PBCeval_sph_iter, feval, fexp,
                     ngrids, param, shls_slice, ao_loc, Ls, nimgs, expLk, nkpts,
                     ao, coord, rcut, non0table,
                     atm, natm, bas, nbas, env);
}

/* AO values, cartesian, no derivatives: 1 component (param = {1, 1}). */
void PBCGTOval_cart_deriv0(int ngrids, int *shls_slice, int *ao_loc,
                           double *Ls, int nimgs, double complex *expLk, int nkpts,
                           double complex *ao, double *coord, double *rcut,
                           unsigned char *non0table,
                           int *atm, int natm, int *bas, int nbas, double *env)
{
        int param[] = {1, 1};
        PBCeval_cart_drv(GTOshell_eval_grid_cart, GTOcontract_exp0,
                         ngrids, param, shls_slice, ao_loc, Ls, nimgs, expLk, nkpts,
                         ao, coord, rcut, non0table, atm, natm, bas, nbas, env);
}

/* AO values, spherical, no derivatives.  Uses the cartesian shell evaluator;
 * the sph driver performs the c2s transformation. */
void PBCGTOval_sph_deriv0(int ngrids, int *shls_slice, int *ao_loc,
                          double *Ls, int nimgs, double complex *expLk, int nkpts,
                          double complex *ao, double *coord, double *rcut,
                          unsigned char *non0table,
                          int *atm, int natm, int *bas, int nbas, double *env)
{
        int param[] = {1, 1};
        PBCeval_sph_drv(GTOshell_eval_grid_cart, GTOcontract_exp0,
                        ngrids, param, shls_slice, ao_loc, Ls, nimgs, expLk, nkpts,
                        ao, coord, rcut, non0table, atm, natm, bas, nbas, env);
}

/* Value + 1st derivatives, cartesian: 4 components (param = {1, 4}). */
void PBCGTOval_cart_deriv1(int ngrids, int *shls_slice, int *ao_loc,
                           double *Ls, int nimgs, double complex *expLk, int nkpts,
                           double complex *ao, double *coord, double *rcut,
                           unsigned char *non0table,
                           int *atm, int natm, int *bas, int nbas, double *env)
{
        int param[] = {1, 4};
        PBCeval_cart_drv(GTOshell_eval_grid_cart_deriv1, GTOcontract_exp1,
                         ngrids, param, shls_slice, ao_loc, Ls, nimgs, expLk, nkpts,
                         ao, coord, rcut, non0table, atm, natm, bas, nbas, env);
}

/* Value + 1st derivatives, spherical: 4 components. */
void PBCGTOval_sph_deriv1(int ngrids, int *shls_slice, int *ao_loc,
                          double *Ls, int nimgs, double complex *expLk, int nkpts,
                          double complex *ao, double *coord, double *rcut,
                          unsigned char *non0table,
                          int *atm, int natm, int *bas, int nbas, double *env)
{
        int param[] = {1, 4};
        PBCeval_sph_drv(GTOshell_eval_grid_cart_deriv1, GTOcontract_exp1,
                        ngrids, param, shls_slice, ao_loc, Ls, nimgs, expLk, nkpts,
                        ao, coord, rcut, non0table, atm, natm, bas, nbas, env);
}

/* Value + derivatives through 2nd order, cartesian: 10 components. */
void PBCGTOval_cart_deriv2(int ngrids, int *shls_slice, int *ao_loc,
                           double *Ls, int nimgs, double complex *expLk, int nkpts,
                           double complex *ao, double *coord, double *rcut,
                           unsigned char *non0table,
                           int *atm, int natm, int *bas, int nbas, double *env)
{
        int param[] = {1, 10};
        PBCeval_cart_drv(GTOshell_eval_grid_cart_deriv2, GTOprim_exp,
                         ngrids, param, shls_slice, ao_loc, Ls, nimgs, expLk, nkpts,
                         ao, coord, rcut, non0table, atm, natm, bas, nbas, env);
}

/* Value + derivatives through 2nd order, spherical: 10 components. */
void PBCGTOval_sph_deriv2(int ngrids, int *shls_slice, int *ao_loc,
                          double *Ls, int nimgs, double complex *expLk, int nkpts,
                          double complex *ao, double *coord, double *rcut,
                          unsigned char *non0table,
                          int *atm, int natm, int *bas, int nbas, double *env)
{
        int param[] = {1, 10};
        PBCeval_sph_drv(GTOshell_eval_grid_cart_deriv2, GTOprim_exp,
                        ngrids, param, shls_slice, ao_loc, Ls, nimgs, expLk, nkpts,
                        ao, coord, rcut, non0table, atm, natm, bas, nbas, env);
}

/* Value + derivatives through 3rd order, cartesian: 20 components. */
void PBCGTOval_cart_deriv3(int ngrids, int *shls_slice, int *ao_loc,
                           double *Ls, int nimgs, double complex *expLk, int nkpts,
                           double complex *ao, double *coord, double *rcut,
                           unsigned char *non0table,
                           int *atm, int natm, int *bas, int nbas, double *env)
{
        int param[] = {1, 20};
        PBCeval_cart_drv(GTOshell_eval_grid_cart_deriv3, GTOprim_exp,
                         ngrids, param, shls_slice, ao_loc, Ls, nimgs, expLk, nkpts,
                         ao, coord, rcut, non0table, atm, natm, bas, nbas, env);
}

/* Value + derivatives through 3rd order, spherical: 20 components. */
void PBCGTOval_sph_deriv3(int ngrids, int *shls_slice, int *ao_loc,
                          double *Ls, int nimgs, double complex *expLk, int nkpts,
                          double complex *ao, double *coord, double *rcut,
                          unsigned char *non0table,
                          int *atm, int natm, int *bas, int nbas, double *env)
{
        int param[] = {1, 20};
        PBCeval_sph_drv(GTOshell_eval_grid_cart_deriv3, GTOprim_exp,
                        ngrids, param, shls_slice, ao_loc, Ls, nimgs, expLk, nkpts,
                        ao, coord, rcut, non0table, atm, natm, bas, nbas, env);
}

/* Value + derivatives through 4th order, cartesian: 35 components. */
void PBCGTOval_cart_deriv4(int ngrids, int *shls_slice, int *ao_loc,
                           double *Ls, int nimgs, double complex *expLk, int nkpts,
                           double complex *ao, double *coord, double *rcut,
                           unsigned char *non0table,
                           int *atm, int natm, int *bas, int nbas, double *env)
{
        int param[] = {1, 35};
        PBCeval_cart_drv(GTOshell_eval_grid_cart_deriv4, GTOprim_exp,
                         ngrids, param, shls_slice, ao_loc, Ls, nimgs, expLk, nkpts,
                         ao, coord, rcut, non0table, atm, natm, bas, nbas, env);
}

/* Value + derivatives through 4th order, spherical: 35 components. */
void PBCGTOval_sph_deriv4(int ngrids, int *shls_slice, int *ao_loc,
                          double *Ls, int nimgs, double complex *expLk, int nkpts,
                          double complex *ao, double *coord, double *rcut,
                          unsigned char *non0table,
                          int *atm, int natm, int *bas, int nbas, double *env)
{
        int param[] = {1, 35};
        PBCeval_sph_drv(GTOshell_eval_grid_cart_deriv4, GTOprim_exp,
                        ngrids, param, shls_slice, ao_loc, Ls, nimgs, expLk, nkpts,
                        ao, coord, rcut, non0table, atm, natm, bas, nbas, env);
}

/* Convenience alias: cartesian AO values == deriv0. */
void PBCGTOval_cart(int ngrids, int *shls_slice, int *ao_loc,
                    double *Ls, int nimgs, double complex *expLk, int nkpts,
                    double complex *ao, double *coord, double *rcut,
                    unsigned char *non0table,
                    int *atm, int natm, int *bas, int nbas, double *env)
{
//        int param[] = {1, 1};
//        PBCeval_cart_drv(GTOshell_eval_grid_cart, GTOcontract_exp0,
//                         ngrids, param, shls_slice, ao_loc, Ls, nimgs, expLk, nkpts,
//                         ao, coord, rcut, non0table, atm, natm, bas, nbas, env);
        PBCGTOval_cart_deriv0(ngrids, shls_slice, ao_loc, Ls, nimgs, expLk, nkpts,
                              ao, coord, rcut, non0table, atm, natm, bas, nbas, env);
}

/* Convenience alias: spherical AO values == deriv0. */
void PBCGTOval_sph(int ngrids, int *shls_slice, int *ao_loc,
                   double *Ls, int nimgs, double complex *expLk, int nkpts,
                   double complex *ao, double *coord, double *rcut,
                   unsigned char *non0table,
                   int *atm, int natm, int *bas, int nbas, double *env)
{
//        int param[] = {1, 1};
//        PBCeval_sph_drv(GTOshell_eval_grid_cart, GTOcontract_exp0,
//                        ngrids, param, shls_slice, ao_loc, Ls, nimgs, expLk, nkpts,
//                        ao, coord, rcut, non0table, atm, natm, bas, nbas, env);
        PBCGTOval_sph_deriv0(ngrids, shls_slice, ao_loc, Ls, nimgs, expLk, nkpts,
                             ao, coord, rcut, non0table, atm, natm, bas, nbas, env);
}

/* Gradient operator (ip) applied to AOs, cartesian: 3 components. */
void PBCGTOval_ip_cart(int ngrids, int *shls_slice, int *ao_loc,
                       double *Ls, int nimgs, double complex *expLk, int nkpts,
                       double complex *ao, double *coord, double *rcut,
                       unsigned char *non0table,
                       int *atm, int natm, int *bas, int nbas, double *env)
{
        int param[] = {1, 3};
        PBCeval_cart_drv(GTOshell_eval_grid_ip_cart, GTOcontract_exp1,
                         ngrids, param, shls_slice, ao_loc, Ls, nimgs, expLk, nkpts,
                         ao, coord, rcut, non0table, atm, natm, bas, nbas, env);
}

/* Gradient operator (ip) applied to AOs, spherical: 3 components. */
void PBCGTOval_ip_sph(int ngrids, int *shls_slice, int *ao_loc,
                      double *Ls, int nimgs, double complex *expLk, int nkpts,
                      double complex *ao, double *coord, double *rcut,
                      unsigned char *non0table,
                      int *atm, int natm, int *bas, int nbas, double *env)
{
        int param[] = {1, 3};
        PBCeval_sph_drv(GTOshell_eval_grid_ip_cart, GTOcontract_exp1,
                        ngrids, param, shls_slice, ao_loc, Ls, nimgs, expLk, nkpts,
                        ao, coord, rcut, non0table, atm, natm, bas, nbas, env);
}
Sema.h
//===--- Sema.h - Semantic Analysis & AST Building --------------*- C++ -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // // This file defines the Sema class, which performs semantic analysis and // builds ASTs. // //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_SEMA_SEMA_H #define LLVM_CLANG_SEMA_SEMA_H #include "clang/AST/Attr.h" #include "clang/AST/Availability.h" #include "clang/AST/ComparisonCategories.h" #include "clang/AST/DeclTemplate.h" #include "clang/AST/DeclarationName.h" #include "clang/AST/Expr.h" #include "clang/AST/ExprCXX.h" #include "clang/AST/ExprObjC.h" #include "clang/AST/ExternalASTSource.h" #include "clang/AST/LocInfoType.h" #include "clang/AST/MangleNumberingContext.h" #include "clang/AST/NSAPI.h" #include "clang/AST/PrettyPrinter.h" #include "clang/AST/StmtCXX.h" #include "clang/AST/TypeLoc.h" #include "clang/APINotes/APINotesManager.h" #include "clang/AST/TypeOrdering.h" #include "clang/Basic/ExpressionTraits.h" #include "clang/Basic/Module.h" #include "clang/Basic/OpenMPKinds.h" #include "clang/Basic/PragmaKinds.h" #include "clang/Basic/Specifiers.h" #include "clang/Basic/TemplateKinds.h" #include "clang/Basic/TypeTraits.h" #include "clang/Sema/AnalysisBasedWarnings.h" #include "clang/Sema/CleanupInfo.h" #include "clang/Sema/DeclSpec.h" #include "clang/Sema/ExternalSemaSource.h" #include "clang/Sema/IdentifierResolver.h" #include "clang/Sema/ObjCMethodList.h" #include "clang/Sema/Ownership.h" #include "clang/Sema/Scope.h" #include "clang/Sema/TypoCorrection.h" #include "clang/Sema/Weak.h" #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/Optional.h" #include "llvm/ADT/SetVector.h" #include "llvm/ADT/SmallBitVector.h" #include 
"llvm/ADT/SmallPtrSet.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/TinyPtrVector.h" #include <deque> #include <functional> #include <memory> #include <string> #include <tuple> #include <vector> namespace llvm { class APSInt; template <typename ValueT> struct DenseMapInfo; template <typename ValueT, typename ValueInfoT> class DenseSet; class SmallBitVector; struct InlineAsmIdentifierInfo; } namespace clang { class ADLResult; class ASTConsumer; class ASTContext; class ASTMutationListener; class ASTReader; class ASTWriter; class ArrayType; class ParsedAttr; class BindingDecl; class BlockDecl; class CapturedDecl; class CXXBasePath; class CXXBasePaths; class CXXBindTemporaryExpr; typedef SmallVector<CXXBaseSpecifier*, 4> CXXCastPath; class CXXConstructorDecl; class CXXConversionDecl; class CXXDeleteExpr; class CXXDestructorDecl; class CXXFieldCollector; class CXXMemberCallExpr; class CXXMethodDecl; class CXXScopeSpec; class CXXTemporary; class CXXTryStmt; class CallExpr; class ClassTemplateDecl; class ClassTemplatePartialSpecializationDecl; class ClassTemplateSpecializationDecl; class VarTemplatePartialSpecializationDecl; class CodeCompleteConsumer; class CodeCompletionAllocator; class CodeCompletionTUInfo; class CodeCompletionResult; class CoroutineBodyStmt; class Decl; class DeclAccessPair; class DeclContext; class DeclRefExpr; class DeclaratorDecl; class DeducedTemplateArgument; class DependentDiagnostic; class DesignatedInitExpr; class Designation; class EnableIfAttr; class EnumConstantDecl; class Expr; class ExtVectorType; class FormatAttr; class FriendDecl; class FunctionDecl; class FunctionProtoType; class FunctionTemplateDecl; class ImplicitConversionSequence; typedef MutableArrayRef<ImplicitConversionSequence> ConversionSequenceList; class InitListExpr; class InitializationKind; class InitializationSequence; class InitializedEntity; class IntegerLiteral; class LabelStmt; class LambdaExpr; class LangOptions; class LocalInstantiationScope; class 
LookupResult; class MacroInfo; typedef ArrayRef<std::pair<IdentifierInfo *, SourceLocation>> ModuleIdPath; class ModuleLoader; class MultiLevelTemplateArgumentList; class NamedDecl; class ObjCCategoryDecl; class ObjCCategoryImplDecl; class ObjCCompatibleAliasDecl; class ObjCContainerDecl; class ObjCImplDecl; class ObjCImplementationDecl; class ObjCInterfaceDecl; class ObjCIvarDecl; template <class T> class ObjCList; class ObjCMessageExpr; class ObjCMethodDecl; class ObjCPropertyDecl; class ObjCProtocolDecl; class OMPThreadPrivateDecl; class OMPRequiresDecl; class OMPDeclareReductionDecl; class OMPDeclareSimdDecl; class OMPClause; struct OMPVarListLocTy; struct OverloadCandidate; class OverloadCandidateSet; class OverloadExpr; class ParenListExpr; class ParmVarDecl; class Preprocessor; class PseudoDestructorTypeStorage; class PseudoObjectExpr; class QualType; class StandardConversionSequence; class Stmt; class StringLiteral; class SwitchStmt; class TemplateArgument; class TemplateArgumentList; class TemplateArgumentLoc; class TemplateDecl; class TemplateInstantiationCallback; class TemplateParameterList; class TemplatePartialOrderingContext; class TemplateTemplateParmDecl; class Token; class TypeAliasDecl; class TypedefDecl; class TypedefNameDecl; class TypeLoc; class TypoCorrectionConsumer; class UnqualifiedId; class UnresolvedLookupExpr; class UnresolvedMemberExpr; class UnresolvedSetImpl; class UnresolvedSetIterator; class UsingDecl; class UsingShadowDecl; class ValueDecl; class VarDecl; class VarTemplateSpecializationDecl; class VisibilityAttr; class VisibleDeclConsumer; class IndirectFieldDecl; struct DeductionFailureInfo; class TemplateSpecCandidateSet; namespace sema { class AccessedEntity; class BlockScopeInfo; class Capture; class CapturedRegionScopeInfo; class CapturingScopeInfo; class CompoundScopeInfo; class DelayedDiagnostic; class DelayedDiagnosticPool; class FunctionScopeInfo; class LambdaScopeInfo; class PossiblyUnreachableDiag; class 
SemaPPCallbacks;
  class TemplateDeductionInfo;
}

namespace threadSafety {
  class BeforeSet;
  void threadSafetyCleanup(BeforeSet* Cache);
}

// FIXME: No way to easily map from TemplateTypeParmTypes to
// TemplateTypeParmDecls, so we have this horrible PointerUnion.
typedef std::pair<llvm::PointerUnion<const TemplateTypeParmType*, NamedDecl*>,
                  SourceLocation> UnexpandedParameterPack;

/// Describes whether we've seen any nullability information for the given
/// file.
struct FileNullability {
  /// The first pointer declarator (of any pointer kind) in the file that does
  /// not have a corresponding nullability annotation.
  SourceLocation PointerLoc;

  /// The end location for the first pointer declarator in the file. Used for
  /// placing fix-its.
  SourceLocation PointerEndLoc;

  /// Which kind of pointer declarator we saw.
  uint8_t PointerKind;

  /// Whether we saw any type nullability annotations in the given file.
  bool SawTypeNullability = false;
};

/// A mapping from file IDs to a record of whether we've seen nullability
/// information in that file.
class FileNullabilityMap {
  /// A mapping from file IDs to the nullability information for each file ID.
  llvm::DenseMap<FileID, FileNullability> Map;

  /// A single-element cache based on the file ID.  Holds the most recently
  /// requested entry; it is flushed back into Map when a different file is
  /// looked up.
  struct {
    FileID File;
    FileNullability Nullability;
  } Cache;

public:
  FileNullability &operator[](FileID file) {
    // Check the single-element cache.
    if (file == Cache.File)
      return Cache.Nullability;

    // It's not in the single-element cache; flush the cache if we have one.
    if (!Cache.File.isInvalid()) {
      Map[Cache.File] = Cache.Nullability;
    }

    // Pull this entry into the cache.
    Cache.File = file;
    Cache.Nullability = Map[file];
    return Cache.Nullability;
  }
};

/// Keeps track of expected type during expression parsing. The type is tied to
/// a particular token, all functions that update or consume the type take a
/// start location of the token they are looking at as a parameter. This allows
/// to avoid updating the type on hot paths in the parser.
class PreferredTypeBuilder {
public:
  PreferredTypeBuilder() = default;
  explicit PreferredTypeBuilder(QualType Type) : Type(Type) {}

  void enterCondition(Sema &S, SourceLocation Tok);
  void enterReturn(Sema &S, SourceLocation Tok);
  void enterVariableInit(SourceLocation Tok, Decl *D);
  /// Computing a type for the function argument may require running
  /// overloading, so we postpone its computation until it is actually needed.
  ///
  /// Clients should be very careful when using this function, as it stores a
  /// function_ref, clients should make sure all calls to get() with the same
  /// location happen while function_ref is alive.
  void enterFunctionArgument(SourceLocation Tok,
                             llvm::function_ref<QualType()> ComputeType);

  void enterParenExpr(SourceLocation Tok, SourceLocation LParLoc);
  void enterUnary(Sema &S, SourceLocation Tok, tok::TokenKind OpKind,
                  SourceLocation OpLoc);
  void enterBinary(Sema &S, SourceLocation Tok, Expr *LHS, tok::TokenKind Op);
  void enterMemAccess(Sema &S, SourceLocation Tok, Expr *Base);
  void enterSubscript(Sema &S, SourceLocation Tok, Expr *LHS);
  /// Handles all type casts, including C-style cast, C++ casts, etc.
  void enterTypeCast(SourceLocation Tok, QualType CastType);

  /// Returns the expected type for the token at \p Tok, or a null QualType
  /// when \p Tok is not the location this builder was primed for.  The lazy
  /// ComputeType callback is only invoked when no eager Type was recorded.
  QualType get(SourceLocation Tok) const {
    if (Tok != ExpectedLoc)
      return QualType();
    if (!Type.isNull())
      return Type;
    if (ComputeType)
      return ComputeType();
    return QualType();
  }

private:
  /// Start position of a token for which we store expected type.
  SourceLocation ExpectedLoc;
  /// Expected type for a token starting at ExpectedLoc.
  QualType Type;
  /// A function to compute expected type at ExpectedLoc. It is only considered
  /// if Type is null.
  llvm::function_ref<QualType()> ComputeType;
};

/// Sema - This implements semantic analysis and AST building for C.
class Sema {
  // Sema instances are neither copyable nor assignable.
  Sema(const Sema &) = delete;
  void operator=(const Sema &) = delete;

  /// Source of additional semantic information.
ExternalSemaSource *ExternalSource; ///Whether Sema has generated a multiplexer and has to delete it. bool isMultiplexExternalSource; static bool mightHaveNonExternalLinkage(const DeclaratorDecl *FD); bool isVisibleSlow(const NamedDecl *D); /// Determine whether two declarations should be linked together, given that /// the old declaration might not be visible and the new declaration might /// not have external linkage. bool shouldLinkPossiblyHiddenDecl(const NamedDecl *Old, const NamedDecl *New) { if (isVisible(Old)) return true; // See comment in below overload for why it's safe to compute the linkage // of the new declaration here. if (New->isExternallyDeclarable()) { assert(Old->isExternallyDeclarable() && "should not have found a non-externally-declarable previous decl"); return true; } return false; } bool shouldLinkPossiblyHiddenDecl(LookupResult &Old, const NamedDecl *New); void setupImplicitSpecialMemberType(CXXMethodDecl *SpecialMem, QualType ResultTy, ArrayRef<QualType> Args); public: typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy; typedef OpaquePtr<TemplateName> TemplateTy; typedef OpaquePtr<QualType> TypeTy; OpenCLOptions OpenCLFeatures; FPOptions FPFeatures; const LangOptions &LangOpts; Preprocessor &PP; ASTContext &Context; ASTConsumer &Consumer; DiagnosticsEngine &Diags; SourceManager &SourceMgr; api_notes::APINotesManager APINotes; /// Flag indicating whether or not to collect detailed statistics. bool CollectStats; /// Code-completion consumer. CodeCompleteConsumer *CodeCompleter; /// CurContext - This is the current declaration context of parsing. DeclContext *CurContext; /// Generally null except when we temporarily switch decl contexts, /// like in \see ActOnObjCTemporaryExitContainerContext. DeclContext *OriginalLexicalContext; /// VAListTagName - The declaration name corresponding to __va_list_tag. /// This is used as part of a hack to omit that class from ADL results. 
DeclarationName VAListTagName; bool MSStructPragmaOn; // True when \#pragma ms_struct on /// Controls member pointer representation format under the MS ABI. LangOptions::PragmaMSPointersToMembersKind MSPointerToMemberRepresentationMethod; /// Stack of active SEH __finally scopes. Can be empty. SmallVector<Scope*, 2> CurrentSEHFinally; /// Source location for newly created implicit MSInheritanceAttrs SourceLocation ImplicitMSInheritanceAttrLoc; /// Holds TypoExprs that are created from `createDelayedTypo`. This is used by /// `TransformTypos` in order to keep track of any TypoExprs that are created /// recursively during typo correction and wipe them away if the correction /// fails. llvm::SmallVector<TypoExpr *, 2> TypoExprs; /// pragma clang section kind enum PragmaClangSectionKind { PCSK_Invalid = 0, PCSK_BSS = 1, PCSK_Data = 2, PCSK_Rodata = 3, PCSK_Text = 4, PCSK_Relro = 5 }; enum PragmaClangSectionAction { PCSA_Set = 0, PCSA_Clear = 1 }; struct PragmaClangSection { std::string SectionName; bool Valid = false; SourceLocation PragmaLocation; void Act(SourceLocation PragmaLocation, PragmaClangSectionAction Action, StringLiteral* Name); }; PragmaClangSection PragmaClangBSSSection; PragmaClangSection PragmaClangDataSection; PragmaClangSection PragmaClangRodataSection; PragmaClangSection PragmaClangRelroSection; PragmaClangSection PragmaClangTextSection; enum PragmaMsStackAction { PSK_Reset = 0x0, // #pragma () PSK_Set = 0x1, // #pragma (value) PSK_Push = 0x2, // #pragma (push[, id]) PSK_Pop = 0x4, // #pragma (pop[, id]) PSK_Show = 0x8, // #pragma (show) -- only for "pack"! 
PSK_Push_Set = PSK_Push | PSK_Set, // #pragma (push[, id], value) PSK_Pop_Set = PSK_Pop | PSK_Set, // #pragma (pop[, id], value) }; template<typename ValueType> struct PragmaStack { struct Slot { llvm::StringRef StackSlotLabel; ValueType Value; SourceLocation PragmaLocation; SourceLocation PragmaPushLocation; Slot(llvm::StringRef StackSlotLabel, ValueType Value, SourceLocation PragmaLocation, SourceLocation PragmaPushLocation) : StackSlotLabel(StackSlotLabel), Value(Value), PragmaLocation(PragmaLocation), PragmaPushLocation(PragmaPushLocation) {} }; void Act(SourceLocation PragmaLocation, PragmaMsStackAction Action, llvm::StringRef StackSlotLabel, ValueType Value); // MSVC seems to add artificial slots to #pragma stacks on entering a C++ // method body to restore the stacks on exit, so it works like this: // // struct S { // #pragma <name>(push, InternalPragmaSlot, <current_pragma_value>) // void Method {} // #pragma <name>(pop, InternalPragmaSlot) // }; // // It works even with #pragma vtordisp, although MSVC doesn't support // #pragma vtordisp(push [, id], n) // syntax. // // Push / pop a named sentinel slot. void SentinelAction(PragmaMsStackAction Action, StringRef Label) { assert((Action == PSK_Push || Action == PSK_Pop) && "Can only push / pop #pragma stack sentinels!"); Act(CurrentPragmaLocation, Action, Label, CurrentValue); } // Constructors. explicit PragmaStack(const ValueType &Default) : DefaultValue(Default), CurrentValue(Default) {} bool hasValue() const { return CurrentValue != DefaultValue; } SmallVector<Slot, 2> Stack; ValueType DefaultValue; // Value used for PSK_Reset action. ValueType CurrentValue; SourceLocation CurrentPragmaLocation; }; // FIXME: We should serialize / deserialize these if they occur in a PCH (but // we shouldn't do so if they're in a module). /// Whether to insert vtordisps prior to virtual bases in the Microsoft /// C++ ABI. 
Possible values are 0, 1, and 2, which mean: /// /// 0: Suppress all vtordisps /// 1: Insert vtordisps in the presence of vbase overrides and non-trivial /// structors /// 2: Always insert vtordisps to support RTTI on partially constructed /// objects PragmaStack<MSVtorDispAttr::Mode> VtorDispStack; // #pragma pack. // Sentinel to represent when the stack is set to mac68k alignment. static const unsigned kMac68kAlignmentSentinel = ~0U; PragmaStack<unsigned> PackStack; // The current #pragma pack values and locations at each #include. struct PackIncludeState { unsigned CurrentValue; SourceLocation CurrentPragmaLocation; bool HasNonDefaultValue, ShouldWarnOnInclude; }; SmallVector<PackIncludeState, 8> PackIncludeStack; // Segment #pragmas. PragmaStack<StringLiteral *> DataSegStack; PragmaStack<StringLiteral *> BSSSegStack; PragmaStack<StringLiteral *> ConstSegStack; PragmaStack<StringLiteral *> CodeSegStack; // RAII object to push / pop sentinel slots for all MS #pragma stacks. // Actions should be performed only if we enter / exit a C++ method body. class PragmaStackSentinelRAII { public: PragmaStackSentinelRAII(Sema &S, StringRef SlotLabel, bool ShouldAct); ~PragmaStackSentinelRAII(); private: Sema &S; StringRef SlotLabel; bool ShouldAct; }; /// A mapping that describes the nullability we've seen in each header file. FileNullabilityMap NullabilityMap; /// Last section used with #pragma init_seg. StringLiteral *CurInitSeg; SourceLocation CurInitSegLoc; /// VisContext - Manages the stack for \#pragma GCC visibility. void *VisContext; // Really a "PragmaVisStack*" /// This an attribute introduced by \#pragma clang attribute. struct PragmaAttributeEntry { SourceLocation Loc; ParsedAttr *Attribute; SmallVector<attr::SubjectMatchRule, 4> MatchRules; bool IsUsed; }; /// A push'd group of PragmaAttributeEntries. struct PragmaAttributeGroup { /// The location of the push attribute. SourceLocation Loc; /// The namespace of this push group. 
const IdentifierInfo *Namespace; SmallVector<PragmaAttributeEntry, 2> Entries; }; SmallVector<PragmaAttributeGroup, 2> PragmaAttributeStack; /// The declaration that is currently receiving an attribute from the /// #pragma attribute stack. const Decl *PragmaAttributeCurrentTargetDecl; /// This represents the last location of a "#pragma clang optimize off" /// directive if such a directive has not been closed by an "on" yet. If /// optimizations are currently "on", this is set to an invalid location. SourceLocation OptimizeOffPragmaLocation; /// Flag indicating if Sema is building a recovery call expression. /// /// This flag is used to avoid building recovery call expressions /// if Sema is already doing so, which would cause infinite recursions. bool IsBuildingRecoveryCallExpr; /// Used to control the generation of ExprWithCleanups. CleanupInfo Cleanup; /// ExprCleanupObjects - This is the stack of objects requiring /// cleanup that are created by the current full expression. The /// element type here is ExprWithCleanups::Object. SmallVector<BlockDecl*, 8> ExprCleanupObjects; /// Store a set of either DeclRefExprs or MemberExprs that contain a reference /// to a variable (constant) that may or may not be odr-used in this Expr, and /// we won't know until all lvalue-to-rvalue and discarded value conversions /// have been applied to all subexpressions of the enclosing full expression. /// This is cleared at the end of each full expression. using MaybeODRUseExprSet = llvm::SmallPtrSet<Expr *, 2>; MaybeODRUseExprSet MaybeODRUseExprs; std::unique_ptr<sema::FunctionScopeInfo> CachedFunctionScope; /// Stack containing information about each of the nested /// function, block, and method scopes that are currently active. SmallVector<sema::FunctionScopeInfo *, 4> FunctionScopes; typedef LazyVector<TypedefNameDecl *, ExternalSemaSource, &ExternalSemaSource::ReadExtVectorDecls, 2, 2> ExtVectorDeclsType; /// ExtVectorDecls - This is a list all the extended vector types. 
This allows /// us to associate a raw vector type with one of the ext_vector type names. /// This is only necessary for issuing pretty diagnostics. ExtVectorDeclsType ExtVectorDecls; /// FieldCollector - Collects CXXFieldDecls during parsing of C++ classes. std::unique_ptr<CXXFieldCollector> FieldCollector; typedef llvm::SmallSetVector<NamedDecl *, 16> NamedDeclSetType; /// Set containing all declared private fields that are not used. NamedDeclSetType UnusedPrivateFields; /// Set containing all typedefs that are likely unused. llvm::SmallSetVector<const TypedefNameDecl *, 4> UnusedLocalTypedefNameCandidates; /// Delete-expressions to be analyzed at the end of translation unit /// /// This list contains class members, and locations of delete-expressions /// that could not be proven as to whether they mismatch with new-expression /// used in initializer of the field. typedef std::pair<SourceLocation, bool> DeleteExprLoc; typedef llvm::SmallVector<DeleteExprLoc, 4> DeleteLocs; llvm::MapVector<FieldDecl *, DeleteLocs> DeleteExprs; typedef llvm::SmallPtrSet<const CXXRecordDecl*, 8> RecordDeclSetTy; /// PureVirtualClassDiagSet - a set of class declarations which we have /// emitted a list of pure virtual functions. Used to prevent emitting the /// same list more than once. std::unique_ptr<RecordDeclSetTy> PureVirtualClassDiagSet; /// ParsingInitForAutoVars - a set of declarations with auto types for which /// we are currently parsing the initializer. llvm::SmallPtrSet<const Decl*, 4> ParsingInitForAutoVars; /// Look for a locally scoped extern "C" declaration by the given name. NamedDecl *findLocallyScopedExternCDecl(DeclarationName Name); typedef LazyVector<VarDecl *, ExternalSemaSource, &ExternalSemaSource::ReadTentativeDefinitions, 2, 2> TentativeDefinitionsType; /// All the tentative definitions encountered in the TU. 
TentativeDefinitionsType TentativeDefinitions; typedef LazyVector<const DeclaratorDecl *, ExternalSemaSource, &ExternalSemaSource::ReadUnusedFileScopedDecls, 2, 2> UnusedFileScopedDeclsType; /// The set of file scoped decls seen so far that have not been used /// and must warn if not used. Only contains the first declaration. UnusedFileScopedDeclsType UnusedFileScopedDecls; typedef LazyVector<CXXConstructorDecl *, ExternalSemaSource, &ExternalSemaSource::ReadDelegatingConstructors, 2, 2> DelegatingCtorDeclsType; /// All the delegating constructors seen so far in the file, used for /// cycle detection at the end of the TU. DelegatingCtorDeclsType DelegatingCtorDecls; /// All the overriding functions seen during a class definition /// that had their exception spec checks delayed, plus the overridden /// function. SmallVector<std::pair<const CXXMethodDecl*, const CXXMethodDecl*>, 2> DelayedOverridingExceptionSpecChecks; /// All the function redeclarations seen during a class definition that had /// their exception spec checks delayed, plus the prior declaration they /// should be checked against. Except during error recovery, the new decl /// should always be a friend declaration, as that's the only valid way to /// redeclare a special member before its class is complete. SmallVector<std::pair<FunctionDecl*, FunctionDecl*>, 2> DelayedEquivalentExceptionSpecChecks; typedef llvm::MapVector<const FunctionDecl *, std::unique_ptr<LateParsedTemplate>> LateParsedTemplateMapT; LateParsedTemplateMapT LateParsedTemplateMap; /// Callback to the parser to parse templated functions when needed. 
/// Callback signatures used to hand late-parsed templates back to the parser.
typedef void LateTemplateParserCB(void *P, LateParsedTemplate &LPT);
typedef void LateTemplateParserCleanupCB(void *P);
LateTemplateParserCB *LateTemplateParser;
LateTemplateParserCleanupCB *LateTemplateParserCleanup;
void *OpaqueParser; // opaque parser pointer passed back through the callbacks

/// Install the parser callbacks used for late template parsing.
/// \param LTP parse callback, \param LTPCleanup cleanup callback,
/// \param P opaque context forwarded to both.
void SetLateTemplateParser(LateTemplateParserCB *LTP,
                           LateTemplateParserCleanupCB *LTPCleanup,
                           void *P) {
  LateTemplateParser = LTP;
  LateTemplateParserCleanup = LTPCleanup;
  OpaqueParser = P;
}

/// \brief Callback to the parser to parse a type expressed as a string.
std::function<TypeResult(StringRef, StringRef, SourceLocation)>
    ParseTypeFromStringCallback;

class DelayedDiagnostics;

/// Opaque token returned by DelayedDiagnostics::push/pushUndelayed; it
/// remembers the previously active pool so it can be restored on pop.
class DelayedDiagnosticsState {
  sema::DelayedDiagnosticPool *SavedPool; // pool active before the push
  friend class Sema::DelayedDiagnostics;
};
typedef DelayedDiagnosticsState ParsingDeclState;
typedef DelayedDiagnosticsState ProcessingContextState;

/// A class which encapsulates the logic for delaying diagnostics
/// during parsing and other processing.
class DelayedDiagnostics {
  /// The current pool of diagnostics into which delayed
  /// diagnostics should go.  Null means diagnostics are not delayed.
  sema::DelayedDiagnosticPool *CurPool;

public:
  DelayedDiagnostics() : CurPool(nullptr) {}

  /// Adds a delayed diagnostic.
  void add(const sema::DelayedDiagnostic &diag); // in DelayedDiagnostic.h

  /// Determines whether diagnostics should be delayed.
  bool shouldDelayDiagnostics() { return CurPool != nullptr; }

  /// Returns the current delayed-diagnostics pool.
  sema::DelayedDiagnosticPool *getCurrentPool() const {
    return CurPool;
  }

  /// Enter a new scope.  Access and deprecation diagnostics will be
  /// collected in this pool.
  DelayedDiagnosticsState push(sema::DelayedDiagnosticPool &pool) {
    DelayedDiagnosticsState state;
    state.SavedPool = CurPool;
    CurPool = &pool;
    return state;
  }

  /// Leave a delayed-diagnostic state that was previously pushed.
  /// Do not emit any of the diagnostics.  This is performed as part
  /// of the bookkeeping of popping a pool "properly".
  void popWithoutEmitting(DelayedDiagnosticsState state) {
    CurPool = state.SavedPool;
  }

  /// Enter a new scope where access and deprecation diagnostics are
  /// not delayed.
  DelayedDiagnosticsState pushUndelayed() {
    DelayedDiagnosticsState state;
    state.SavedPool = CurPool;
    CurPool = nullptr;
    return state;
  }

  /// Undo a previous pushUndelayed().
  void popUndelayed(DelayedDiagnosticsState state) {
    // Must only be called to balance a pushUndelayed(); CurPool is null
    // exactly when diagnostics are undelayed.
    assert(CurPool == nullptr);
    CurPool = state.SavedPool;
  }
} DelayedDiagnostics;

/// A RAII object to temporarily push a declaration context.
class ContextRAII {
private:
  Sema &S;
  DeclContext *SavedContext;               // context to restore; null once popped
  ProcessingContextState SavedContextState; // delayed-diagnostics state to restore
  QualType SavedCXXThisTypeOverride;        // 'this' type override to restore

public:
  /// Push ContextToPush as the current context; also suspends delayed
  /// diagnostics and (unless NewThisContext is false) clears the
  /// CXXThisTypeOverride for the new context.
  ContextRAII(Sema &S, DeclContext *ContextToPush, bool NewThisContext = true)
      : S(S), SavedContext(S.CurContext),
        SavedContextState(S.DelayedDiagnostics.pushUndelayed()),
        SavedCXXThisTypeOverride(S.CXXThisTypeOverride) {
    assert(ContextToPush && "pushing null context");
    S.CurContext = ContextToPush;
    if (NewThisContext)
      S.CXXThisTypeOverride = QualType();
  }

  /// Restore the saved state early; safe to call more than once (subsequent
  /// calls, and the destructor, become no-ops).
  void pop() {
    if (!SavedContext) return;
    S.CurContext = SavedContext;
    S.DelayedDiagnostics.popUndelayed(SavedContextState);
    S.CXXThisTypeOverride = SavedCXXThisTypeOverride;
    SavedContext = nullptr;
  }

  ~ContextRAII() {
    pop();
  }
};

/// Used to change context to isConstantEvaluated without pushing a heavy
/// ExpressionEvaluationContextRecord object.
bool isConstantEvaluatedOverride;

/// True if the innermost expression evaluation context is constant-evaluated,
/// or if the lightweight override flag above is set.
bool isConstantEvaluated() {
  return ExprEvalContexts.back().isConstantEvaluated() ||
         isConstantEvaluatedOverride;
}

/// RAII object to handle the state changes required to synthesize
/// a function body.
class SynthesizedFunctionScope {
  Sema &S;
  Sema::ContextRAII SavedContext;           // pushes DC for the lifetime of this scope
  bool PushedCodeSynthesisContext = false;  // set by addContextNote()

public:
  /// Enter DC (a FunctionDecl or ObjCMethodDecl) as the current context,
  /// push a function scope and a potentially-evaluated expression context,
  /// and mark the function as about to receive a body.
  SynthesizedFunctionScope(Sema &S, DeclContext *DC)
      : S(S), SavedContext(S, DC) {
    S.PushFunctionScope();
    S.PushExpressionEvaluationContext(
        Sema::ExpressionEvaluationContext::PotentiallyEvaluated);
    if (auto *FD = dyn_cast<FunctionDecl>(DC))
      FD->setWillHaveBody(true);
    else
      assert(isa<ObjCMethodDecl>(DC));
  }

  /// Record a code-synthesis context note pointing at UseLoc, so diagnostics
  /// emitted while defining the synthesized function mention where it was
  /// required.  May be called at most once per scope.
  void addContextNote(SourceLocation UseLoc) {
    assert(!PushedCodeSynthesisContext);

    Sema::CodeSynthesisContext Ctx;
    Ctx.Kind = Sema::CodeSynthesisContext::DefiningSynthesizedFunction;
    Ctx.PointOfInstantiation = UseLoc;
    Ctx.Entity = cast<Decl>(S.CurContext);
    S.pushCodeSynthesisContext(Ctx);

    PushedCodeSynthesisContext = true;
  }

  /// Unwind everything pushed by the constructor (and addContextNote, if
  /// used), in reverse order.
  ~SynthesizedFunctionScope() {
    if (PushedCodeSynthesisContext)
      S.popCodeSynthesisContext();
    if (auto *FD = dyn_cast<FunctionDecl>(S.CurContext))
      FD->setWillHaveBody(false);
    S.PopExpressionEvaluationContext();
    S.PopFunctionScopeInfo();
  }
};

/// WeakUndeclaredIdentifiers - Identifiers contained in
/// \#pragma weak before declared. rare. may alias another
/// identifier, declared or undeclared
llvm::MapVector<IdentifierInfo *, WeakInfo> WeakUndeclaredIdentifiers;

/// ExtnameUndeclaredIdentifiers - Identifiers contained in
/// \#pragma redefine_extname before declared.  Used in Solaris system headers
/// to define functions that occur in multiple standards to call the version
/// in the currently selected standard.
llvm::DenseMap<IdentifierInfo*,AsmLabelAttr*> ExtnameUndeclaredIdentifiers;

/// Load weak undeclared identifiers from the external source.
void LoadExternalWeakUndeclaredIdentifiers();

/// WeakTopLevelDecl - Translation-unit scoped declarations generated by
/// \#pragma weak during processing of other Decls.
/// I couldn't figure out a clean way to generate these in-line, so
/// we store them here and handle separately -- which is a hack.
/// It would be best to refactor this.
SmallVector<Decl*,2> WeakTopLevelDecl; IdentifierResolver IdResolver; /// Translation Unit Scope - useful to Objective-C actions that need /// to lookup file scope declarations in the "ordinary" C decl namespace. /// For example, user-defined classes, built-in "id" type, etc. Scope *TUScope; /// The C++ "std" namespace, where the standard library resides. LazyDeclPtr StdNamespace; /// The C++ "std::bad_alloc" class, which is defined by the C++ /// standard library. LazyDeclPtr StdBadAlloc; /// The C++ "std::align_val_t" enum class, which is defined by the C++ /// standard library. LazyDeclPtr StdAlignValT; /// The C++ "std::experimental" namespace, where the experimental parts /// of the standard library resides. NamespaceDecl *StdExperimentalNamespaceCache; /// The C++ "std::initializer_list" template, which is defined in /// \<initializer_list>. ClassTemplateDecl *StdInitializerList; /// The C++ "std::coroutine_traits" template, which is defined in /// \<coroutine_traits> ClassTemplateDecl *StdCoroutineTraitsCache; /// The C++ "type_info" declaration, which is defined in \<typeinfo>. RecordDecl *CXXTypeInfoDecl; /// The MSVC "_GUID" struct, which is defined in MSVC header files. RecordDecl *MSVCGuidDecl; /// Caches identifiers/selectors for NSFoundation APIs. std::unique_ptr<NSAPI> NSAPIObj; /// The declaration of the Objective-C NSNumber class. ObjCInterfaceDecl *NSNumberDecl; /// The declaration of the Objective-C NSValue class. ObjCInterfaceDecl *NSValueDecl; /// Pointer to NSNumber type (NSNumber *). QualType NSNumberPointer; /// Pointer to NSValue type (NSValue *). QualType NSValuePointer; /// The Objective-C NSNumber methods used to create NSNumber literals. ObjCMethodDecl *NSNumberLiteralMethods[NSAPI::NumNSNumberLiteralMethods]; /// The declaration of the Objective-C NSString class. ObjCInterfaceDecl *NSStringDecl; /// Pointer to NSString type (NSString *). QualType NSStringPointer; /// The declaration of the stringWithUTF8String: method. 
ObjCMethodDecl *StringWithUTF8StringMethod; /// The declaration of the valueWithBytes:objCType: method. ObjCMethodDecl *ValueWithBytesObjCTypeMethod; /// The declaration of the Objective-C NSArray class. ObjCInterfaceDecl *NSArrayDecl; /// The declaration of the arrayWithObjects:count: method. ObjCMethodDecl *ArrayWithObjectsMethod; /// The declaration of the Objective-C NSDictionary class. ObjCInterfaceDecl *NSDictionaryDecl; /// The declaration of the dictionaryWithObjects:forKeys:count: method. ObjCMethodDecl *DictionaryWithObjectsMethod; /// id<NSCopying> type. QualType QIDNSCopying; /// will hold 'respondsToSelector:' Selector RespondsToSelectorSel; /// A flag to remember whether the implicit forms of operator new and delete /// have been declared. bool GlobalNewDeleteDeclared; /// A flag to indicate that we're in a context that permits abstract /// references to fields. This is really a bool AllowAbstractFieldReference; /// Describes how the expressions currently being parsed are /// evaluated at run-time, if at all. enum class ExpressionEvaluationContext { /// The current expression and its subexpressions occur within an /// unevaluated operand (C++11 [expr]p7), such as the subexpression of /// \c sizeof, where the type of the expression may be significant but /// no code will be generated to evaluate the value of the expression at /// run time. Unevaluated, /// The current expression occurs within a braced-init-list within /// an unevaluated operand. This is mostly like a regular unevaluated /// context, except that we still instantiate constexpr functions that are /// referenced here so that we can perform narrowing checks correctly. UnevaluatedList, /// The current expression occurs within a discarded statement. /// This behaves largely similarly to an unevaluated operand in preventing /// definitions from being required, but not in other ways. 
DiscardedStatement, /// The current expression occurs within an unevaluated /// operand that unconditionally permits abstract references to /// fields, such as a SIZE operator in MS-style inline assembly. UnevaluatedAbstract, /// The current context is "potentially evaluated" in C++11 terms, /// but the expression is evaluated at compile-time (like the values of /// cases in a switch statement). ConstantEvaluated, /// The current expression is potentially evaluated at run time, /// which means that code may be generated to evaluate the value of the /// expression at run time. PotentiallyEvaluated, /// The current expression is potentially evaluated, but any /// declarations referenced inside that expression are only used if /// in fact the current expression is used. /// /// This value is used when parsing default function arguments, for which /// we would like to provide diagnostics (e.g., passing non-POD arguments /// through varargs) but do not want to mark declarations as "referenced" /// until the default argument is used. PotentiallyEvaluatedIfUsed }; /// Data structure used to record current or nested /// expression evaluation contexts. struct ExpressionEvaluationContextRecord { /// The expression evaluation context. ExpressionEvaluationContext Context; /// Whether the enclosing context needed a cleanup. CleanupInfo ParentCleanup; /// Whether we are in a decltype expression. bool IsDecltype; /// The number of active cleanup objects when we entered /// this expression evaluation context. unsigned NumCleanupObjects; /// The number of typos encountered during this expression evaluation /// context (i.e. the number of TypoExprs created). unsigned NumTypos; MaybeODRUseExprSet SavedMaybeODRUseExprs; /// The lambdas that are present within this context, if it /// is indeed an unevaluated context. 
SmallVector<LambdaExpr *, 2> Lambdas; /// The declaration that provides context for lambda expressions /// and block literals if the normal declaration context does not /// suffice, e.g., in a default function argument. Decl *ManglingContextDecl; /// If we are processing a decltype type, a set of call expressions /// for which we have deferred checking the completeness of the return type. SmallVector<CallExpr *, 8> DelayedDecltypeCalls; /// If we are processing a decltype type, a set of temporary binding /// expressions for which we have deferred checking the destructor. SmallVector<CXXBindTemporaryExpr *, 8> DelayedDecltypeBinds; llvm::SmallPtrSet<const Expr *, 8> PossibleDerefs; /// Expressions appearing as the LHS of a volatile assignment in this /// context. We produce a warning for these when popping the context if /// they are not discarded-value expressions nor unevaluated operands. SmallVector<Expr*, 2> VolatileAssignmentLHSs; /// \brief Describes whether we are in an expression constext which we have /// to handle differently. enum ExpressionKind { EK_Decltype, EK_TemplateArgument, EK_Other } ExprContext; ExpressionEvaluationContextRecord(ExpressionEvaluationContext Context, unsigned NumCleanupObjects, CleanupInfo ParentCleanup, Decl *ManglingContextDecl, ExpressionKind ExprContext) : Context(Context), ParentCleanup(ParentCleanup), NumCleanupObjects(NumCleanupObjects), NumTypos(0), ManglingContextDecl(ManglingContextDecl), ExprContext(ExprContext) {} bool isUnevaluated() const { return Context == ExpressionEvaluationContext::Unevaluated || Context == ExpressionEvaluationContext::UnevaluatedAbstract || Context == ExpressionEvaluationContext::UnevaluatedList; } bool isConstantEvaluated() const { return Context == ExpressionEvaluationContext::ConstantEvaluated; } }; /// A stack of expression evaluation contexts. SmallVector<ExpressionEvaluationContextRecord, 8> ExprEvalContexts; /// Emit a warning for all pending noderef expressions that we recorded. 
void WarnOnPendingNoDerefs(ExpressionEvaluationContextRecord &Rec);

/// Compute the mangling number context for a lambda expression or
/// block literal.  Also return the extra mangling decl if any.
///
/// \param DC - The DeclContext containing the lambda expression or
/// block literal.
std::tuple<MangleNumberingContext *, Decl *>
getCurrentMangleNumberContext(const DeclContext *DC);

/// SpecialMemberOverloadResult - The overloading result for a special member
/// function.
///
/// This is basically a wrapper around PointerIntPair. The lowest bits of the
/// integer are used to determine whether overload resolution succeeded.
class SpecialMemberOverloadResult {
public:
  enum Kind {
    NoMemberOrDeleted, ///< no viable member, or the selected one is deleted
    Ambiguous,         ///< overload resolution was ambiguous
    Success            ///< a usable special member was selected
  };

private:
  // Pointer: the chosen method (or null); Int: the Kind above (2 bits).
  llvm::PointerIntPair<CXXMethodDecl*, 2> Pair;

public:
  SpecialMemberOverloadResult() : Pair() {}
  /// Wrap a resolved method; a deleted method is recorded as
  /// NoMemberOrDeleted, anything else as Success.
  SpecialMemberOverloadResult(CXXMethodDecl *MD)
      : Pair(MD, MD->isDeleted() ? NoMemberOrDeleted : Success) {}

  CXXMethodDecl *getMethod() const { return Pair.getPointer(); }
  void setMethod(CXXMethodDecl *MD) { Pair.setPointer(MD); }

  Kind getKind() const { return static_cast<Kind>(Pair.getInt()); }
  void setKind(Kind K) { Pair.setInt(K); }
};

/// A SpecialMemberOverloadResult that can live in a FoldingSet, keyed by a
/// FoldingSetNodeID (the record + special-member query).
class SpecialMemberOverloadResultEntry
    : public llvm::FastFoldingSetNode,
      public SpecialMemberOverloadResult {
public:
  SpecialMemberOverloadResultEntry(const llvm::FoldingSetNodeID &ID)
      : FastFoldingSetNode(ID)
  {}
};

/// A cache of special member function overload resolution results
/// for C++ records.
llvm::FoldingSet<SpecialMemberOverloadResultEntry> SpecialMemberCache;

/// A cache of the flags available in enumerations with the flag_bits
/// attribute.
mutable llvm::DenseMap<const EnumDecl*, llvm::APInt> FlagBitsCache;

/// The kind of translation unit we are processing.
///
/// When we're processing a complete translation unit, Sema will perform
/// end-of-translation-unit semantic tasks (such as creating
/// initializers for tentative definitions in C) once parsing has
/// completed.
Modules and precompiled headers perform different kinds of /// checks. TranslationUnitKind TUKind; llvm::BumpPtrAllocator BumpAlloc; /// The number of SFINAE diagnostics that have been trapped. unsigned NumSFINAEErrors; typedef llvm::DenseMap<ParmVarDecl *, llvm::TinyPtrVector<ParmVarDecl *>> UnparsedDefaultArgInstantiationsMap; /// A mapping from parameters with unparsed default arguments to the /// set of instantiations of each parameter. /// /// This mapping is a temporary data structure used when parsing /// nested class templates or nested classes of class templates, /// where we might end up instantiating an inner class before the /// default arguments of its methods have been parsed. UnparsedDefaultArgInstantiationsMap UnparsedDefaultArgInstantiations; // Contains the locations of the beginning of unparsed default // argument locations. llvm::DenseMap<ParmVarDecl *, SourceLocation> UnparsedDefaultArgLocs; /// UndefinedInternals - all the used, undefined objects which require a /// definition in this translation unit. llvm::MapVector<NamedDecl *, SourceLocation> UndefinedButUsed; /// Determine if VD, which must be a variable or function, is an external /// symbol that nonetheless can't be referenced from outside this translation /// unit because its type has no linkage and it's not extern "C". bool isExternalWithNoLinkageType(ValueDecl *VD); /// Obtain a sorted list of functions that are undefined but ODR-used. void getUndefinedButUsed( SmallVectorImpl<std::pair<NamedDecl *, SourceLocation> > &Undefined); /// Retrieves list of suspicious delete-expressions that will be checked at /// the end of translation unit. const llvm::MapVector<FieldDecl *, DeleteLocs> & getMismatchingDeleteExpressions() const; typedef std::pair<ObjCMethodList, ObjCMethodList> GlobalMethods; typedef llvm::DenseMap<Selector, GlobalMethods> GlobalMethodPool; /// Method Pool - allows efficient lookup when typechecking messages to "id". 
/// We need to maintain a list, since selectors can have differing signatures /// across classes. In Cocoa, this happens to be extremely uncommon (only 1% /// of selectors are "overloaded"). /// At the head of the list it is recorded whether there were 0, 1, or >= 2 /// methods inside categories with a particular selector. GlobalMethodPool MethodPool; /// Method selectors used in a \@selector expression. Used for implementation /// of -Wselector. llvm::MapVector<Selector, SourceLocation> ReferencedSelectors; /// List of SourceLocations where 'self' is implicitly retained inside a /// block. llvm::SmallVector<std::pair<SourceLocation, const BlockDecl *>, 1> ImplicitlyRetainedSelfLocs; /// Kinds of C++ special members. enum CXXSpecialMember { CXXDefaultConstructor, CXXCopyConstructor, CXXMoveConstructor, CXXCopyAssignment, CXXMoveAssignment, CXXDestructor, CXXInvalid }; typedef llvm::PointerIntPair<CXXRecordDecl *, 3, CXXSpecialMember> SpecialMemberDecl; /// The C++ special members which we are currently in the process of /// declaring. If this process recursively triggers the declaration of the /// same special member, we should act as if it is not yet declared. llvm::SmallPtrSet<SpecialMemberDecl, 4> SpecialMembersBeingDeclared; /// The function definitions which were renamed as part of typo-correction /// to match their respective declarations. We want to keep track of them /// to ensure that we don't emit a "redefinition" error if we encounter a /// correctly named definition after the renamed definition. llvm::SmallPtrSet<const NamedDecl *, 4> TypoCorrectedFunctionDefinitions; /// Stack of types that correspond to the parameter entities that are /// currently being copy-initialized. Can be empty. llvm::SmallVector<QualType, 4> CurrentParameterCopyTypes; void ReadMethodPool(Selector Sel); void updateOutOfDateSelector(Selector Sel); /// Private Helper predicate to check for 'self'. 
bool isSelfExpr(Expr *RExpr);
bool isSelfExpr(Expr *RExpr, const ObjCMethodDecl *Method);

/// Cause the active diagnostic on the DiagnosticsEngine to be
/// emitted. This is closely coupled to the SemaDiagnosticBuilder class and
/// should not be used elsewhere.
void EmitCurrentDiagnostic(unsigned DiagID);

/// Records and restores the FP_CONTRACT state on entry/exit of compound
/// statements.
class FPContractStateRAII {
public:
  // Snapshot the current FP options on entry; restore them on destruction.
  FPContractStateRAII(Sema &S) : S(S), OldFPFeaturesState(S.FPFeatures) {}
  ~FPContractStateRAII() { S.FPFeatures = OldFPFeaturesState; }

private:
  Sema& S;
  FPOptions OldFPFeaturesState; // saved copy of S.FPFeatures
};

void addImplicitTypedef(StringRef Name, QualType T);

/// Whether the stack-exhaustion warning has already been issued (warn once).
bool WarnedStackExhausted = false;

public:
Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer,
     TranslationUnitKind TUKind = TU_Complete,
     CodeCompleteConsumer *CompletionConsumer = nullptr);
~Sema();

/// Perform initialization that occurs after the parser has been
/// initialized but before it parses anything.
void Initialize();

// Trivial accessors for the state Sema carries.
const LangOptions &getLangOpts() const { return LangOpts; }
OpenCLOptions &getOpenCLOptions() { return OpenCLFeatures; }
FPOptions     &getFPOptions() { return FPFeatures; }

DiagnosticsEngine &getDiagnostics() const { return Diags; }
SourceManager &getSourceManager() const { return SourceMgr; }
Preprocessor &getPreprocessor() const { return PP; }
ASTContext &getASTContext() const { return Context; }
ASTConsumer &getASTConsumer() const { return Consumer; }
ASTMutationListener *getASTMutationListener() const;
ExternalSemaSource* getExternalSource() const { return ExternalSource; }

/// Registers an external source. If an external source already exists,
/// creates a multiplex external source and appends to it.
///
/// \param[in] E - A non-null external sema source.
///
void addExternalSource(ExternalSemaSource *E);

void PrintStats() const;

/// Warn that the stack is nearly exhausted.
void warnStackExhausted(SourceLocation Loc);

/// Run some code with "sufficient" stack space.
(Currently, at least 256K is /// guaranteed). Produces a warning if we're low on stack space and allocates /// more in that case. Use this in code that may recurse deeply (for example, /// in template instantiation) to avoid stack overflow. void runWithSufficientStackSpace(SourceLocation Loc, llvm::function_ref<void()> Fn); /// Helper class that creates diagnostics with optional /// template instantiation stacks. /// /// This class provides a wrapper around the basic DiagnosticBuilder /// class that emits diagnostics. SemaDiagnosticBuilder is /// responsible for emitting the diagnostic (as DiagnosticBuilder /// does) and, if the diagnostic comes from inside a template /// instantiation, printing the template instantiation stack as /// well. class SemaDiagnosticBuilder : public DiagnosticBuilder { Sema &SemaRef; unsigned DiagID; public: SemaDiagnosticBuilder(DiagnosticBuilder &DB, Sema &SemaRef, unsigned DiagID) : DiagnosticBuilder(DB), SemaRef(SemaRef), DiagID(DiagID) { } // This is a cunning lie. DiagnosticBuilder actually performs move // construction in its copy constructor (but due to varied uses, it's not // possible to conveniently express this as actual move construction). So // the default copy ctor here is fine, because the base class disables the // source anyway, so the user-defined ~SemaDiagnosticBuilder is a safe no-op // in that case anwyay. SemaDiagnosticBuilder(const SemaDiagnosticBuilder&) = default; ~SemaDiagnosticBuilder() { // If we aren't active, there is nothing to do. if (!isActive()) return; // Otherwise, we need to emit the diagnostic. First flush the underlying // DiagnosticBuilder data, and clear the diagnostic builder itself so it // won't emit the diagnostic in its own destructor. // // This seems wasteful, in that as written the DiagnosticBuilder dtor will // do its own needless checks to see if the diagnostic needs to be // emitted. 
However, because we take care to ensure that the builder // objects never escape, a sufficiently smart compiler will be able to // eliminate that code. FlushCounts(); Clear(); // Dispatch to Sema to emit the diagnostic. SemaRef.EmitCurrentDiagnostic(DiagID); } /// Teach operator<< to produce an object of the correct type. template<typename T> friend const SemaDiagnosticBuilder &operator<<( const SemaDiagnosticBuilder &Diag, const T &Value) { const DiagnosticBuilder &BaseDiag = Diag; BaseDiag << Value; return Diag; } }; /// Emit a diagnostic. SemaDiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID) { DiagnosticBuilder DB = Diags.Report(Loc, DiagID); return SemaDiagnosticBuilder(DB, *this, DiagID); } /// Emit a partial diagnostic. SemaDiagnosticBuilder Diag(SourceLocation Loc, const PartialDiagnostic& PD); /// Build a partial diagnostic. PartialDiagnostic PDiag(unsigned DiagID = 0); // in SemaInternal.h bool findMacroSpelling(SourceLocation &loc, StringRef name); /// Get a string to suggest for zero-initialization of a type. std::string getFixItZeroInitializerForType(QualType T, SourceLocation Loc) const; std::string getFixItZeroLiteralForType(QualType T, SourceLocation Loc) const; /// Calls \c Lexer::getLocForEndOfToken() SourceLocation getLocForEndOfToken(SourceLocation Loc, unsigned Offset = 0); /// Retrieve the module loader associated with the preprocessor. ModuleLoader &getModuleLoader() const; void emitAndClearUnusedLocalTypedefWarnings(); enum TUFragmentKind { /// The global module fragment, between 'module;' and a module-declaration. Global, /// A normal translation unit fragment. For a non-module unit, this is the /// entire translation unit. Otherwise, it runs from the module-declaration /// to the private-module-fragment (if any) or the end of the TU (if not). Normal, /// The private module fragment, between 'module :private;' and the end of /// the translation unit. 
Private }; void ActOnStartOfTranslationUnit(); void ActOnEndOfTranslationUnit(); void ActOnEndOfTranslationUnitFragment(TUFragmentKind Kind); void CheckDelegatingCtorCycles(); Scope *getScopeForContext(DeclContext *Ctx); void PushFunctionScope(); void PushBlockScope(Scope *BlockScope, BlockDecl *Block); sema::LambdaScopeInfo *PushLambdaScope(); /// This is used to inform Sema what the current TemplateParameterDepth /// is during Parsing. Currently it is used to pass on the depth /// when parsing generic lambda 'auto' parameters. void RecordParsingTemplateParameterDepth(unsigned Depth); void PushCapturedRegionScope(Scope *RegionScope, CapturedDecl *CD, RecordDecl *RD, CapturedRegionKind K, unsigned OpenMPCaptureLevel = 0); /// Custom deleter to allow FunctionScopeInfos to be kept alive for a short /// time after they've been popped. class PoppedFunctionScopeDeleter { Sema *Self; public: explicit PoppedFunctionScopeDeleter(Sema *Self) : Self(Self) {} void operator()(sema::FunctionScopeInfo *Scope) const; }; using PoppedFunctionScopePtr = std::unique_ptr<sema::FunctionScopeInfo, PoppedFunctionScopeDeleter>; PoppedFunctionScopePtr PopFunctionScopeInfo(const sema::AnalysisBasedWarnings::Policy *WP = nullptr, const Decl *D = nullptr, QualType BlockType = QualType()); sema::FunctionScopeInfo *getCurFunction() const { return FunctionScopes.empty() ? nullptr : FunctionScopes.back(); } sema::FunctionScopeInfo *getEnclosingFunction() const; void setFunctionHasBranchIntoScope(); void setFunctionHasBranchProtectedScope(); void setFunctionHasIndirectGoto(); void PushCompoundScope(bool IsStmtExpr); void PopCompoundScope(); sema::CompoundScopeInfo &getCurCompoundScope() const; bool hasAnyUnrecoverableErrorsInThisFunction() const; /// Retrieve the current block, if any. sema::BlockScopeInfo *getCurBlock(); /// Get the innermost lambda enclosing the current location, if any. This /// looks through intervening non-lambda scopes such as local functions and /// blocks. 
sema::LambdaScopeInfo *getEnclosingLambda() const; /// Retrieve the current lambda scope info, if any. /// \param IgnoreNonLambdaCapturingScope true if should find the top-most /// lambda scope info ignoring all inner capturing scopes that are not /// lambda scopes. sema::LambdaScopeInfo * getCurLambda(bool IgnoreNonLambdaCapturingScope = false); /// Retrieve the current generic lambda info, if any. sema::LambdaScopeInfo *getCurGenericLambda(); /// Retrieve the current captured region, if any. sema::CapturedRegionScopeInfo *getCurCapturedRegion(); /// WeakTopLevelDeclDecls - access to \#pragma weak-generated Decls SmallVectorImpl<Decl *> &WeakTopLevelDecls() { return WeakTopLevelDecl; } void ActOnComment(SourceRange Comment); //===--------------------------------------------------------------------===// // Type Analysis / Processing: SemaType.cpp. // QualType BuildQualifiedType(QualType T, SourceLocation Loc, Qualifiers Qs, const DeclSpec *DS = nullptr); QualType BuildQualifiedType(QualType T, SourceLocation Loc, unsigned CVRA, const DeclSpec *DS = nullptr); QualType BuildPointerType(QualType T, SourceLocation Loc, DeclarationName Entity); QualType BuildReferenceType(QualType T, bool LValueRef, SourceLocation Loc, DeclarationName Entity); QualType BuildArrayType(QualType T, ArrayType::ArraySizeModifier ASM, Expr *ArraySize, unsigned Quals, SourceRange Brackets, DeclarationName Entity); QualType BuildVectorType(QualType T, Expr *VecSize, SourceLocation AttrLoc); QualType BuildExtVectorType(QualType T, Expr *ArraySize, SourceLocation AttrLoc); QualType BuildAddressSpaceAttr(QualType &T, LangAS ASIdx, Expr *AddrSpace, SourceLocation AttrLoc); /// Same as above, but constructs the AddressSpace index if not provided. QualType BuildAddressSpaceAttr(QualType &T, Expr *AddrSpace, SourceLocation AttrLoc); bool CheckQualifiedFunctionForTypeId(QualType T, SourceLocation Loc); bool CheckFunctionReturnType(QualType T, SourceLocation Loc); /// Build a function type. 
/// /// This routine checks the function type according to C++ rules and /// under the assumption that the result type and parameter types have /// just been instantiated from a template. It therefore duplicates /// some of the behavior of GetTypeForDeclarator, but in a much /// simpler form that is only suitable for this narrow use case. /// /// \param T The return type of the function. /// /// \param ParamTypes The parameter types of the function. This array /// will be modified to account for adjustments to the types of the /// function parameters. /// /// \param Loc The location of the entity whose type involves this /// function type or, if there is no such entity, the location of the /// type that will have function type. /// /// \param Entity The name of the entity that involves the function /// type, if known. /// /// \param EPI Extra information about the function type. Usually this will /// be taken from an existing function with the same prototype. /// /// \returns A suitable function type, if there are no errors. The /// unqualified type will always be a FunctionProtoType. /// Otherwise, returns a NULL type. QualType BuildFunctionType(QualType T, MutableArrayRef<QualType> ParamTypes, SourceLocation Loc, DeclarationName Entity, const FunctionProtoType::ExtProtoInfo &EPI); QualType BuildMemberPointerType(QualType T, QualType Class, SourceLocation Loc, DeclarationName Entity); QualType BuildBlockPointerType(QualType T, SourceLocation Loc, DeclarationName Entity); QualType BuildParenType(QualType T); QualType BuildAtomicType(QualType T, SourceLocation Loc); QualType BuildReadPipeType(QualType T, SourceLocation Loc); QualType BuildWritePipeType(QualType T, SourceLocation Loc); TypeSourceInfo *GetTypeForDeclarator(Declarator &D, Scope *S); TypeSourceInfo *GetTypeForDeclaratorCast(Declarator &D, QualType FromTy); /// Package the given type and TSI into a ParsedType. 
ParsedType CreateParsedType(QualType T, TypeSourceInfo *TInfo); DeclarationNameInfo GetNameForDeclarator(Declarator &D); DeclarationNameInfo GetNameFromUnqualifiedId(const UnqualifiedId &Name); static QualType GetTypeFromParser(ParsedType Ty, TypeSourceInfo **TInfo = nullptr); CanThrowResult canThrow(const Expr *E); const FunctionProtoType *ResolveExceptionSpec(SourceLocation Loc, const FunctionProtoType *FPT); void UpdateExceptionSpec(FunctionDecl *FD, const FunctionProtoType::ExceptionSpecInfo &ESI); bool CheckSpecifiedExceptionType(QualType &T, SourceRange Range); bool CheckDistantExceptionSpec(QualType T); bool CheckEquivalentExceptionSpec(FunctionDecl *Old, FunctionDecl *New); bool CheckEquivalentExceptionSpec( const FunctionProtoType *Old, SourceLocation OldLoc, const FunctionProtoType *New, SourceLocation NewLoc); bool CheckEquivalentExceptionSpec( const PartialDiagnostic &DiagID, const PartialDiagnostic & NoteID, const FunctionProtoType *Old, SourceLocation OldLoc, const FunctionProtoType *New, SourceLocation NewLoc); bool handlerCanCatch(QualType HandlerType, QualType ExceptionType); bool CheckExceptionSpecSubset(const PartialDiagnostic &DiagID, const PartialDiagnostic &NestedDiagID, const PartialDiagnostic &NoteID, const PartialDiagnostic &NoThrowDiagID, const FunctionProtoType *Superset, SourceLocation SuperLoc, const FunctionProtoType *Subset, SourceLocation SubLoc); bool CheckParamExceptionSpec(const PartialDiagnostic &NestedDiagID, const PartialDiagnostic &NoteID, const FunctionProtoType *Target, SourceLocation TargetLoc, const FunctionProtoType *Source, SourceLocation SourceLoc); TypeResult ActOnTypeName(Scope *S, Declarator &D); /// The parser has parsed the context-sensitive type 'instancetype' /// in an Objective-C message declaration. Return the appropriate type. ParsedType ActOnObjCInstanceType(SourceLocation Loc); /// Abstract class used to diagnose incomplete types. 
struct TypeDiagnoser {
  TypeDiagnoser() {}

  // Emit the diagnostic about type \p T at \p Loc; implemented by
  // subclasses (e.g. BoundTypeDiagnoser below).
  virtual void diagnose(Sema &S, SourceLocation Loc, QualType T) = 0;

  virtual ~TypeDiagnoser() {}
};

// getPrintable: overload set that adapts a diagnostic argument into a form
// that can be streamed into a SemaDiagnosticBuilder. Most overloads are
// pass-through; the source-location-flavored ones all funnel into
// SourceRange.
static int getPrintable(int I) { return I; }
static unsigned getPrintable(unsigned I) { return I; }
static bool getPrintable(bool B) { return B; }
static const char *getPrintable(const char *S) { return S; }
static StringRef getPrintable(StringRef S) { return S; }
static const std::string &getPrintable(const std::string &S) { return S; }
static const IdentifierInfo *getPrintable(const IdentifierInfo *II) {
  return II;
}
static DeclarationName getPrintable(DeclarationName N) { return N; }
static QualType getPrintable(QualType T) { return T; }
static SourceRange getPrintable(SourceRange R) { return R; }
// A single location becomes a range via SourceRange's implicit conversion.
static SourceRange getPrintable(SourceLocation L) { return L; }
static SourceRange getPrintable(const Expr *E) { return E->getSourceRange(); }
static SourceRange getPrintable(TypeLoc TL) { return TL.getSourceRange(); }

/// A TypeDiagnoser that emits a fixed diagnostic ID with a bound sequence
/// of extra arguments, streaming the offending type last.
template <typename... Ts> class BoundTypeDiagnoser : public TypeDiagnoser {
  unsigned DiagID;                // the diagnostic to emit
  std::tuple<const Ts &...> Args; // extra arguments, bound by reference

  // Stream each bound argument into \p DB via getPrintable, expanding the
  // index pack to walk the tuple.
  template <std::size_t... Is>
  void emit(const SemaDiagnosticBuilder &DB,
            std::index_sequence<Is...>) const {
    // Apply all tuple elements to the builder in order.
    bool Dummy[] = {false, (DB << getPrintable(std::get<Is>(Args)))...};
    (void)Dummy;
  }

public:
  BoundTypeDiagnoser(unsigned DiagID, const Ts &...Args)
      : TypeDiagnoser(), DiagID(DiagID), Args(Args...) {
    assert(DiagID != 0 && "no diagnostic for type diagnoser");
  }

  void diagnose(Sema &S, SourceLocation Loc, QualType T) override {
    const SemaDiagnosticBuilder &DB = S.Diag(Loc, DiagID);
    emit(DB, std::index_sequence_for<Ts...>());
    DB << T; // the type is always streamed last, after the bound arguments
  }
};

/// Do a check to make sure \p Name looks like a legal swift_name
/// attribute for the decl \p D. Raise a diagnostic if the name is invalid
/// for the given declaration.
///
/// For a function, this will validate a compound Swift name,
/// e.g.
<code>init(foo:bar:baz:)</code> or <code>controllerForName(_:)</code>, /// and the function will output the number of parameter names, and whether /// this is a single-arg initializer. /// /// For a type, enum constant, property, or variable declaration, this will /// validate either a simple identifier, or a qualified /// <code>context.identifier</code> name. /// /// \returns true if the name is a valid swift name for \p D, false otherwise. bool DiagnoseSwiftName(Decl *D, StringRef Name, SourceLocation ArgLoc, const IdentifierInfo *AttrName); private: /// Methods for marking which expressions involve dereferencing a pointer /// marked with the 'noderef' attribute. Expressions are checked bottom up as /// they are parsed, meaning that a noderef pointer may not be accessed. For /// example, in `&*p` where `p` is a noderef pointer, we will first parse the /// `*p`, but need to check that `address of` is called on it. This requires /// keeping a container of all pending expressions and checking if the address /// of them are eventually taken. void CheckSubscriptAccessOfNoDeref(const ArraySubscriptExpr *E); void CheckAddressOfNoDeref(const Expr *E); void CheckMemberAccessOfNoDeref(const MemberExpr *E); bool RequireCompleteTypeImpl(SourceLocation Loc, QualType T, TypeDiagnoser *Diagnoser); struct ModuleScope { SourceLocation BeginLoc; clang::Module *Module = nullptr; bool ModuleInterface = false; bool ImplicitGlobalModuleFragment = false; VisibleModuleSet OuterVisibleModules; }; /// The modules we're currently parsing. llvm::SmallVector<ModuleScope, 16> ModuleScopes; /// Namespace definitions that we will export when they finish. llvm::SmallPtrSet<const NamespaceDecl*, 8> DeferredExportedNamespaces; /// Get the module whose scope we are currently within. Module *getCurrentModule() const { return ModuleScopes.empty() ? nullptr : ModuleScopes.back().Module; } VisibleModuleSet VisibleModules; public: /// Get the module owning an entity. 
Module *getOwningModule(Decl *Entity) { return Entity->getOwningModule(); }

/// Make a merged definition of an existing hidden definition \p ND
/// visible at the specified location.
void makeMergedDefinitionVisible(NamedDecl *ND);

bool isModuleVisible(const Module *M, bool ModulePrivate = false);

/// Determine whether a declaration is visible to name lookup.
// Inline fast path: only hidden declarations take the out-of-line slow
// path (isVisibleSlow).
bool isVisible(const NamedDecl *D) {
  return !D->isHidden() || isVisibleSlow(D);
}

/// Determine whether any declaration of an entity is visible.
// Same fast/slow split as isVisible; \p Modules, if non-null, presumably
// collects modules that would make the entity visible — confirm against
// hasVisibleDeclarationSlow's definition.
bool hasVisibleDeclaration(const NamedDecl *D,
                           llvm::SmallVectorImpl<Module *> *Modules = nullptr) {
  return isVisible(D) || hasVisibleDeclarationSlow(D, Modules);
}
bool hasVisibleDeclarationSlow(const NamedDecl *D,
                               llvm::SmallVectorImpl<Module *> *Modules);

bool hasVisibleMergedDefinition(NamedDecl *Def);
bool hasMergedDefinitionInCurrentModule(NamedDecl *Def);

/// Determine if \p D and \p Suggested have a structurally compatible
/// layout as described in C11 6.2.7/1.
bool hasStructuralCompatLayout(Decl *D, Decl *Suggested);

/// Determine if \p D has a visible definition. If not, suggest a declaration
/// that should be made visible to expose the definition.
bool hasVisibleDefinition(NamedDecl *D, NamedDecl **Suggested,
                          bool OnlyNeedComplete = false);
// Convenience overload that discards the suggested declaration.
bool hasVisibleDefinition(const NamedDecl *D) {
  NamedDecl *Hidden;
  return hasVisibleDefinition(const_cast<NamedDecl*>(D), &Hidden);
}

/// Determine if the template parameter \p D has a visible default argument.
bool hasVisibleDefaultArgument(const NamedDecl *D,
                               llvm::SmallVectorImpl<Module *> *Modules = nullptr);

/// Determine if there is a visible declaration of \p D that is an explicit
/// specialization declaration for a specialization of a template. (For a
/// member specialization, use hasVisibleMemberSpecialization.)
bool hasVisibleExplicitSpecialization( const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr); /// Determine if there is a visible declaration of \p D that is a member /// specialization declaration (as opposed to an instantiated declaration). bool hasVisibleMemberSpecialization( const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr); /// Determine if \p A and \p B are equivalent internal linkage declarations /// from different modules, and thus an ambiguity error can be downgraded to /// an extension warning. bool isEquivalentInternalLinkageDeclaration(const NamedDecl *A, const NamedDecl *B); void diagnoseEquivalentInternalLinkageDeclarations( SourceLocation Loc, const NamedDecl *D, ArrayRef<const NamedDecl *> Equiv); bool isUsualDeallocationFunction(const CXXMethodDecl *FD); bool isCompleteType(SourceLocation Loc, QualType T) { return !RequireCompleteTypeImpl(Loc, T, nullptr); } bool RequireCompleteType(SourceLocation Loc, QualType T, TypeDiagnoser &Diagnoser); bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID); template <typename... Ts> bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID, const Ts &...Args) { BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireCompleteType(Loc, T, Diagnoser); } void completeExprArrayBound(Expr *E); bool RequireCompleteExprType(Expr *E, TypeDiagnoser &Diagnoser); bool RequireCompleteExprType(Expr *E, unsigned DiagID); template <typename... Ts> bool RequireCompleteExprType(Expr *E, unsigned DiagID, const Ts &...Args) { BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireCompleteExprType(E, Diagnoser); } bool RequireLiteralType(SourceLocation Loc, QualType T, TypeDiagnoser &Diagnoser); bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID); template <typename... 
Ts> bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID, const Ts &...Args) { BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireLiteralType(Loc, T, Diagnoser); } QualType getElaboratedType(ElaboratedTypeKeyword Keyword, const CXXScopeSpec &SS, QualType T, TagDecl *OwnedTagDecl = nullptr); QualType BuildTypeofExprType(Expr *E, SourceLocation Loc); /// If AsUnevaluated is false, E is treated as though it were an evaluated /// context, such as when building a type for decltype(auto). QualType BuildDecltypeType(Expr *E, SourceLocation Loc, bool AsUnevaluated = true); QualType BuildUnaryTransformType(QualType BaseType, UnaryTransformType::UTTKind UKind, SourceLocation Loc); //===--------------------------------------------------------------------===// // Symbol table / Decl tracking callbacks: SemaDecl.cpp. // struct SkipBodyInfo { SkipBodyInfo() : ShouldSkip(false), CheckSameAsPrevious(false), Previous(nullptr), New(nullptr) {} bool ShouldSkip; bool CheckSameAsPrevious; NamedDecl *Previous; NamedDecl *New; }; DeclGroupPtrTy ConvertDeclToDeclGroup(Decl *Ptr, Decl *OwnedType = nullptr); void DiagnoseUseOfUnimplementedSelectors(); bool isSimpleTypeSpecifier(tok::TokenKind Kind) const; ParsedType getTypeName(const IdentifierInfo &II, SourceLocation NameLoc, Scope *S, CXXScopeSpec *SS = nullptr, bool isClassName = false, bool HasTrailingDot = false, ParsedType ObjectType = nullptr, bool IsCtorOrDtorName = false, bool WantNontrivialTypeSourceInfo = false, bool IsClassTemplateDeductionContext = true, IdentifierInfo **CorrectedII = nullptr); TypeSpecifierType isTagName(IdentifierInfo &II, Scope *S); bool isMicrosoftMissingTypename(const CXXScopeSpec *SS, Scope *S); void DiagnoseUnknownTypeName(IdentifierInfo *&II, SourceLocation IILoc, Scope *S, CXXScopeSpec *SS, ParsedType &SuggestedType, bool IsTemplateName = false); /// Attempt to behave like MSVC in situations where lookup of an unqualified /// type name has failed in a dependent 
context. In these situations, we /// automatically form a DependentTypeName that will retry lookup in a related /// scope during instantiation. ParsedType ActOnMSVCUnknownTypeName(const IdentifierInfo &II, SourceLocation NameLoc, bool IsTemplateTypeArg); /// Describes the result of the name lookup and resolution performed /// by \c ClassifyName(). enum NameClassificationKind { /// This name is not a type or template in this context, but might be /// something else. NC_Unknown, /// Classification failed; an error has been produced. NC_Error, /// The name has been typo-corrected to a keyword. NC_Keyword, /// The name was classified as a type. NC_Type, /// The name was classified as a specific non-type, non-template /// declaration. ActOnNameClassifiedAsNonType should be called to /// convert the declaration to an expression. NC_NonType, /// The name was classified as an ADL-only function name. /// ActOnNameClassifiedAsUndeclaredNonType should be called to convert the /// result to an expression. NC_UndeclaredNonType, /// The name denotes a member of a dependent type that could not be /// resolved. ActOnNameClassifiedAsDependentNonType should be called to /// convert the result to an expression. NC_DependentNonType, /// The name was classified as a non-type, and an expression representing /// that name has been formed. NC_ContextIndependentExpr, /// The name was classified as a template whose specializations are types. NC_TypeTemplate, /// The name was classified as a variable template name. NC_VarTemplate, /// The name was classified as a function template name. NC_FunctionTemplate, /// The name was classified as an ADL-only function template name. 
NC_UndeclaredTemplate,
};

/// The result of ClassifyName: the kind of entity the name denotes plus the
/// payload (type, declaration, template name, or expression) matching that
/// kind. Which union member is active is determined solely by \c Kind.
class NameClassification {
  NameClassificationKind Kind;

  // Payload storage; exactly one member is meaningful per Kind (set by the
  // factory functions below, read back by the matching getter).
  union {
    ExprResult Expr;
    NamedDecl *NonTypeDecl;
    TemplateName Template;
    ParsedType Type;
  };

  // Private: payload-less construction; public factories set the payload.
  explicit NameClassification(NameClassificationKind Kind) : Kind(Kind) {}

public:
  NameClassification(ParsedType Type) : Kind(NC_Type), Type(Type) {}

  NameClassification(const IdentifierInfo *Keyword) : Kind(NC_Keyword) {}

  static NameClassification Error() { return NameClassification(NC_Error); }

  static NameClassification Unknown() {
    return NameClassification(NC_Unknown);
  }

  static NameClassification ContextIndependentExpr(ExprResult E) {
    NameClassification Result(NC_ContextIndependentExpr);
    Result.Expr = E;
    return Result;
  }

  static NameClassification NonType(NamedDecl *D) {
    NameClassification Result(NC_NonType);
    Result.NonTypeDecl = D;
    return Result;
  }

  static NameClassification UndeclaredNonType() {
    return NameClassification(NC_UndeclaredNonType);
  }

  static NameClassification DependentNonType() {
    return NameClassification(NC_DependentNonType);
  }

  static NameClassification TypeTemplate(TemplateName Name) {
    NameClassification Result(NC_TypeTemplate);
    Result.Template = Name;
    return Result;
  }

  static NameClassification VarTemplate(TemplateName Name) {
    NameClassification Result(NC_VarTemplate);
    Result.Template = Name;
    return Result;
  }

  static NameClassification FunctionTemplate(TemplateName Name) {
    NameClassification Result(NC_FunctionTemplate);
    Result.Template = Name;
    return Result;
  }

  static NameClassification UndeclaredTemplate(TemplateName Name) {
    NameClassification Result(NC_UndeclaredTemplate);
    Result.Template = Name;
    return Result;
  }

  NameClassificationKind getKind() const { return Kind; }

  // Each getter asserts that its payload member is the active one.
  ExprResult getExpression() const {
    assert(Kind == NC_ContextIndependentExpr);
    return Expr;
  }

  ParsedType getType() const {
    assert(Kind == NC_Type);
    return Type;
  }

  NamedDecl *getNonTypeDecl() const {
    assert(Kind == NC_NonType);
    return NonTypeDecl;
  }

  TemplateName getTemplateName() const {
    assert(Kind == NC_TypeTemplate || Kind == NC_FunctionTemplate ||
           Kind == NC_VarTemplate || Kind == NC_UndeclaredTemplate);
    return Template;
  }

  // Map the classification kind onto the parser-facing TemplateNameKind;
  // only valid for the four template classifications.
  TemplateNameKind getTemplateNameKind() const {
    switch (Kind) {
    case NC_TypeTemplate:
      return TNK_Type_template;
    case NC_FunctionTemplate:
      return TNK_Function_template;
    case NC_VarTemplate:
      return TNK_Var_template;
    case NC_UndeclaredTemplate:
      return TNK_Undeclared_template;
    default:
      llvm_unreachable("unsupported name classification.");
    }
  }
};

/// Perform name lookup on the given name, classifying it based on
/// the results of name lookup and the following token.
///
/// This routine is used by the parser to resolve identifiers and help direct
/// parsing. When the identifier cannot be found, this routine will attempt
/// to correct the typo and classify based on the resulting name.
///
/// \param S The scope in which we're performing name lookup.
///
/// \param SS The nested-name-specifier that precedes the name.
///
/// \param Name The identifier. If typo correction finds an alternative name,
/// this pointer parameter will be updated accordingly.
///
/// \param NameLoc The location of the identifier.
///
/// \param NextToken The token following the identifier. Used to help
/// disambiguate the name.
///
/// \param CCC The correction callback, if typo correction is desired.
NameClassification ClassifyName(Scope *S, CXXScopeSpec &SS,
                                IdentifierInfo *&Name, SourceLocation NameLoc,
                                const Token &NextToken,
                                CorrectionCandidateCallback *CCC = nullptr);

/// Act on the result of classifying a name as an undeclared (ADL-only)
/// non-type declaration.
ExprResult ActOnNameClassifiedAsUndeclaredNonType(IdentifierInfo *Name,
                                                  SourceLocation NameLoc);
/// Act on the result of classifying a name as an undeclared member of a
/// dependent base class.
ExprResult ActOnNameClassifiedAsDependentNonType(const CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, bool IsAddressOfOperand); /// Act on the result of classifying a name as a specific non-type /// declaration. ExprResult ActOnNameClassifiedAsNonType(Scope *S, const CXXScopeSpec &SS, NamedDecl *Found, SourceLocation NameLoc, const Token &NextToken); /// Describes the detailed kind of a template name. Used in diagnostics. enum class TemplateNameKindForDiagnostics { ClassTemplate, FunctionTemplate, VarTemplate, AliasTemplate, TemplateTemplateParam, Concept, DependentTemplate }; TemplateNameKindForDiagnostics getTemplateNameKindForDiagnostics(TemplateName Name); /// Determine whether it's plausible that E was intended to be a /// template-name. bool mightBeIntendedToBeTemplateName(ExprResult E, bool &Dependent) { if (!getLangOpts().CPlusPlus || E.isInvalid()) return false; Dependent = false; if (auto *DRE = dyn_cast<DeclRefExpr>(E.get())) return !DRE->hasExplicitTemplateArgs(); if (auto *ME = dyn_cast<MemberExpr>(E.get())) return !ME->hasExplicitTemplateArgs(); Dependent = true; if (auto *DSDRE = dyn_cast<DependentScopeDeclRefExpr>(E.get())) return !DSDRE->hasExplicitTemplateArgs(); if (auto *DSME = dyn_cast<CXXDependentScopeMemberExpr>(E.get())) return !DSME->hasExplicitTemplateArgs(); // Any additional cases recognized here should also be handled by // diagnoseExprIntendedAsTemplateName. 
return false; } void diagnoseExprIntendedAsTemplateName(Scope *S, ExprResult TemplateName, SourceLocation Less, SourceLocation Greater); Decl *ActOnDeclarator(Scope *S, Declarator &D); NamedDecl *HandleDeclarator(Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParameterLists); void RegisterLocallyScopedExternCDecl(NamedDecl *ND, Scope *S); bool DiagnoseClassNameShadow(DeclContext *DC, DeclarationNameInfo Info); bool diagnoseQualifiedDeclaration(CXXScopeSpec &SS, DeclContext *DC, DeclarationName Name, SourceLocation Loc, bool IsTemplateId); void diagnoseIgnoredQualifiers(unsigned DiagID, unsigned Quals, SourceLocation FallbackLoc, SourceLocation ConstQualLoc = SourceLocation(), SourceLocation VolatileQualLoc = SourceLocation(), SourceLocation RestrictQualLoc = SourceLocation(), SourceLocation AtomicQualLoc = SourceLocation(), SourceLocation UnalignedQualLoc = SourceLocation()); static bool adjustContextForLocalExternDecl(DeclContext *&DC); void DiagnoseFunctionSpecifiers(const DeclSpec &DS); NamedDecl *getShadowedDeclaration(const TypedefNameDecl *D, const LookupResult &R); NamedDecl *getShadowedDeclaration(const VarDecl *D, const LookupResult &R); void CheckShadow(NamedDecl *D, NamedDecl *ShadowedDecl, const LookupResult &R); void CheckShadow(Scope *S, VarDecl *D); /// Warn if 'E', which is an expression that is about to be modified, refers /// to a shadowing declaration. void CheckShadowingDeclModification(Expr *E, SourceLocation Loc); void DiagnoseShadowingLambdaDecls(const sema::LambdaScopeInfo *LSI); private: /// Map of current shadowing declarations to shadowed declarations. Warn if /// it looks like the user is trying to modify the shadowing declaration. 
llvm::DenseMap<const NamedDecl *, const NamedDecl *> ShadowingDecls; public: void CheckCastAlign(Expr *Op, QualType T, SourceRange TRange); void handleTagNumbering(const TagDecl *Tag, Scope *TagScope); void setTagNameForLinkagePurposes(TagDecl *TagFromDeclSpec, TypedefNameDecl *NewTD); void CheckTypedefForVariablyModifiedType(Scope *S, TypedefNameDecl *D); NamedDecl* ActOnTypedefDeclarator(Scope* S, Declarator& D, DeclContext* DC, TypeSourceInfo *TInfo, LookupResult &Previous); NamedDecl* ActOnTypedefNameDecl(Scope* S, DeclContext* DC, TypedefNameDecl *D, LookupResult &Previous, bool &Redeclaration); NamedDecl *ActOnVariableDeclarator(Scope *S, Declarator &D, DeclContext *DC, TypeSourceInfo *TInfo, LookupResult &Previous, MultiTemplateParamsArg TemplateParamLists, bool &AddToScope, ArrayRef<BindingDecl *> Bindings = None); NamedDecl * ActOnDecompositionDeclarator(Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParamLists); // Returns true if the variable declaration is a redeclaration bool CheckVariableDeclaration(VarDecl *NewVD, LookupResult &Previous); void CheckVariableDeclarationType(VarDecl *NewVD); bool DeduceVariableDeclarationType(VarDecl *VDecl, bool DirectInit, Expr *Init); void CheckCompleteVariableDeclaration(VarDecl *VD); void CheckCompleteDecompositionDeclaration(DecompositionDecl *DD); void MaybeSuggestAddingStaticToDecl(const FunctionDecl *D); NamedDecl* ActOnFunctionDeclarator(Scope* S, Declarator& D, DeclContext* DC, TypeSourceInfo *TInfo, LookupResult &Previous, MultiTemplateParamsArg TemplateParamLists, bool &AddToScope); bool AddOverriddenMethods(CXXRecordDecl *DC, CXXMethodDecl *MD); enum class CheckConstexprKind { /// Diagnose issues that are non-constant or that are extensions. Diagnose, /// Identify whether this function satisfies the formal rules for constexpr /// functions in the current lanugage mode (with no extensions). 
CheckValid }; bool CheckConstexprFunctionDefinition(const FunctionDecl *FD, CheckConstexprKind Kind); void DiagnoseHiddenVirtualMethods(CXXMethodDecl *MD); void FindHiddenVirtualMethods(CXXMethodDecl *MD, SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods); void NoteHiddenVirtualMethods(CXXMethodDecl *MD, SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods); // Returns true if the function declaration is a redeclaration bool CheckFunctionDeclaration(Scope *S, FunctionDecl *NewFD, LookupResult &Previous, bool IsMemberSpecialization); bool shouldLinkDependentDeclWithPrevious(Decl *D, Decl *OldDecl); bool canFullyTypeCheckRedeclaration(ValueDecl *NewD, ValueDecl *OldD, QualType NewT, QualType OldT); void CheckMain(FunctionDecl *FD, const DeclSpec &D); void CheckMSVCRTEntryPoint(FunctionDecl *FD); Attr *getImplicitCodeSegOrSectionAttrForFunction(const FunctionDecl *FD, bool IsDefinition); void CheckFunctionOrTemplateParamDeclarator(Scope *S, Declarator &D); Decl *ActOnParamDeclarator(Scope *S, Declarator &D); ParmVarDecl *BuildParmVarDeclForTypedef(DeclContext *DC, SourceLocation Loc, QualType T); QualType adjustParameterTypeForObjCAutoRefCount(QualType T, SourceLocation NameLoc, TypeSourceInfo *TSInfo); ParmVarDecl *CheckParameter(DeclContext *DC, SourceLocation StartLoc, SourceLocation NameLoc, IdentifierInfo *Name, QualType T, TypeSourceInfo *TSInfo, StorageClass SC); void ActOnParamDefaultArgument(Decl *param, SourceLocation EqualLoc, Expr *defarg); void ActOnParamUnparsedDefaultArgument(Decl *param, SourceLocation EqualLoc, SourceLocation ArgLoc); void ActOnParamDefaultArgumentError(Decl *param, SourceLocation EqualLoc); bool SetParamDefaultArgument(ParmVarDecl *Param, Expr *DefaultArg, SourceLocation EqualLoc); // Contexts where using non-trivial C union types can be disallowed. This is // passed to err_non_trivial_c_union_in_invalid_context. enum NonTrivialCUnionContext { // Function parameter. NTCUC_FunctionParam, // Function return. 
NTCUC_FunctionReturn, // Default-initialized object. NTCUC_DefaultInitializedObject, // Variable with automatic storage duration. NTCUC_AutoVar, // Initializer expression that might copy from another object. NTCUC_CopyInit, // Assignment. NTCUC_Assignment, // Compound literal. NTCUC_CompoundLiteral, // Block capture. NTCUC_BlockCapture, // lvalue-to-rvalue conversion of volatile type. NTCUC_LValueToRValueVolatile, }; /// Emit diagnostics if the initializer or any of its explicit or /// implicitly-generated subexpressions require copying or /// default-initializing a type that is or contains a C union type that is /// non-trivial to copy or default-initialize. void checkNonTrivialCUnionInInitializer(const Expr *Init, SourceLocation Loc); // These flags are passed to checkNonTrivialCUnion. enum NonTrivialCUnionKind { NTCUK_Init = 0x1, NTCUK_Destruct = 0x2, NTCUK_Copy = 0x4, }; /// Emit diagnostics if a non-trivial C union type or a struct that contains /// a non-trivial C union is used in an invalid context. void checkNonTrivialCUnion(QualType QT, SourceLocation Loc, NonTrivialCUnionContext UseContext, unsigned NonTrivialKind); void AddInitializerToDecl(Decl *dcl, Expr *init, bool DirectInit); void ActOnUninitializedDecl(Decl *dcl); void ActOnInitializerError(Decl *Dcl); void ActOnPureSpecifier(Decl *D, SourceLocation PureSpecLoc); void ActOnCXXForRangeDecl(Decl *D); StmtResult ActOnCXXForRangeIdentifier(Scope *S, SourceLocation IdentLoc, IdentifierInfo *Ident, ParsedAttributes &Attrs, SourceLocation AttrEnd); void SetDeclDeleted(Decl *dcl, SourceLocation DelLoc); void SetDeclDefaulted(Decl *dcl, SourceLocation DefaultLoc); void CheckStaticLocalForDllExport(VarDecl *VD); void FinalizeDeclaration(Decl *D); DeclGroupPtrTy FinalizeDeclaratorGroup(Scope *S, const DeclSpec &DS, ArrayRef<Decl *> Group); DeclGroupPtrTy BuildDeclaratorGroup(MutableArrayRef<Decl *> Group); /// Should be called on all declarations that might have attached /// documentation comments. 
void ActOnDocumentableDecl(Decl *D); void ActOnDocumentableDecls(ArrayRef<Decl *> Group); void ActOnFinishKNRParamDeclarations(Scope *S, Declarator &D, SourceLocation LocAfterDecls); void CheckForFunctionRedefinition( FunctionDecl *FD, const FunctionDecl *EffectiveDefinition = nullptr, SkipBodyInfo *SkipBody = nullptr); Decl *ActOnStartOfFunctionDef(Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParamLists, SkipBodyInfo *SkipBody = nullptr); Decl *ActOnStartOfFunctionDef(Scope *S, Decl *D, SkipBodyInfo *SkipBody = nullptr); void ActOnStartOfObjCMethodDef(Scope *S, Decl *D); bool isObjCMethodDecl(Decl *D) { return D && isa<ObjCMethodDecl>(D); } /// Determine whether we can delay parsing the body of a function or /// function template until it is used, assuming we don't care about emitting /// code for that function. /// /// This will be \c false if we may need the body of the function in the /// middle of parsing an expression (where it's impractical to switch to /// parsing a different function), for instance, if it's constexpr in C++11 /// or has an 'auto' return type in C++14. These cases are essentially bugs. bool canDelayFunctionBody(const Declarator &D); /// Determine whether we can skip parsing the body of a function /// definition, assuming we don't care about analyzing its body or emitting /// code for that function. /// /// This will be \c false only if we may need the body of the function in /// order to parse the rest of the program (for instance, if it is /// \c constexpr in C++11 or has an 'auto' return type in C++14). 
bool canSkipFunctionBody(Decl *D); void computeNRVO(Stmt *Body, sema::FunctionScopeInfo *Scope); Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body); Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body, bool IsInstantiation); Decl *ActOnSkippedFunctionBody(Decl *Decl); void ActOnFinishInlineFunctionDef(FunctionDecl *D); /// ActOnFinishDelayedAttribute - Invoked when we have finished parsing an /// attribute for which parsing is delayed. void ActOnFinishDelayedAttribute(Scope *S, Decl *D, ParsedAttributes &Attrs); /// Diagnose any unused parameters in the given sequence of /// ParmVarDecl pointers. void DiagnoseUnusedParameters(ArrayRef<ParmVarDecl *> Parameters); /// Diagnose whether the size of parameters or return value of a /// function or obj-c method definition is pass-by-value and larger than a /// specified threshold. void DiagnoseSizeOfParametersAndReturnValue(ArrayRef<ParmVarDecl *> Parameters, QualType ReturnTy, NamedDecl *D); void DiagnoseInvalidJumps(Stmt *Body); Decl *ActOnFileScopeAsmDecl(Expr *expr, SourceLocation AsmLoc, SourceLocation RParenLoc); /// Handle a C++11 empty-declaration and attribute-declaration. Decl *ActOnEmptyDeclaration(Scope *S, const ParsedAttributesView &AttrList, SourceLocation SemiLoc); enum class ModuleDeclKind { Interface, ///< 'export module X;' Implementation, ///< 'module X;' }; /// The parser has processed a module-declaration that begins the definition /// of a module interface or implementation. DeclGroupPtrTy ActOnModuleDecl(SourceLocation StartLoc, SourceLocation ModuleLoc, ModuleDeclKind MDK, ModuleIdPath Path, bool IsFirstDecl); /// The parser has processed a global-module-fragment declaration that begins /// the definition of the global module fragment of the current module unit. /// \param ModuleLoc The location of the 'module' keyword. 
DeclGroupPtrTy ActOnGlobalModuleFragmentDecl(SourceLocation ModuleLoc); /// The parser has processed a private-module-fragment declaration that begins /// the definition of the private module fragment of the current module unit. /// \param ModuleLoc The location of the 'module' keyword. /// \param PrivateLoc The location of the 'private' keyword. DeclGroupPtrTy ActOnPrivateModuleFragmentDecl(SourceLocation ModuleLoc, SourceLocation PrivateLoc); /// The parser has processed a module import declaration. /// /// \param StartLoc The location of the first token in the declaration. This /// could be the location of an '@', 'export', or 'import'. /// \param ExportLoc The location of the 'export' keyword, if any. /// \param ImportLoc The location of the 'import' keyword. /// \param Path The module access path. DeclResult ActOnModuleImport(SourceLocation StartLoc, SourceLocation ExportLoc, SourceLocation ImportLoc, ModuleIdPath Path); DeclResult ActOnModuleImport(SourceLocation StartLoc, SourceLocation ExportLoc, SourceLocation ImportLoc, Module *M, ModuleIdPath Path = {}); /// The parser has processed a module import translated from a /// #include or similar preprocessing directive. void ActOnModuleInclude(SourceLocation DirectiveLoc, Module *Mod); void BuildModuleInclude(SourceLocation DirectiveLoc, Module *Mod); /// The parsed has entered a submodule. void ActOnModuleBegin(SourceLocation DirectiveLoc, Module *Mod); /// The parser has left a submodule. void ActOnModuleEnd(SourceLocation DirectiveLoc, Module *Mod); /// Create an implicit import of the given module at the given /// source location, for error recovery, if possible. /// /// This routine is typically used when an entity found by name lookup /// is actually hidden within a module that we know about but the user /// has forgotten to import. void createImplicitModuleImportForErrorRecovery(SourceLocation Loc, Module *Mod); /// Kinds of missing import. 
Note, the values of these enumerators correspond /// to %select values in diagnostics. enum class MissingImportKind { Declaration, Definition, DefaultArgument, ExplicitSpecialization, PartialSpecialization }; /// Diagnose that the specified declaration needs to be visible but /// isn't, and suggest a module import that would resolve the problem. void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl, MissingImportKind MIK, bool Recover = true); void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl, SourceLocation DeclLoc, ArrayRef<Module *> Modules, MissingImportKind MIK, bool Recover); Decl *ActOnStartExportDecl(Scope *S, SourceLocation ExportLoc, SourceLocation LBraceLoc); Decl *ActOnFinishExportDecl(Scope *S, Decl *ExportDecl, SourceLocation RBraceLoc); /// We've found a use of a templated declaration that would trigger an /// implicit instantiation. Check that any relevant explicit specializations /// and partial specializations are visible, and diagnose if not. void checkSpecializationVisibility(SourceLocation Loc, NamedDecl *Spec); /// We've found a use of a template specialization that would select a /// partial specialization. Check that the partial specialization is visible, /// and diagnose if not. void checkPartialSpecializationVisibility(SourceLocation Loc, NamedDecl *Spec); /// Retrieve a suitable printing policy for diagnostics. PrintingPolicy getPrintingPolicy() const { return getPrintingPolicy(Context, PP); } /// Retrieve a suitable printing policy for diagnostics. static PrintingPolicy getPrintingPolicy(const ASTContext &Ctx, const Preprocessor &PP); /// Scope actions. 
void ActOnPopScope(SourceLocation Loc, Scope *S); void ActOnTranslationUnitScope(Scope *S); Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS, RecordDecl *&AnonRecord); Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS, MultiTemplateParamsArg TemplateParams, bool IsExplicitInstantiation, RecordDecl *&AnonRecord); Decl *BuildAnonymousStructOrUnion(Scope *S, DeclSpec &DS, AccessSpecifier AS, RecordDecl *Record, const PrintingPolicy &Policy); Decl *BuildMicrosoftCAnonymousStruct(Scope *S, DeclSpec &DS, RecordDecl *Record); /// Common ways to introduce type names without a tag for use in diagnostics. /// Keep in sync with err_tag_reference_non_tag. enum NonTagKind { NTK_NonStruct, NTK_NonClass, NTK_NonUnion, NTK_NonEnum, NTK_Typedef, NTK_TypeAlias, NTK_Template, NTK_TypeAliasTemplate, NTK_TemplateTemplateArgument, }; /// Given a non-tag type declaration, returns an enum useful for indicating /// what kind of non-tag type this is. NonTagKind getNonTagTypeDeclKind(const Decl *D, TagTypeKind TTK); bool isAcceptableTagRedeclaration(const TagDecl *Previous, TagTypeKind NewTag, bool isDefinition, SourceLocation NewTagLoc, const IdentifierInfo *Name); enum TagUseKind { TUK_Reference, // Reference to a tag: 'struct foo *X;' TUK_Declaration, // Fwd decl of a tag: 'struct foo;' TUK_Definition, // Definition of a tag: 'struct foo { int X; } Y;' TUK_Friend // Friend declaration: 'friend struct foo;' }; Decl *ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, const ParsedAttributesView &Attr, AccessSpecifier AS, SourceLocation ModulePrivateLoc, MultiTemplateParamsArg TemplateParameterLists, bool &OwnedDecl, bool &IsDependent, SourceLocation ScopedEnumKWLoc, bool ScopedEnumUsesClassTag, TypeResult UnderlyingType, bool IsTypeSpecifier, bool IsTemplateParamOrArg, SkipBodyInfo *SkipBody = nullptr); Decl *ActOnTemplatedFriendTag(Scope *S, 
SourceLocation FriendLoc, unsigned TagSpec, SourceLocation TagLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, const ParsedAttributesView &Attr, MultiTemplateParamsArg TempParamLists); TypeResult ActOnDependentTag(Scope *S, unsigned TagSpec, TagUseKind TUK, const CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation TagLoc, SourceLocation NameLoc); void ActOnDefs(Scope *S, Decl *TagD, SourceLocation DeclStart, IdentifierInfo *ClassName, SmallVectorImpl<Decl *> &Decls); Decl *ActOnField(Scope *S, Decl *TagD, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth); FieldDecl *HandleField(Scope *S, RecordDecl *TagD, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth, InClassInitStyle InitStyle, AccessSpecifier AS); MSPropertyDecl *HandleMSProperty(Scope *S, RecordDecl *TagD, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth, InClassInitStyle InitStyle, AccessSpecifier AS, const ParsedAttr &MSPropertyAttr); FieldDecl *CheckFieldDecl(DeclarationName Name, QualType T, TypeSourceInfo *TInfo, RecordDecl *Record, SourceLocation Loc, bool Mutable, Expr *BitfieldWidth, InClassInitStyle InitStyle, SourceLocation TSSL, AccessSpecifier AS, NamedDecl *PrevDecl, Declarator *D = nullptr); bool CheckNontrivialField(FieldDecl *FD); void DiagnoseNontrivial(const CXXRecordDecl *Record, CXXSpecialMember CSM); enum TrivialABIHandling { /// The triviality of a method unaffected by "trivial_abi". TAH_IgnoreTrivialABI, /// The triviality of a method affected by "trivial_abi". 
TAH_ConsiderTrivialABI }; bool SpecialMemberIsTrivial(CXXMethodDecl *MD, CXXSpecialMember CSM, TrivialABIHandling TAH = TAH_IgnoreTrivialABI, bool Diagnose = false); CXXSpecialMember getSpecialMember(const CXXMethodDecl *MD); void ActOnLastBitfield(SourceLocation DeclStart, SmallVectorImpl<Decl *> &AllIvarDecls); Decl *ActOnIvar(Scope *S, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth, tok::ObjCKeywordKind visibility); // This is used for both record definitions and ObjC interface declarations. void ActOnFields(Scope *S, SourceLocation RecLoc, Decl *TagDecl, ArrayRef<Decl *> Fields, SourceLocation LBrac, SourceLocation RBrac, const ParsedAttributesView &AttrList); /// ActOnTagStartDefinition - Invoked when we have entered the /// scope of a tag's definition (e.g., for an enumeration, class, /// struct, or union). void ActOnTagStartDefinition(Scope *S, Decl *TagDecl); /// Perform ODR-like check for C/ObjC when merging tag types from modules. /// Differently from C++, actually parse the body and reject / error out /// in case of a structural mismatch. bool ActOnDuplicateDefinition(DeclSpec &DS, Decl *Prev, SkipBodyInfo &SkipBody); typedef void *SkippedDefinitionContext; /// Invoked when we enter a tag definition that we're skipping. SkippedDefinitionContext ActOnTagStartSkippedDefinition(Scope *S, Decl *TD); Decl *ActOnObjCContainerStartDefinition(Decl *IDecl); /// ActOnStartCXXMemberDeclarations - Invoked when we have parsed a /// C++ record definition's base-specifiers clause and are starting its /// member declarations. void ActOnStartCXXMemberDeclarations(Scope *S, Decl *TagDecl, SourceLocation FinalLoc, bool IsFinalSpelledSealed, SourceLocation LBraceLoc); /// ActOnTagFinishDefinition - Invoked once we have finished parsing /// the definition of a tag (enumeration, class, struct, or union). 
void ActOnTagFinishDefinition(Scope *S, Decl *TagDecl, SourceRange BraceRange); void ActOnTagFinishSkippedDefinition(SkippedDefinitionContext Context); void ActOnObjCContainerFinishDefinition(); /// Invoked when we must temporarily exit the objective-c container /// scope for parsing/looking-up C constructs. /// /// Must be followed by a call to \see ActOnObjCReenterContainerContext void ActOnObjCTemporaryExitContainerContext(DeclContext *DC); void ActOnObjCReenterContainerContext(DeclContext *DC); /// ActOnTagDefinitionError - Invoked when there was an unrecoverable /// error parsing the definition of a tag. void ActOnTagDefinitionError(Scope *S, Decl *TagDecl); EnumConstantDecl *CheckEnumConstant(EnumDecl *Enum, EnumConstantDecl *LastEnumConst, SourceLocation IdLoc, IdentifierInfo *Id, Expr *val); bool CheckEnumUnderlyingType(TypeSourceInfo *TI); bool CheckEnumRedeclaration(SourceLocation EnumLoc, bool IsScoped, QualType EnumUnderlyingTy, bool IsFixed, const EnumDecl *Prev); /// Determine whether the body of an anonymous enumeration should be skipped. /// \param II The name of the first enumerator. SkipBodyInfo shouldSkipAnonEnumBody(Scope *S, IdentifierInfo *II, SourceLocation IILoc); Decl *ActOnEnumConstant(Scope *S, Decl *EnumDecl, Decl *LastEnumConstant, SourceLocation IdLoc, IdentifierInfo *Id, const ParsedAttributesView &Attrs, SourceLocation EqualLoc, Expr *Val); void ActOnEnumBody(SourceLocation EnumLoc, SourceRange BraceRange, Decl *EnumDecl, ArrayRef<Decl *> Elements, Scope *S, const ParsedAttributesView &Attr); DeclContext *getContainingDC(DeclContext *DC); /// Set the current declaration context until it gets popped. void PushDeclContext(Scope *S, DeclContext *DC); void PopDeclContext(); /// EnterDeclaratorContext - Used when we must lookup names in the context /// of a declarator's nested name specifier. 
void EnterDeclaratorContext(Scope *S, DeclContext *DC); void ExitDeclaratorContext(Scope *S); /// Push the parameters of D, which must be a function, into scope. void ActOnReenterFunctionContext(Scope* S, Decl* D); void ActOnExitFunctionContext(); DeclContext *getFunctionLevelDeclContext(); /// getCurFunctionDecl - If inside of a function body, this returns a pointer /// to the function decl for the function being parsed. If we're currently /// in a 'block', this returns the containing context. FunctionDecl *getCurFunctionDecl(); /// getCurMethodDecl - If inside of a method body, this returns a pointer to /// the method decl for the method being parsed. If we're currently /// in a 'block', this returns the containing context. ObjCMethodDecl *getCurMethodDecl(); /// getCurFunctionOrMethodDecl - Return the Decl for the current ObjC method /// or C function we're in, otherwise return null. If we're currently /// in a 'block', this returns the containing context. NamedDecl *getCurFunctionOrMethodDecl(); /// Add this decl to the scope shadowed decl chains. void PushOnScopeChains(NamedDecl *D, Scope *S, bool AddToContext = true); /// isDeclInScope - If 'Ctx' is a function/method, isDeclInScope returns true /// if 'D' is in Scope 'S', otherwise 'S' is ignored and isDeclInScope returns /// true if 'D' belongs to the given declaration context. /// /// \param AllowInlineNamespace If \c true, allow the declaration to be in the /// enclosing namespace set of the context, rather than contained /// directly within it. bool isDeclInScope(NamedDecl *D, DeclContext *Ctx, Scope *S = nullptr, bool AllowInlineNamespace = false); /// Finds the scope corresponding to the given decl context, if it /// happens to be an enclosing scope. Otherwise return NULL. static Scope *getScopeForDeclContext(Scope *S, DeclContext *DC); /// Subroutines of ActOnDeclarator(). 
TypedefDecl *ParseTypedefDecl(Scope *S, Declarator &D, QualType T, TypeSourceInfo *TInfo); bool isIncompatibleTypedef(TypeDecl *Old, TypedefNameDecl *New); /// Describes the kind of merge to perform for availability /// attributes (including "deprecated", "unavailable", and "availability"). enum AvailabilityMergeKind { /// Don't merge availability attributes at all. AMK_None, /// Merge availability attributes for a redeclaration, which requires /// an exact match. AMK_Redeclaration, /// Merge availability attributes for an override, which requires /// an exact match or a weakening of constraints. AMK_Override, /// Merge availability attributes for an implementation of /// a protocol requirement. AMK_ProtocolImplementation, }; /// Describes the kind of priority given to an availability attribute. /// /// The sum of priorities deteremines the final priority of the attribute. /// The final priority determines how the attribute will be merged. /// An attribute with a lower priority will always remove higher priority /// attributes for the specified platform when it is being applied. An /// attribute with a higher priority will not be applied if the declaration /// already has an availability attribute with a lower priority for the /// specified platform. The final prirority values are not expected to match /// the values in this enumeration, but instead should be treated as a plain /// integer value. This enumeration just names the priority weights that are /// used to calculate that final vaue. enum AvailabilityPriority : int { /// The availability attribute was specified explicitly next to the /// declaration. AP_Explicit = 0, /// The availability attribute was applied using '#pragma clang attribute'. AP_PragmaClangAttribute = 1, /// The availability attribute for a specific platform was inferred from /// an availability attribute for another platform. AP_InferredFromOtherPlatform = 2 }; /// Attribute merging methods. Return true if a new attribute was added. 
AvailabilityAttr * mergeAvailabilityAttr(NamedDecl *D, const AttributeCommonInfo &CI, IdentifierInfo *Platform, bool Implicit, VersionTuple Introduced, VersionTuple Deprecated, VersionTuple Obsoleted, bool IsUnavailable, StringRef Message, bool IsStrict, StringRef Replacement, AvailabilityMergeKind AMK, int Priority); TypeVisibilityAttr * mergeTypeVisibilityAttr(Decl *D, const AttributeCommonInfo &CI, TypeVisibilityAttr::VisibilityType Vis); VisibilityAttr *mergeVisibilityAttr(Decl *D, const AttributeCommonInfo &CI, VisibilityAttr::VisibilityType Vis); UuidAttr *mergeUuidAttr(Decl *D, const AttributeCommonInfo &CI, StringRef Uuid); DLLImportAttr *mergeDLLImportAttr(Decl *D, const AttributeCommonInfo &CI); DLLExportAttr *mergeDLLExportAttr(Decl *D, const AttributeCommonInfo &CI); MSInheritanceAttr * mergeMSInheritanceAttr(Decl *D, const AttributeCommonInfo &CI, bool BestCase, MSInheritanceAttr::Spelling SemanticSpelling); FormatAttr *mergeFormatAttr(Decl *D, const AttributeCommonInfo &CI, IdentifierInfo *Format, int FormatIdx, int FirstArg); SectionAttr *mergeSectionAttr(Decl *D, const AttributeCommonInfo &CI, StringRef Name); CodeSegAttr *mergeCodeSegAttr(Decl *D, const AttributeCommonInfo &CI, StringRef Name); AlwaysInlineAttr *mergeAlwaysInlineAttr(Decl *D, const AttributeCommonInfo &CI, const IdentifierInfo *Ident); MinSizeAttr *mergeMinSizeAttr(Decl *D, const AttributeCommonInfo &CI); NoSpeculativeLoadHardeningAttr * mergeNoSpeculativeLoadHardeningAttr(Decl *D, const NoSpeculativeLoadHardeningAttr &AL); SpeculativeLoadHardeningAttr * mergeSpeculativeLoadHardeningAttr(Decl *D, const SpeculativeLoadHardeningAttr &AL); OptimizeNoneAttr *mergeOptimizeNoneAttr(Decl *D, const AttributeCommonInfo &CI); SwiftNameAttr *mergeSwiftNameAttr(Decl *D, const AttributeCommonInfo &CI, StringRef Name, bool Override); InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D, const ParsedAttr &AL); InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D, const InternalLinkageAttr &AL); 
CommonAttr *mergeCommonAttr(Decl *D, const ParsedAttr &AL); CommonAttr *mergeCommonAttr(Decl *D, const CommonAttr &AL); void mergeDeclAttributes(NamedDecl *New, Decl *Old, AvailabilityMergeKind AMK = AMK_Redeclaration); void MergeTypedefNameDecl(Scope *S, TypedefNameDecl *New, LookupResult &OldDecls); bool MergeFunctionDecl(FunctionDecl *New, NamedDecl *&Old, Scope *S, bool MergeTypeWithOld); bool MergeCompatibleFunctionDecls(FunctionDecl *New, FunctionDecl *Old, Scope *S, bool MergeTypeWithOld); void mergeObjCMethodDecls(ObjCMethodDecl *New, ObjCMethodDecl *Old); void MergeVarDecl(VarDecl *New, LookupResult &Previous); void MergeVarDeclTypes(VarDecl *New, VarDecl *Old, bool MergeTypeWithOld); void MergeVarDeclExceptionSpecs(VarDecl *New, VarDecl *Old); bool checkVarDeclRedefinition(VarDecl *OldDefn, VarDecl *NewDefn); void notePreviousDefinition(const NamedDecl *Old, SourceLocation New); bool MergeCXXFunctionDecl(FunctionDecl *New, FunctionDecl *Old, Scope *S); // AssignmentAction - This is used by all the assignment diagnostic functions // to represent what is actually causing the operation enum AssignmentAction { AA_Assigning, AA_Passing, AA_Returning, AA_Converting, AA_Initializing, AA_Sending, AA_Casting, AA_Passing_CFAudited }; /// C++ Overloading. enum OverloadKind { /// This is a legitimate overload: the existing declarations are /// functions or function templates with different signatures. Ovl_Overload, /// This is not an overload because the signature exactly matches /// an existing declaration. Ovl_Match, /// This is not an overload because the lookup results contain a /// non-function. 
Ovl_NonFunction }; OverloadKind CheckOverload(Scope *S, FunctionDecl *New, const LookupResult &OldDecls, NamedDecl *&OldDecl, bool IsForUsingDecl); bool IsOverload(FunctionDecl *New, FunctionDecl *Old, bool IsForUsingDecl, bool ConsiderCudaAttrs = true); ImplicitConversionSequence TryImplicitConversion(Expr *From, QualType ToType, bool SuppressUserConversions, bool AllowExplicit, bool InOverloadResolution, bool CStyle, bool AllowObjCWritebackConversion); bool IsIntegralPromotion(Expr *From, QualType FromType, QualType ToType); bool IsFloatingPointPromotion(QualType FromType, QualType ToType); bool IsComplexPromotion(QualType FromType, QualType ToType); bool IsPointerConversion(Expr *From, QualType FromType, QualType ToType, bool InOverloadResolution, QualType& ConvertedType, bool &IncompatibleObjC); bool isObjCPointerConversion(QualType FromType, QualType ToType, QualType& ConvertedType, bool &IncompatibleObjC); bool isObjCWritebackConversion(QualType FromType, QualType ToType, QualType &ConvertedType); bool IsBlockPointerConversion(QualType FromType, QualType ToType, QualType& ConvertedType); bool FunctionParamTypesAreEqual(const FunctionProtoType *OldType, const FunctionProtoType *NewType, unsigned *ArgPos = nullptr); void HandleFunctionTypeMismatch(PartialDiagnostic &PDiag, QualType FromType, QualType ToType); void maybeExtendBlockObject(ExprResult &E); CastKind PrepareCastToObjCObjectPointer(ExprResult &E); bool CheckPointerConversion(Expr *From, QualType ToType, CastKind &Kind, CXXCastPath& BasePath, bool IgnoreBaseAccess, bool Diagnose = true); bool IsMemberPointerConversion(Expr *From, QualType FromType, QualType ToType, bool InOverloadResolution, QualType &ConvertedType); bool CheckMemberPointerConversion(Expr *From, QualType ToType, CastKind &Kind, CXXCastPath &BasePath, bool IgnoreBaseAccess); bool IsQualificationConversion(QualType FromType, QualType ToType, bool CStyle, bool &ObjCLifetimeConversion); bool IsFunctionConversion(QualType FromType, QualType 
ToType, QualType &ResultTy); bool DiagnoseMultipleUserDefinedConversion(Expr *From, QualType ToType); bool isSameOrCompatibleFunctionType(CanQualType Param, CanQualType Arg); ExprResult PerformMoveOrCopyInitialization(const InitializedEntity &Entity, const VarDecl *NRVOCandidate, QualType ResultType, Expr *Value, bool AllowNRVO = true); bool CanPerformAggregateInitializationForOverloadResolution( const InitializedEntity &Entity, InitListExpr *From); bool CanPerformCopyInitialization(const InitializedEntity &Entity, ExprResult Init); ExprResult PerformCopyInitialization(const InitializedEntity &Entity, SourceLocation EqualLoc, ExprResult Init, bool TopLevelOfInitList = false, bool AllowExplicit = false); ExprResult PerformObjectArgumentInitialization(Expr *From, NestedNameSpecifier *Qualifier, NamedDecl *FoundDecl, CXXMethodDecl *Method); /// Check that the lifetime of the initializer (and its subobjects) is /// sufficient for initializing the entity, and perform lifetime extension /// (when permitted) if not. void checkInitializerLifetime(const InitializedEntity &Entity, Expr *Init); ExprResult PerformContextuallyConvertToBool(Expr *From); ExprResult PerformContextuallyConvertToObjCPointer(Expr *From); /// Contexts in which a converted constant expression is required. enum CCEKind { CCEK_CaseValue, ///< Expression in a case label. CCEK_Enumerator, ///< Enumerator value with fixed underlying type. CCEK_TemplateArg, ///< Value of a non-type template parameter. CCEK_NewExpr, ///< Constant expression in a noptr-new-declarator. CCEK_ConstexprIf, ///< Condition in a constexpr if statement. CCEK_ExplicitBool ///< Condition in an explicit(bool) specifier. 
}; ExprResult CheckConvertedConstantExpression(Expr *From, QualType T, llvm::APSInt &Value, CCEKind CCE); ExprResult CheckConvertedConstantExpression(Expr *From, QualType T, APValue &Value, CCEKind CCE); /// Abstract base class used to perform a contextual implicit /// conversion from an expression to any type passing a filter. class ContextualImplicitConverter { public: bool Suppress; bool SuppressConversion; ContextualImplicitConverter(bool Suppress = false, bool SuppressConversion = false) : Suppress(Suppress), SuppressConversion(SuppressConversion) {} /// Determine whether the specified type is a valid destination type /// for this conversion. virtual bool match(QualType T) = 0; /// Emits a diagnostic complaining that the expression does not have /// integral or enumeration type. virtual SemaDiagnosticBuilder diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) = 0; /// Emits a diagnostic when the expression has incomplete class type. virtual SemaDiagnosticBuilder diagnoseIncomplete(Sema &S, SourceLocation Loc, QualType T) = 0; /// Emits a diagnostic when the only matching conversion function /// is explicit. virtual SemaDiagnosticBuilder diagnoseExplicitConv( Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0; /// Emits a note for the explicit conversion function. virtual SemaDiagnosticBuilder noteExplicitConv(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0; /// Emits a diagnostic when there are multiple possible conversion /// functions. virtual SemaDiagnosticBuilder diagnoseAmbiguous(Sema &S, SourceLocation Loc, QualType T) = 0; /// Emits a note for one of the candidate conversions. virtual SemaDiagnosticBuilder noteAmbiguous(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0; /// Emits a diagnostic when we picked a conversion function /// (for cases when we are not allowed to pick a conversion function). 
virtual SemaDiagnosticBuilder diagnoseConversion( Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0; virtual ~ContextualImplicitConverter() {} }; class ICEConvertDiagnoser : public ContextualImplicitConverter { bool AllowScopedEnumerations; public: ICEConvertDiagnoser(bool AllowScopedEnumerations, bool Suppress, bool SuppressConversion) : ContextualImplicitConverter(Suppress, SuppressConversion), AllowScopedEnumerations(AllowScopedEnumerations) {} /// Match an integral or (possibly scoped) enumeration type. bool match(QualType T) override; SemaDiagnosticBuilder diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) override { return diagnoseNotInt(S, Loc, T); } /// Emits a diagnostic complaining that the expression does not have /// integral or enumeration type. virtual SemaDiagnosticBuilder diagnoseNotInt(Sema &S, SourceLocation Loc, QualType T) = 0; }; /// Perform a contextual implicit conversion. ExprResult PerformContextualImplicitConversion( SourceLocation Loc, Expr *FromE, ContextualImplicitConverter &Converter); enum ObjCSubscriptKind { OS_Array, OS_Dictionary, OS_Error }; ObjCSubscriptKind CheckSubscriptingKind(Expr *FromE); // Note that LK_String is intentionally after the other literals, as // this is used for diagnostics logic. enum ObjCLiteralKind { LK_Array, LK_Dictionary, LK_Numeric, LK_Boxed, LK_String, LK_Block, LK_None }; ObjCLiteralKind CheckLiteralKind(Expr *FromE); ExprResult PerformObjectMemberConversion(Expr *From, NestedNameSpecifier *Qualifier, NamedDecl *FoundDecl, NamedDecl *Member); // Members have to be NamespaceDecl* or TranslationUnitDecl*. // TODO: make this is a typesafe union. 
typedef llvm::SmallSetVector<DeclContext *, 16> AssociatedNamespaceSet; typedef llvm::SmallSetVector<CXXRecordDecl *, 16> AssociatedClassSet; using ADLCallKind = CallExpr::ADLCallKind; void AddOverloadCandidate(FunctionDecl *Function, DeclAccessPair FoundDecl, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, bool SuppressUserConversions = false, bool PartialOverloading = false, bool AllowExplicit = true, bool AllowExplicitConversion = false, ADLCallKind IsADLCandidate = ADLCallKind::NotADL, ConversionSequenceList EarlyConversions = None); void AddFunctionCandidates(const UnresolvedSetImpl &Functions, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr, bool SuppressUserConversions = false, bool PartialOverloading = false, bool FirstArgumentIsBase = false); void AddMethodCandidate(DeclAccessPair FoundDecl, QualType ObjectType, Expr::Classification ObjectClassification, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool SuppressUserConversion = false); void AddMethodCandidate(CXXMethodDecl *Method, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, QualType ObjectType, Expr::Classification ObjectClassification, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool SuppressUserConversions = false, bool PartialOverloading = false, ConversionSequenceList EarlyConversions = None); void AddMethodTemplateCandidate(FunctionTemplateDecl *MethodTmpl, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, TemplateArgumentListInfo *ExplicitTemplateArgs, QualType ObjectType, Expr::Classification ObjectClassification, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool SuppressUserConversions = false, bool PartialOverloading = false); void AddTemplateOverloadCandidate( FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl, TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, bool SuppressUserConversions 
= false, bool PartialOverloading = false, bool AllowExplicit = true, ADLCallKind IsADLCandidate = ADLCallKind::NotADL); bool CheckNonDependentConversions(FunctionTemplateDecl *FunctionTemplate, ArrayRef<QualType> ParamTypes, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, ConversionSequenceList &Conversions, bool SuppressUserConversions, CXXRecordDecl *ActingContext = nullptr, QualType ObjectType = QualType(), Expr::Classification ObjectClassification = {}); void AddConversionCandidate( CXXConversionDecl *Conversion, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, Expr *From, QualType ToType, OverloadCandidateSet &CandidateSet, bool AllowObjCConversionOnExplicit, bool AllowExplicit, bool AllowResultConversion = true); void AddTemplateConversionCandidate( FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, Expr *From, QualType ToType, OverloadCandidateSet &CandidateSet, bool AllowObjCConversionOnExplicit, bool AllowExplicit, bool AllowResultConversion = true); void AddSurrogateCandidate(CXXConversionDecl *Conversion, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, const FunctionProtoType *Proto, Expr *Object, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet); void AddMemberOperatorCandidates(OverloadedOperatorKind Op, SourceLocation OpLoc, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, SourceRange OpRange = SourceRange()); void AddBuiltinCandidate(QualType *ParamTys, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool IsAssignmentOperator = false, unsigned NumContextualBoolArguments = 0); void AddBuiltinOperatorCandidates(OverloadedOperatorKind Op, SourceLocation OpLoc, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet); void AddArgumentDependentLookupCandidates(DeclarationName Name, SourceLocation Loc, ArrayRef<Expr *> Args, TemplateArgumentListInfo *ExplicitTemplateArgs, OverloadCandidateSet& CandidateSet, bool PartialOverloading = false); // Emit as a 
'note' the specific overload candidate void NoteOverloadCandidate(NamedDecl *Found, FunctionDecl *Fn, QualType DestType = QualType(), bool TakingAddress = false); // Emit as a series of 'note's all template and non-templates identified by // the expression Expr void NoteAllOverloadCandidates(Expr *E, QualType DestType = QualType(), bool TakingAddress = false); /// Check the enable_if expressions on the given function. Returns the first /// failing attribute, or NULL if they were all successful. EnableIfAttr *CheckEnableIf(FunctionDecl *Function, ArrayRef<Expr *> Args, bool MissingImplicitThis = false); /// Find the failed Boolean condition within a given Boolean /// constant expression, and describe it with a string. std::pair<Expr *, std::string> findFailedBooleanCondition(Expr *Cond); /// Emit diagnostics for the diagnose_if attributes on Function, ignoring any /// non-ArgDependent DiagnoseIfAttrs. /// /// Argument-dependent diagnose_if attributes should be checked each time a /// function is used as a direct callee of a function call. /// /// Returns true if any errors were emitted. bool diagnoseArgDependentDiagnoseIfAttrs(const FunctionDecl *Function, const Expr *ThisArg, ArrayRef<const Expr *> Args, SourceLocation Loc); /// Emit diagnostics for the diagnose_if attributes on Function, ignoring any /// ArgDependent DiagnoseIfAttrs. /// /// Argument-independent diagnose_if attributes should be checked on every use /// of a function. /// /// Returns true if any errors were emitted. bool diagnoseArgIndependentDiagnoseIfAttrs(const NamedDecl *ND, SourceLocation Loc); /// Returns whether the given function's address can be taken or not, /// optionally emitting a diagnostic if the address can't be taken. /// /// Returns false if taking the address of the function is illegal. 
bool checkAddressOfFunctionIsAvailable(const FunctionDecl *Function, bool Complain = false, SourceLocation Loc = SourceLocation()); // [PossiblyAFunctionType] --> [Return] // NonFunctionType --> NonFunctionType // R (A) --> R(A) // R (*)(A) --> R (A) // R (&)(A) --> R (A) // R (S::*)(A) --> R (A) QualType ExtractUnqualifiedFunctionType(QualType PossiblyAFunctionType); FunctionDecl * ResolveAddressOfOverloadedFunction(Expr *AddressOfExpr, QualType TargetType, bool Complain, DeclAccessPair &Found, bool *pHadMultipleCandidates = nullptr); FunctionDecl * resolveAddressOfOnlyViableOverloadCandidate(Expr *E, DeclAccessPair &FoundResult); bool resolveAndFixAddressOfOnlyViableOverloadCandidate( ExprResult &SrcExpr, bool DoFunctionPointerConversion = false); FunctionDecl * ResolveSingleFunctionTemplateSpecialization(OverloadExpr *ovl, bool Complain = false, DeclAccessPair *Found = nullptr); bool ResolveAndFixSingleFunctionTemplateSpecialization( ExprResult &SrcExpr, bool DoFunctionPointerConverion = false, bool Complain = false, SourceRange OpRangeForComplaining = SourceRange(), QualType DestTypeForComplaining = QualType(), unsigned DiagIDForComplaining = 0); Expr *FixOverloadedFunctionReference(Expr *E, DeclAccessPair FoundDecl, FunctionDecl *Fn); ExprResult FixOverloadedFunctionReference(ExprResult, DeclAccessPair FoundDecl, FunctionDecl *Fn); void AddOverloadedCallCandidates(UnresolvedLookupExpr *ULE, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, bool PartialOverloading = false); // An enum used to represent the different possible results of building a // range-based for loop. 
  // The result of attempting to build the begin/end calls for a
  // range-based for loop.
  enum ForRangeStatus {
    FRS_Success,
    FRS_NoViableFunction,
    FRS_DiagnosticIssued
  };

  ForRangeStatus BuildForRangeBeginEndCall(SourceLocation Loc,
                                           SourceLocation RangeLoc,
                                           const DeclarationNameInfo &NameInfo,
                                           LookupResult &MemberLookup,
                                           OverloadCandidateSet *CandidateSet,
                                           Expr *Range, ExprResult *CallExpr);

  ExprResult BuildOverloadedCallExpr(Scope *S, Expr *Fn,
                                     UnresolvedLookupExpr *ULE,
                                     SourceLocation LParenLoc,
                                     MultiExprArg Args,
                                     SourceLocation RParenLoc,
                                     Expr *ExecConfig,
                                     bool AllowTypoCorrection=true,
                                     bool CalleesAddressIsTaken=false);

  bool buildOverloadedCallSet(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE,
                              MultiExprArg Args, SourceLocation RParenLoc,
                              OverloadCandidateSet *CandidateSet,
                              ExprResult *Result);

  ExprResult CreateOverloadedUnaryOp(SourceLocation OpLoc,
                                     UnaryOperatorKind Opc,
                                     const UnresolvedSetImpl &Fns,
                                     Expr *input, bool RequiresADL = true);

  ExprResult CreateOverloadedBinOp(SourceLocation OpLoc, BinaryOperatorKind Opc,
                                   const UnresolvedSetImpl &Fns, Expr *LHS,
                                   Expr *RHS, bool RequiresADL = true);

  ExprResult CreateOverloadedArraySubscriptExpr(SourceLocation LLoc,
                                                SourceLocation RLoc,
                                                Expr *Base,Expr *Idx);

  ExprResult BuildCallToMemberFunction(Scope *S, Expr *MemExpr,
                                       SourceLocation LParenLoc,
                                       MultiExprArg Args,
                                       SourceLocation RParenLoc);

  ExprResult BuildCallToObjectOfClassType(Scope *S, Expr *Object,
                                          SourceLocation LParenLoc,
                                          MultiExprArg Args,
                                          SourceLocation RParenLoc);

  ExprResult BuildOverloadedArrowExpr(Scope *S, Expr *Base,
                                      SourceLocation OpLoc,
                                      bool *NoArrowOperatorFound = nullptr);

  /// CheckCallReturnType - Checks that a call expression's return type is
  /// complete. Returns true on failure. The location passed in is the location
  /// that best represents the call.
  bool CheckCallReturnType(QualType ReturnType, SourceLocation Loc,
                           CallExpr *CE, FunctionDecl *FD);

  /// Helpers for dealing with blocks and functions.
  bool CheckParmsForFunctionDef(ArrayRef<ParmVarDecl *> Parameters,
                                bool CheckParameterNames);
  void CheckCXXDefaultArguments(FunctionDecl *FD);
  void CheckExtraCXXDefaultArguments(Declarator &D);
  Scope *getNonFieldDeclScope(Scope *S);

  /// \name Name lookup
  ///
  /// These routines provide name lookup that is used during semantic
  /// analysis to resolve the various kinds of names (identifiers,
  /// overloaded operator names, constructor names, etc.) into zero or
  /// more declarations within a particular scope. The major entry
  /// points are LookupName, which performs unqualified name lookup,
  /// and LookupQualifiedName, which performs qualified name lookup.
  ///
  /// All name lookup is performed based on some specific criteria,
  /// which specify what names will be visible to name lookup and how
  /// far name lookup should work. These criteria are important both
  /// for capturing language semantics (certain lookups will ignore
  /// certain names, for example) and for performance, since name
  /// lookup is often a bottleneck in the compilation of C++. Name
  /// lookup criteria is specified via the LookupCriteria enumeration.
  ///
  /// The results of name lookup can vary based on the kind of name
  /// lookup performed, the current language, and the translation
  /// unit. In C, for example, name lookup will either return nothing
  /// (no entity found) or a single declaration. In C++, name lookup
  /// can additionally refer to a set of overloaded functions or
  /// result in an ambiguity. All of the possible results of name
  /// lookup are captured by the LookupResult class, which provides
  /// the ability to distinguish among them.
  //@{

  /// Describes the kind of name lookup to perform.
  enum LookupNameKind {
    /// Ordinary name lookup, which finds ordinary names (functions,
    /// variables, typedefs, etc.) in C and most kinds of names
    /// (functions, variables, members, types, etc.) in C++.
    LookupOrdinaryName = 0,

    /// Tag name lookup, which finds the names of enums, classes,
    /// structs, and unions.
    LookupTagName,

    /// Label name lookup.
    LookupLabel,

    /// Member name lookup, which finds the names of
    /// class/struct/union members.
    LookupMemberName,

    /// Look up of an operator name (e.g., operator+) for use with
    /// operator overloading. This lookup is similar to ordinary name
    /// lookup, but will ignore any declarations that are class members.
    LookupOperatorName,

    /// Look up of a name that precedes the '::' scope resolution
    /// operator in C++. This lookup completely ignores operator, object,
    /// function, and enumerator names (C++ [basic.lookup.qual]p1).
    LookupNestedNameSpecifierName,

    /// Look up a namespace name within a C++ using directive or
    /// namespace alias definition, ignoring non-namespace names (C++
    /// [basic.lookup.udir]p1).
    LookupNamespaceName,

    /// Look up all declarations in a scope with the given name,
    /// including resolved using declarations. This is appropriate
    /// for checking redeclarations for a using declaration.
    LookupUsingDeclName,

    /// Look up an ordinary name that is going to be redeclared as a
    /// name with linkage. This lookup ignores any declarations that
    /// are outside of the current scope unless they have linkage. See
    /// C99 6.2.2p4-5 and C++ [basic.link]p6.
    LookupRedeclarationWithLinkage,

    /// Look up a friend of a local class. This lookup does not look
    /// outside the innermost non-class scope. See C++11 [class.friend]p11.
    LookupLocalFriendName,

    /// Look up the name of an Objective-C protocol.
    LookupObjCProtocolName,

    /// Look up implicit 'self' parameter of an objective-c method.
    LookupObjCImplicitSelfParam,

    /// Look up the name of an OpenMP user-defined reduction operation.
    LookupOMPReductionName,

    /// Look up the name of an OpenMP user-defined mapper.
    LookupOMPMapperName,

    /// Look up any declaration with any name.
    LookupAnyName
  };

  /// Specifies whether (or how) name lookup is being performed for a
  /// redeclaration (vs.
a reference).
  enum RedeclarationKind {
    /// The lookup is a reference to this name that is not for the
    /// purpose of redeclaring the name.
    NotForRedeclaration = 0,

    /// The lookup results will be used for redeclaration of a name,
    /// if an entity by that name already exists and is visible.
    ForVisibleRedeclaration,

    /// The lookup results will be used for redeclaration of a name
    /// with external linkage; non-visible lookup results with external linkage
    /// may also be found.
    ForExternalRedeclaration
  };

  /// Pick the redeclaration-lookup kind appropriate for the current
  /// declaration context.
  RedeclarationKind forRedeclarationInCurContext() {
    // A declaration with an owning module for linkage can never link against
    // anything that is not visible. We don't need to check linkage here; if
    // the context has internal linkage, redeclaration lookup won't find things
    // from other TUs, and we can't safely compute linkage yet in general.
    if (cast<Decl>(CurContext)
            ->getOwningModuleForLinkage(/*IgnoreLinkage*/true))
      return ForVisibleRedeclaration;
    return ForExternalRedeclaration;
  }

  /// The possible outcomes of name lookup for a literal operator.
  enum LiteralOperatorLookupResult {
    /// The lookup resulted in an error.
    LOLR_Error,
    /// The lookup found no match but no diagnostic was issued.
    LOLR_ErrorNoDiagnostic,
    /// The lookup found a single 'cooked' literal operator, which
    /// expects a normal literal to be built and passed to it.
    LOLR_Cooked,
    /// The lookup found a single 'raw' literal operator, which expects
    /// a string literal containing the spelling of the literal token.
    LOLR_Raw,
    /// The lookup found an overload set of literal operator templates,
    /// which expect the characters of the spelling of the literal token to be
    /// passed as a non-type template argument pack.
    LOLR_Template,
    /// The lookup found an overload set of literal operator templates,
    /// which expect the character type and characters of the spelling of the
    /// string literal token to be passed as template arguments.
    LOLR_StringTemplate
  };

  SpecialMemberOverloadResult LookupSpecialMember(CXXRecordDecl *D,
                                                  CXXSpecialMember SM,
                                                  bool ConstArg,
                                                  bool VolatileArg,
                                                  bool RValueThis,
                                                  bool ConstThis,
                                                  bool VolatileThis);

  typedef std::function<void(const TypoCorrection &)> TypoDiagnosticGenerator;
  typedef std::function<ExprResult(Sema &, TypoExpr *, TypoCorrection)>
      TypoRecoveryCallback;

private:
  bool CppLookupName(LookupResult &R, Scope *S);

  struct TypoExprState {
    std::unique_ptr<TypoCorrectionConsumer> Consumer;
    TypoDiagnosticGenerator DiagHandler;
    TypoRecoveryCallback RecoveryHandler;
    TypoExprState();
    TypoExprState(TypoExprState &&other) noexcept;
    TypoExprState &operator=(TypoExprState &&other) noexcept;
  };

  /// The set of unhandled TypoExprs and their associated state.
  llvm::MapVector<TypoExpr *, TypoExprState> DelayedTypos;

  /// Creates a new TypoExpr AST node.
  TypoExpr *createDelayedTypo(std::unique_ptr<TypoCorrectionConsumer> TCC,
                              TypoDiagnosticGenerator TDG,
                              TypoRecoveryCallback TRC);

  // The set of known/encountered (unique, canonicalized) NamespaceDecls.
  //
  // The boolean value will be true to indicate that the namespace was loaded
  // from an AST/PCH file, or false otherwise.
  llvm::MapVector<NamespaceDecl*, bool> KnownNamespaces;

  /// Whether we have already loaded known namespaces from an external
  /// source.
  bool LoadedExternalKnownNamespaces;

  /// Helper for CorrectTypo and CorrectTypoDelayed used to create and
  /// populate a new TypoCorrectionConsumer. Returns nullptr if typo correction
  /// should be skipped entirely.
  std::unique_ptr<TypoCorrectionConsumer>
  makeTypoCorrectionConsumer(const DeclarationNameInfo &Typo,
                             Sema::LookupNameKind LookupKind, Scope *S,
                             CXXScopeSpec *SS,
                             CorrectionCandidateCallback &CCC,
                             DeclContext *MemberContext, bool EnteringContext,
                             const ObjCObjectPointerType *OPT,
                             bool ErrorRecovery);

public:
  const TypoExprState &getTypoExprState(TypoExpr *TE) const;

  /// Clears the state of the given TypoExpr.
  void clearDelayedTypo(TypoExpr *TE);

  /// Look up a name, looking for a single declaration. Return
  /// null if the results were absent, ambiguous, or overloaded.
  ///
  /// It is preferable to use the elaborated form and explicitly handle
  /// ambiguity and overloaded.
  NamedDecl *LookupSingleName(Scope *S, DeclarationName Name,
                              SourceLocation Loc,
                              LookupNameKind NameKind,
                              RedeclarationKind Redecl
                                = NotForRedeclaration);
  bool LookupBuiltin(LookupResult &R);
  bool LookupName(LookupResult &R, Scope *S,
                  bool AllowBuiltinCreation = false);
  bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
                           bool InUnqualifiedLookup = false);
  bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
                           CXXScopeSpec &SS);
  bool LookupParsedName(LookupResult &R, Scope *S, CXXScopeSpec *SS,
                        bool AllowBuiltinCreation = false,
                        bool EnteringContext = false);
  ObjCProtocolDecl *LookupProtocol(IdentifierInfo *II, SourceLocation IdLoc,
                                   RedeclarationKind Redecl
                                     = NotForRedeclaration);
  bool LookupInSuper(LookupResult &R, CXXRecordDecl *Class);

  void LookupOverloadedOperatorName(OverloadedOperatorKind Op, Scope *S,
                                    QualType T1, QualType T2,
                                    UnresolvedSetImpl &Functions);

  LabelDecl *LookupOrCreateLabel(IdentifierInfo *II, SourceLocation IdentLoc,
                                 SourceLocation GnuLabelLoc = SourceLocation());

  // Lookup helpers for the C++ special member functions of a class.
  DeclContextLookupResult LookupConstructors(CXXRecordDecl *Class);
  CXXConstructorDecl *LookupDefaultConstructor(CXXRecordDecl *Class);
  CXXConstructorDecl *LookupCopyingConstructor(CXXRecordDecl *Class,
                                               unsigned Quals);
  CXXMethodDecl *LookupCopyingAssignment(CXXRecordDecl *Class, unsigned Quals,
                                         bool RValueThis, unsigned ThisQuals);
  CXXConstructorDecl *LookupMovingConstructor(CXXRecordDecl *Class,
                                              unsigned Quals);
  CXXMethodDecl *LookupMovingAssignment(CXXRecordDecl *Class, unsigned Quals,
                                        bool RValueThis, unsigned ThisQuals);
  CXXDestructorDecl *LookupDestructor(CXXRecordDecl *Class);

  bool checkLiteralOperatorId(const CXXScopeSpec &SS, const UnqualifiedId &Id);
  LiteralOperatorLookupResult
  LookupLiteralOperator(Scope *S, LookupResult &R, ArrayRef<QualType> ArgTys,
                        bool AllowRaw, bool AllowTemplate,
                        bool AllowStringTemplate, bool DiagnoseMissing);
  bool isKnownName(StringRef name);

  /// Status of the function emission on the CUDA/HIP/OpenMP host/device attrs.
  enum class FunctionEmissionStatus {
    Emitted,
    CUDADiscarded,     // Discarded due to CUDA/HIP hostness
    OMPDiscarded,      // Discarded due to OpenMP hostness
    TemplateDiscarded, // Discarded due to uninstantiated templates
    Unknown,
  };
  FunctionEmissionStatus getEmissionStatus(FunctionDecl *Decl);

  // Whether the callee should be ignored in CUDA/HIP/OpenMP host/device check.
  bool shouldIgnoreInHostDeviceCheck(FunctionDecl *Callee);

  void ArgumentDependentLookup(DeclarationName Name, SourceLocation Loc,
                               ArrayRef<Expr *> Args, ADLResult &Functions);

  void LookupVisibleDecls(Scope *S, LookupNameKind Kind,
                          VisibleDeclConsumer &Consumer,
                          bool IncludeGlobalScope = true,
                          bool LoadExternal = true);
  void LookupVisibleDecls(DeclContext *Ctx, LookupNameKind Kind,
                          VisibleDeclConsumer &Consumer,
                          bool IncludeGlobalScope = true,
                          bool IncludeDependentBases = false,
                          bool LoadExternal = true);

  enum CorrectTypoKind {
    CTK_NonError,     // CorrectTypo used in a non error recovery situation.
    CTK_ErrorRecovery // CorrectTypo used in normal error recovery.
  };

  TypoCorrection CorrectTypo(const DeclarationNameInfo &Typo,
                             Sema::LookupNameKind LookupKind,
                             Scope *S, CXXScopeSpec *SS,
                             CorrectionCandidateCallback &CCC,
                             CorrectTypoKind Mode,
                             DeclContext *MemberContext = nullptr,
                             bool EnteringContext = false,
                             const ObjCObjectPointerType *OPT = nullptr,
                             bool RecordFailure = true);

  TypoExpr *CorrectTypoDelayed(const DeclarationNameInfo &Typo,
                               Sema::LookupNameKind LookupKind, Scope *S,
                               CXXScopeSpec *SS,
                               CorrectionCandidateCallback &CCC,
                               TypoDiagnosticGenerator TDG,
                               TypoRecoveryCallback TRC, CorrectTypoKind Mode,
                               DeclContext *MemberContext = nullptr,
                               bool EnteringContext = false,
                               const ObjCObjectPointerType *OPT = nullptr);

  /// Process any TypoExprs in the given Expr and its children,
  /// generating diagnostics as appropriate and returning a new Expr if there
  /// were typos that were all successfully corrected and ExprError if one or
  /// more typos could not be corrected.
  ///
  /// \param E The Expr to check for TypoExprs.
  ///
  /// \param InitDecl A VarDecl to avoid because the Expr being corrected is
  /// its initializer.
  ///
  /// \param Filter A function applied to a newly rebuilt Expr to determine if
  /// it is an acceptable/usable result from a single combination of typo
  /// corrections. As long as the filter returns ExprError, different
  /// combinations of corrections will be tried until all are exhausted.
  ExprResult
  CorrectDelayedTyposInExpr(Expr *E, VarDecl *InitDecl = nullptr,
                            llvm::function_ref<ExprResult(Expr *)> Filter =
                                [](Expr *E) -> ExprResult { return E; });

  ExprResult
  CorrectDelayedTyposInExpr(Expr *E,
                            llvm::function_ref<ExprResult(Expr *)> Filter) {
    return CorrectDelayedTyposInExpr(E, nullptr, Filter);
  }

  ExprResult
  CorrectDelayedTyposInExpr(ExprResult ER, VarDecl *InitDecl = nullptr,
                            llvm::function_ref<ExprResult(Expr *)> Filter =
                                [](Expr *E) -> ExprResult { return E; }) {
    return ER.isInvalid() ? ER : CorrectDelayedTyposInExpr(ER.get(), Filter);
  }

  ExprResult
  CorrectDelayedTyposInExpr(ExprResult ER,
                            llvm::function_ref<ExprResult(Expr *)> Filter) {
    return CorrectDelayedTyposInExpr(ER, nullptr, Filter);
  }

  void diagnoseTypo(const TypoCorrection &Correction,
                    const PartialDiagnostic &TypoDiag,
                    bool ErrorRecovery = true);

  void diagnoseTypo(const TypoCorrection &Correction,
                    const PartialDiagnostic &TypoDiag,
                    const PartialDiagnostic &PrevNote,
                    bool ErrorRecovery = true);

  void MarkTypoCorrectedFunctionDefinition(const NamedDecl *F);

  void FindAssociatedClassesAndNamespaces(SourceLocation InstantiationLoc,
                                          ArrayRef<Expr *> Args,
                                 AssociatedNamespaceSet &AssociatedNamespaces,
                                 AssociatedClassSet &AssociatedClasses);

  void FilterLookupForScope(LookupResult &R, DeclContext *Ctx, Scope *S,
                            bool ConsiderLinkage, bool AllowInlineNamespace);

  bool CheckRedeclarationModuleOwnership(NamedDecl *New, NamedDecl *Old);

  void DiagnoseAmbiguousLookup(LookupResult &Result);
  //@}

  ObjCInterfaceDecl *getObjCInterfaceDecl(IdentifierInfo *&Id,
                                          SourceLocation IdLoc,
                                          bool TypoCorrection = false);
  NamedDecl *LazilyCreateBuiltin(IdentifierInfo *II, unsigned ID,
                                 Scope *S, bool ForRedeclaration,
                                 SourceLocation Loc);
  NamedDecl *ImplicitlyDefineFunction(SourceLocation Loc, IdentifierInfo &II,
                                      Scope *S);
  void AddKnownFunctionAttributes(FunctionDecl *FD);

  // More parsing and symbol table subroutines.

  void ProcessPragmaWeak(Scope *S, Decl *D);
  // Decl attributes - this routine is the top level dispatcher.
  void ProcessDeclAttributes(Scope *S, Decl *D, const Declarator &PD);
  // Helper for delayed processing of attributes.
  void ProcessDeclAttributeDelayed(Decl *D,
                                   const ParsedAttributesView &AttrList);
  void ProcessDeclAttributeList(Scope *S, Decl *D,
                                const ParsedAttributesView &AL,
                                bool IncludeCXX11Attributes = true);
  bool ProcessAccessDeclAttributeList(AccessSpecDecl *ASDecl,
                                      const ParsedAttributesView &AttrList);

  void checkUnusedDeclAttributes(Declarator &D);

  /// Map any API notes provided for this declaration to attributes on the
  /// declaration.
  ///
  /// Triggered by declaration-attribute processing.
  void ProcessAPINotes(Decl *D);

  /// Determine if type T is a valid subject for a nonnull and similar
  /// attributes. By default, we look through references (the behavior used by
  /// nonnull), but if the second parameter is true, then we treat a reference
  /// type as valid.
  bool isValidPointerAttrType(QualType T, bool RefOkay = false);

  bool CheckRegparmAttr(const ParsedAttr &attr, unsigned &value);
  bool CheckCallingConvAttr(const ParsedAttr &attr, CallingConv &CC,
                            const FunctionDecl *FD = nullptr);
  bool CheckAttrTarget(const ParsedAttr &CurrAttr);
  bool CheckAttrNoArgs(const ParsedAttr &CurrAttr);
  bool checkStringLiteralArgumentAttr(const ParsedAttr &Attr, unsigned ArgNum,
                                      StringRef &Str,
                                      SourceLocation *ArgLocation = nullptr);
  bool checkSectionName(SourceLocation LiteralLoc, StringRef Str);
  bool checkTargetAttr(SourceLocation LiteralLoc, StringRef Str);
  bool checkMSInheritanceAttrOnDefinition(
      CXXRecordDecl *RD, SourceRange Range, bool BestCase,
      MSInheritanceAttr::Spelling SemanticSpelling);

  void CheckAlignasUnderalignment(Decl *D);

  /// Adjust the calling convention of a method to be the ABI default if it
  /// wasn't specified explicitly. This handles method types formed from
  /// function type typedefs and typename template arguments.
  void adjustMemberFunctionCC(QualType &T, bool IsStatic, bool IsCtorOrDtor,
                              SourceLocation Loc);

  // Check if there is an explicit attribute, but only look through parens.
  // The intent is to look for an attribute on the current declarator, but not
  // one that came from a typedef.
  bool hasExplicitCallingConv(QualType T);

  /// Get the outermost AttributedType node that sets a calling convention.
  /// Valid types should not have multiple attributes with different CCs.
  const AttributedType *getCallingConvAttributedType(QualType T) const;

  /// Check whether a nullability type specifier can be added to the given
  /// type through some means not written in source (e.g. API notes).
  ///
  /// \param type The type to which the nullability specifier will be
  /// added. On success, this type will be updated appropriately.
  ///
  /// \param nullability The nullability specifier to add.
  ///
  /// \param diagLoc The location to use for diagnostics.
  ///
  /// \param allowArrayTypes Whether to accept nullability specifiers on an
  /// array type (e.g., because it will decay to a pointer).
  ///
  /// \param overrideExisting Whether to override an existing, locally-specified
  /// nullability specifier rather than complaining about the conflict.
  ///
  /// \returns true if nullability cannot be applied, false otherwise.
  bool checkImplicitNullabilityTypeSpecifier(QualType &type,
                                             NullabilityKind nullability,
                                             SourceLocation diagLoc,
                                             bool allowArrayTypes,
                                             bool overrideExisting);

  /// Stmt attributes - this routine is the top level dispatcher.
  StmtResult ProcessStmtAttributes(Stmt *Stmt,
                                   const ParsedAttributesView &Attrs,
                                   SourceRange Range);

  void WarnConflictingTypedMethods(ObjCMethodDecl *Method,
                                   ObjCMethodDecl *MethodDecl,
                                   bool IsProtocolMethodDecl);

  void CheckConflictingOverridingMethod(ObjCMethodDecl *Method,
                                        ObjCMethodDecl *Overridden,
                                        bool IsProtocolMethodDecl);

  /// WarnExactTypedMethods - This routine issues a warning if method
  /// implementation declaration matches exactly that of its declaration.
  void WarnExactTypedMethods(ObjCMethodDecl *Method,
                             ObjCMethodDecl *MethodDecl,
                             bool IsProtocolMethodDecl);

  typedef llvm::SmallPtrSet<Selector, 8> SelectorSet;

  /// CheckImplementationIvars - This routine checks if the instance variables
  /// listed in the implementation match those listed in the interface.
  void CheckImplementationIvars(ObjCImplementationDecl *ImpDecl,
                                ObjCIvarDecl **Fields, unsigned nIvars,
                                SourceLocation Loc);

  /// ImplMethodsVsClassMethods - This is main routine to warn if any method
  /// remains unimplemented in the class or category \@implementation.
  void ImplMethodsVsClassMethods(Scope *S, ObjCImplDecl* IMPDecl,
                                 ObjCContainerDecl* IDecl,
                                 bool IncompleteImpl = false);

  /// DiagnoseUnimplementedProperties - This routine warns on those properties
  /// which must be implemented by this implementation.
  void DiagnoseUnimplementedProperties(Scope *S, ObjCImplDecl* IMPDecl,
                                       ObjCContainerDecl *CDecl,
                                       bool SynthesizeProperties);

  /// Diagnose any null-resettable synthesized setters.
  void diagnoseNullResettableSynthesizedSetters(const ObjCImplDecl *impDecl);

  /// DefaultSynthesizeProperties - This routine default synthesizes all
  /// properties which must be synthesized in the class's \@implementation.
  void DefaultSynthesizeProperties(Scope *S, ObjCImplDecl *IMPDecl,
                                   ObjCInterfaceDecl *IDecl,
                                   SourceLocation AtEnd);
  void DefaultSynthesizeProperties(Scope *S, Decl *D, SourceLocation AtEnd);

  /// IvarBacksCurrentMethodAccessor - This routine returns 'true' if 'IV' is
  /// an ivar synthesized for 'Method' and 'Method' is a property accessor
  /// declared in class 'IFace'.
  bool IvarBacksCurrentMethodAccessor(ObjCInterfaceDecl *IFace,
                                      ObjCMethodDecl *Method,
                                      ObjCIvarDecl *IV);

  /// DiagnoseUnusedBackingIvarInAccessor - Issue an 'unused' warning if ivar
  /// which backs the property is not used in the property's accessor.
  void DiagnoseUnusedBackingIvarInAccessor(Scope *S,
                                           const ObjCImplementationDecl *ImplD);

  /// GetIvarBackingPropertyAccessor - If method is a property setter/getter
  /// and it property has a backing ivar, returns this ivar; otherwise, returns
  /// NULL. It also returns ivar's property on success.
  ObjCIvarDecl *GetIvarBackingPropertyAccessor(const ObjCMethodDecl *Method,
                                     const ObjCPropertyDecl *&PDecl) const;

  /// Called by ActOnProperty to handle \@property declarations in
  /// class extensions.
  ObjCPropertyDecl *HandlePropertyInClassExtension(Scope *S,
                 SourceLocation AtLoc,
                 SourceLocation LParenLoc,
                 FieldDeclarator &FD,
                 Selector GetterSel,
                 SourceLocation GetterNameLoc,
                 Selector SetterSel,
                 SourceLocation SetterNameLoc,
                 const bool isReadWrite,
                 unsigned &Attributes,
                 const unsigned AttributesAsWritten,
                 QualType T,
                 TypeSourceInfo *TSI,
                 tok::ObjCKeywordKind MethodImplKind);

  /// Called by ActOnProperty and HandlePropertyInClassExtension to
  /// handle creating the ObjcPropertyDecl for a category or \@interface.
  ObjCPropertyDecl *CreatePropertyDecl(Scope *S,
                                       ObjCContainerDecl *CDecl,
                                       SourceLocation AtLoc,
                                       SourceLocation LParenLoc,
                                       FieldDeclarator &FD,
                                       Selector GetterSel,
                                       SourceLocation GetterNameLoc,
                                       Selector SetterSel,
                                       SourceLocation SetterNameLoc,
                                       const bool isReadWrite,
                                       const unsigned Attributes,
                                       const unsigned AttributesAsWritten,
                                       QualType T,
                                       TypeSourceInfo *TSI,
                                       tok::ObjCKeywordKind MethodImplKind,
                                       DeclContext *lexicalDC = nullptr);

  /// AtomicPropertySetterGetterRules - This routine enforces the rule (via
  /// warning) when atomic property has one but not the other user-declared
  /// setter or getter.
  void AtomicPropertySetterGetterRules(ObjCImplDecl* IMPDecl,
                                       ObjCInterfaceDecl* IDecl);

  void DiagnoseOwningPropertyGetterSynthesis(const ObjCImplementationDecl *D);

  void DiagnoseMissingDesignatedInitOverrides(
                                          const ObjCImplementationDecl *ImplD,
                                          const ObjCInterfaceDecl *IFD);

  void DiagnoseDuplicateIvars(ObjCInterfaceDecl *ID, ObjCInterfaceDecl *SID);

  enum MethodMatchStrategy {
    MMS_loose,
    MMS_strict
  };

  /// MatchTwoMethodDeclarations - Checks if two methods' type match and
  /// returns true, or false, accordingly.
  bool MatchTwoMethodDeclarations(const ObjCMethodDecl *Method,
                                  const ObjCMethodDecl *PrevMethod,
                                  MethodMatchStrategy strategy = MMS_strict);

  /// MatchAllMethodDeclarations - Check methods declared in interface or
  /// protocol against those declared in their implementations.
  void MatchAllMethodDeclarations(const SelectorSet &InsMap,
                                  const SelectorSet &ClsMap,
                                  SelectorSet &InsMapSeen,
                                  SelectorSet &ClsMapSeen,
                                  ObjCImplDecl* IMPDecl,
                                  ObjCContainerDecl* IDecl,
                                  bool &IncompleteImpl,
                                  bool ImmediateClass,
                                  bool WarnCategoryMethodImpl=false);

  /// CheckCategoryVsClassMethodMatches - Checks that methods implemented in
  /// category matches with those implemented in its primary class and
  /// warns each time an exact match is found.
  void CheckCategoryVsClassMethodMatches(ObjCCategoryImplDecl *CatIMP);

  /// Add the given method to the list of globally-known methods.
  void addMethodToGlobalList(ObjCMethodList *List, ObjCMethodDecl *Method);

private:
  /// AddMethodToGlobalPool - Add an instance or factory method to the global
  /// pool. See description of AddInstanceMethodToGlobalPool.
  void AddMethodToGlobalPool(ObjCMethodDecl *Method, bool impl, bool instance);

  /// LookupMethodInGlobalPool - Returns the instance or factory method and
  /// optionally warns if there are multiple signatures.
  ObjCMethodDecl *LookupMethodInGlobalPool(Selector Sel, SourceRange R,
                                           bool receiverIdOrClass,
                                           bool instance);

public:
  /// - Returns instance or factory methods in global method pool for
  /// given selector. It checks the desired kind first, if none is found, and
  /// parameter checkTheOther is set, it then checks the other kind. If no such
  /// method or only one method is found, function returns false; otherwise, it
  /// returns true.
  bool
  CollectMultipleMethodsInGlobalPool(Selector Sel,
                                     SmallVectorImpl<ObjCMethodDecl*>& Methods,
                                     bool InstanceFirst, bool CheckTheOther,
                                     const ObjCObjectType *TypeBound = nullptr);

  bool
  AreMultipleMethodsInGlobalPool(Selector Sel, ObjCMethodDecl *BestMethod,
                                 SourceRange R, bool receiverIdOrClass,
                                 SmallVectorImpl<ObjCMethodDecl*>& Methods);

  void
  DiagnoseMultipleMethodInGlobalPool(SmallVectorImpl<ObjCMethodDecl*> &Methods,
                                     Selector Sel, SourceRange R,
                                     bool receiverIdOrClass);

private:
  /// - Returns a selector which best matches given argument list or
  /// nullptr if none could be found
  ObjCMethodDecl *SelectBestMethod(Selector Sel, MultiExprArg Args,
                                   bool IsInstance,
                                   SmallVectorImpl<ObjCMethodDecl*>& Methods);

  /// Record the typo correction failure and return an empty correction.
  TypoCorrection FailedCorrection(IdentifierInfo *Typo, SourceLocation TypoLoc,
                                  bool RecordFailure = true) {
    if (RecordFailure)
      TypoCorrectionFailures[Typo].insert(TypoLoc);
    return TypoCorrection();
  }

public:
  /// AddInstanceMethodToGlobalPool - All instance methods in a translation
  /// unit are added to a global pool. This allows us to efficiently associate
  /// a selector with a method declaration for purposes of typechecking
  /// messages sent to "id" (where the class of the object is unknown).
  void AddInstanceMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) {
    AddMethodToGlobalPool(Method, impl, /*instance*/true);
  }

  /// AddFactoryMethodToGlobalPool - Same as above, but for factory methods.
  void AddFactoryMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) {
    AddMethodToGlobalPool(Method, impl, /*instance*/false);
  }

  /// AddAnyMethodToGlobalPool - Add any method, instance or factory to global
  /// pool.
  void AddAnyMethodToGlobalPool(Decl *D);

  /// LookupInstanceMethodInGlobalPool - Returns the method and warns if
  /// there are multiple signatures.
  ObjCMethodDecl *LookupInstanceMethodInGlobalPool(Selector Sel, SourceRange R,
                                                   bool receiverIdOrClass=false) {
    return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass,
                                    /*instance*/true);
  }

  /// LookupFactoryMethodInGlobalPool - Returns the method and warns if
  /// there are multiple signatures.
  ObjCMethodDecl *LookupFactoryMethodInGlobalPool(Selector Sel, SourceRange R,
                                                  bool receiverIdOrClass=false) {
    return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass,
                                    /*instance*/false);
  }

  const ObjCMethodDecl *SelectorsForTypoCorrection(Selector Sel,
                              QualType ObjectType=QualType());

  /// LookupImplementedMethodInGlobalPool - Returns the method which has an
  /// implementation.
  ObjCMethodDecl *LookupImplementedMethodInGlobalPool(Selector Sel);

  /// CollectIvarsToConstructOrDestruct - Collect those ivars which require
  /// initialization.
  void CollectIvarsToConstructOrDestruct(ObjCInterfaceDecl *OI,
                                  SmallVectorImpl<ObjCIvarDecl*> &Ivars);

  //===--------------------------------------------------------------------===//
  // Statement Parsing Callbacks: SemaStmt.cpp.
public:
  /// Thin wrapper around an Expr* used to mark an expression as a
  /// finished "full expression" argument.
  class FullExprArg {
  public:
    FullExprArg() : E(nullptr) { }
    FullExprArg(Sema &actions) : E(nullptr) { }

    ExprResult release() {
      return E;
    }

    Expr *get() const { return E; }

    Expr *operator->() {
      return E;
    }

  private:
    // FIXME: No need to make the entire Sema class a friend when it's just
    // Sema::MakeFullExpr that needs access to the constructor below.
    friend class Sema;

    explicit FullExprArg(Expr *expr) : E(expr) {}

    Expr *E;
  };

  FullExprArg MakeFullExpr(Expr *Arg) {
    return MakeFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation());
  }
  FullExprArg MakeFullExpr(Expr *Arg, SourceLocation CC) {
    return FullExprArg(
        ActOnFinishFullExpr(Arg, CC, /*DiscardedValue*/ false).get());
  }
  FullExprArg MakeFullDiscardedValueExpr(Expr *Arg) {
    ExprResult FE =
        ActOnFinishFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation(),
                            /*DiscardedValue*/ true);
    return FullExprArg(FE.get());
  }

  StmtResult ActOnExprStmt(ExprResult Arg, bool DiscardedValue = true);
  StmtResult ActOnExprStmtError();

  StmtResult ActOnNullStmt(SourceLocation SemiLoc,
                           bool HasLeadingEmptyMacro = false);

  void ActOnStartOfCompoundStmt(bool IsStmtExpr);
  void ActOnFinishOfCompoundStmt();
  StmtResult ActOnCompoundStmt(SourceLocation L, SourceLocation R,
                               ArrayRef<Stmt *> Elts, bool isStmtExpr);

  /// A RAII object to enter scope of a compound statement.
  class CompoundScopeRAII {
  public:
    CompoundScopeRAII(Sema &S, bool IsStmtExpr = false) : S(S) {
      S.ActOnStartOfCompoundStmt(IsStmtExpr);
    }

    ~CompoundScopeRAII() {
      S.ActOnFinishOfCompoundStmt();
    }

  private:
    Sema &S;
  };

  /// An RAII helper that pops a function scope on exit.
  /// Pops the current FunctionScopeInfo (via S.PopFunctionScopeInfo()) when
  /// destroyed, unless disable() was called first.
  struct FunctionScopeRAII {
    Sema &S;
    bool Active;  // when false, the destructor is a no-op
    FunctionScopeRAII(Sema &S) : S(S), Active(true) {}
    ~FunctionScopeRAII() {
      if (Active)
        S.PopFunctionScopeInfo();
    }
    void disable() { Active = false; }
  };

  StmtResult ActOnDeclStmt(DeclGroupPtrTy Decl, SourceLocation StartLoc,
                           SourceLocation EndLoc);
  void ActOnForEachDeclStmt(DeclGroupPtrTy Decl);
  StmtResult ActOnForEachLValueExpr(Expr *E);

  ExprResult ActOnCaseExpr(SourceLocation CaseLoc, ExprResult Val);
  StmtResult ActOnCaseStmt(SourceLocation CaseLoc, ExprResult LHS,
                           SourceLocation DotDotDotLoc, ExprResult RHS,
                           SourceLocation ColonLoc);
  void ActOnCaseStmtBody(Stmt *CaseStmt, Stmt *SubStmt);

  StmtResult ActOnDefaultStmt(SourceLocation DefaultLoc,
                              SourceLocation ColonLoc,
                              Stmt *SubStmt, Scope *CurScope);
  StmtResult ActOnLabelStmt(SourceLocation IdentLoc, LabelDecl *TheDecl,
                            SourceLocation ColonLoc, Stmt *SubStmt);

  StmtResult ActOnAttributedStmt(SourceLocation AttrLoc,
                                 ArrayRef<const Attr*> Attrs,
                                 Stmt *SubStmt);

  class ConditionResult;

  StmtResult ActOnIfStmt(SourceLocation IfLoc, bool IsConstexpr,
                         Stmt *InitStmt,
                         ConditionResult Cond, Stmt *ThenVal,
                         SourceLocation ElseLoc, Stmt *ElseVal);
  StmtResult BuildIfStmt(SourceLocation IfLoc, bool IsConstexpr,
                         Stmt *InitStmt,
                         ConditionResult Cond, Stmt *ThenVal,
                         SourceLocation ElseLoc, Stmt *ElseVal);
  StmtResult ActOnStartOfSwitchStmt(SourceLocation SwitchLoc,
                                    Stmt *InitStmt,
                                    ConditionResult Cond);
  StmtResult ActOnFinishSwitchStmt(SourceLocation SwitchLoc,
                                   Stmt *Switch, Stmt *Body);
  StmtResult ActOnWhileStmt(SourceLocation WhileLoc, ConditionResult Cond,
                            Stmt *Body);
  StmtResult ActOnDoStmt(SourceLocation DoLoc, Stmt *Body,
                         SourceLocation WhileLoc, SourceLocation CondLParen,
                         Expr *Cond, SourceLocation CondRParen);
  StmtResult ActOnForStmt(SourceLocation ForLoc,
                          SourceLocation LParenLoc,
                          Stmt *First,
                          ConditionResult Second,
                          FullExprArg Third,
                          SourceLocation RParenLoc,
                          Stmt *Body);
  ExprResult CheckObjCForCollectionOperand(SourceLocation forLoc,
                                           Expr *collection);
  StmtResult
ActOnObjCForCollectionStmt(SourceLocation ForColLoc, Stmt *First, Expr *collection, SourceLocation RParenLoc); StmtResult FinishObjCForCollectionStmt(Stmt *ForCollection, Stmt *Body); enum BuildForRangeKind { /// Initial building of a for-range statement. BFRK_Build, /// Instantiation or recovery rebuild of a for-range statement. Don't /// attempt any typo-correction. BFRK_Rebuild, /// Determining whether a for-range statement could be built. Avoid any /// unnecessary or irreversible actions. BFRK_Check }; StmtResult ActOnCXXForRangeStmt(Scope *S, SourceLocation ForLoc, SourceLocation CoawaitLoc, Stmt *InitStmt, Stmt *LoopVar, SourceLocation ColonLoc, Expr *Collection, SourceLocation RParenLoc, BuildForRangeKind Kind); StmtResult BuildCXXForRangeStmt(SourceLocation ForLoc, SourceLocation CoawaitLoc, Stmt *InitStmt, SourceLocation ColonLoc, Stmt *RangeDecl, Stmt *Begin, Stmt *End, Expr *Cond, Expr *Inc, Stmt *LoopVarDecl, SourceLocation RParenLoc, BuildForRangeKind Kind); StmtResult FinishCXXForRangeStmt(Stmt *ForRange, Stmt *Body); StmtResult ActOnGotoStmt(SourceLocation GotoLoc, SourceLocation LabelLoc, LabelDecl *TheDecl); StmtResult ActOnIndirectGotoStmt(SourceLocation GotoLoc, SourceLocation StarLoc, Expr *DestExp); StmtResult ActOnContinueStmt(SourceLocation ContinueLoc, Scope *CurScope); StmtResult ActOnBreakStmt(SourceLocation BreakLoc, Scope *CurScope); void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope, CapturedRegionKind Kind, unsigned NumParams); typedef std::pair<StringRef, QualType> CapturedParamNameType; void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope, CapturedRegionKind Kind, ArrayRef<CapturedParamNameType> Params, unsigned OpenMPCaptureLevel = 0); StmtResult ActOnCapturedRegionEnd(Stmt *S); void ActOnCapturedRegionError(); RecordDecl *CreateCapturedStmtRecordDecl(CapturedDecl *&CD, SourceLocation Loc, unsigned NumParams); enum CopyElisionSemanticsKind { CES_Strict = 0, CES_AllowParameters = 1, CES_AllowDifferentTypes 
= 2, CES_AllowExceptionVariables = 4, CES_FormerDefault = (CES_AllowParameters), CES_Default = (CES_AllowParameters | CES_AllowDifferentTypes), CES_AsIfByStdMove = (CES_AllowParameters | CES_AllowDifferentTypes | CES_AllowExceptionVariables), }; VarDecl *getCopyElisionCandidate(QualType ReturnType, Expr *E, CopyElisionSemanticsKind CESK); bool isCopyElisionCandidate(QualType ReturnType, const VarDecl *VD, CopyElisionSemanticsKind CESK); StmtResult ActOnReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp, Scope *CurScope); StmtResult BuildReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp); StmtResult ActOnCapScopeReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp); StmtResult ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple, bool IsVolatile, unsigned NumOutputs, unsigned NumInputs, IdentifierInfo **Names, MultiExprArg Constraints, MultiExprArg Exprs, Expr *AsmString, MultiExprArg Clobbers, unsigned NumLabels, SourceLocation RParenLoc); void FillInlineAsmIdentifierInfo(Expr *Res, llvm::InlineAsmIdentifierInfo &Info); ExprResult LookupInlineAsmIdentifier(CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Id, bool IsUnevaluatedContext); bool LookupInlineAsmField(StringRef Base, StringRef Member, unsigned &Offset, SourceLocation AsmLoc); ExprResult LookupInlineAsmVarDeclField(Expr *RefExpr, StringRef Member, SourceLocation AsmLoc); StmtResult ActOnMSAsmStmt(SourceLocation AsmLoc, SourceLocation LBraceLoc, ArrayRef<Token> AsmToks, StringRef AsmString, unsigned NumOutputs, unsigned NumInputs, ArrayRef<StringRef> Constraints, ArrayRef<StringRef> Clobbers, ArrayRef<Expr*> Exprs, SourceLocation EndLoc); LabelDecl *GetOrCreateMSAsmLabel(StringRef ExternalLabelName, SourceLocation Location, bool AlwaysCreate); VarDecl *BuildObjCExceptionDecl(TypeSourceInfo *TInfo, QualType ExceptionType, SourceLocation StartLoc, SourceLocation IdLoc, IdentifierInfo *Id, bool Invalid = false); Decl *ActOnObjCExceptionDecl(Scope *S, Declarator &D); StmtResult 
ActOnObjCAtCatchStmt(SourceLocation AtLoc, SourceLocation RParen, Decl *Parm, Stmt *Body); StmtResult ActOnObjCAtFinallyStmt(SourceLocation AtLoc, Stmt *Body); StmtResult ActOnObjCAtTryStmt(SourceLocation AtLoc, Stmt *Try, MultiStmtArg Catch, Stmt *Finally); StmtResult BuildObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw); StmtResult ActOnObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw, Scope *CurScope); ExprResult ActOnObjCAtSynchronizedOperand(SourceLocation atLoc, Expr *operand); StmtResult ActOnObjCAtSynchronizedStmt(SourceLocation AtLoc, Expr *SynchExpr, Stmt *SynchBody); StmtResult ActOnObjCAutoreleasePoolStmt(SourceLocation AtLoc, Stmt *Body); VarDecl *BuildExceptionDeclaration(Scope *S, TypeSourceInfo *TInfo, SourceLocation StartLoc, SourceLocation IdLoc, IdentifierInfo *Id); Decl *ActOnExceptionDeclarator(Scope *S, Declarator &D); StmtResult ActOnCXXCatchBlock(SourceLocation CatchLoc, Decl *ExDecl, Stmt *HandlerBlock); StmtResult ActOnCXXTryBlock(SourceLocation TryLoc, Stmt *TryBlock, ArrayRef<Stmt *> Handlers); StmtResult ActOnSEHTryBlock(bool IsCXXTry, // try (true) or __try (false) ? SourceLocation TryLoc, Stmt *TryBlock, Stmt *Handler); StmtResult ActOnSEHExceptBlock(SourceLocation Loc, Expr *FilterExpr, Stmt *Block); void ActOnStartSEHFinallyBlock(); void ActOnAbortSEHFinallyBlock(); StmtResult ActOnFinishSEHFinallyBlock(SourceLocation Loc, Stmt *Block); StmtResult ActOnSEHLeaveStmt(SourceLocation Loc, Scope *CurScope); void DiagnoseReturnInConstructorExceptionHandler(CXXTryStmt *TryBlock); bool ShouldWarnIfUnusedFileScopedDecl(const DeclaratorDecl *D) const; /// If it's a file scoped decl that must warn if not used, keep track /// of it. void MarkUnusedFileScopedDecl(const DeclaratorDecl *D); /// DiagnoseUnusedExprResult - If the statement passed in is an expression /// whose result is unused, warn. 
void DiagnoseUnusedExprResult(const Stmt *S); void DiagnoseUnusedNestedTypedefs(const RecordDecl *D); void DiagnoseUnusedDecl(const NamedDecl *ND); /// Emit \p DiagID if statement located on \p StmtLoc has a suspicious null /// statement as a \p Body, and it is located on the same line. /// /// This helps prevent bugs due to typos, such as: /// if (condition); /// do_stuff(); void DiagnoseEmptyStmtBody(SourceLocation StmtLoc, const Stmt *Body, unsigned DiagID); /// Warn if a for/while loop statement \p S, which is followed by /// \p PossibleBody, has a suspicious null statement as a body. void DiagnoseEmptyLoopBody(const Stmt *S, const Stmt *PossibleBody); /// Warn if a value is moved to itself. void DiagnoseSelfMove(const Expr *LHSExpr, const Expr *RHSExpr, SourceLocation OpLoc); /// Warn if we're implicitly casting from a _Nullable pointer type to a /// _Nonnull one. void diagnoseNullableToNonnullConversion(QualType DstType, QualType SrcType, SourceLocation Loc); /// Warn when implicitly casting 0 to nullptr. 
void diagnoseZeroToNullptrConversion(CastKind Kind, const Expr *E);

/// Begin delaying diagnostics for a declaration; the returned state must be
/// handed back to PopParsingDeclaration. Delegates to DelayedDiagnostics.
ParsingDeclState PushParsingDeclaration(sema::DelayedDiagnosticPool &pool) {
  return DelayedDiagnostics.push(pool);
}
void PopParsingDeclaration(ParsingDeclState state, Decl *decl);

typedef ProcessingContextState ParsingClassState;
/// Begin an undelayed-diagnostics region for a class being parsed; pair with
/// PopParsingClass.
ParsingClassState PushParsingClass() {
  return DelayedDiagnostics.pushUndelayed();
}
void PopParsingClass(ParsingClassState state) {
  DelayedDiagnostics.popUndelayed(state);
}

void redelayDiagnostics(sema::DelayedDiagnosticPool &pool);

void DiagnoseAvailabilityOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs,
                                const ObjCInterfaceDecl *UnknownObjCClass,
                                bool ObjCPropertyAccess,
                                bool AvoidPartialAvailabilityChecks = false,
                                ObjCInterfaceDecl *ClassReceiver = nullptr);

bool makeUnavailableInSystemHeader(SourceLocation loc,
                                   UnavailableAttr::ImplicitReason reason);

/// Issue any -Wunguarded-availability warnings in \c FD
void DiagnoseUnguardedAvailabilityViolations(Decl *FD);

//===--------------------------------------------------------------------===//
// Expression Parsing Callbacks: SemaExpr.cpp.
bool CanUseDecl(NamedDecl *D, bool TreatUnavailableAsInvalid); bool DiagnoseUseOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs, const ObjCInterfaceDecl *UnknownObjCClass = nullptr, bool ObjCPropertyAccess = false, bool AvoidPartialAvailabilityChecks = false, ObjCInterfaceDecl *ClassReciever = nullptr); void NoteDeletedFunction(FunctionDecl *FD); void NoteDeletedInheritingConstructor(CXXConstructorDecl *CD); bool DiagnosePropertyAccessorMismatch(ObjCPropertyDecl *PD, ObjCMethodDecl *Getter, SourceLocation Loc); void DiagnoseSentinelCalls(NamedDecl *D, SourceLocation Loc, ArrayRef<Expr *> Args); void PushExpressionEvaluationContext( ExpressionEvaluationContext NewContext, Decl *LambdaContextDecl = nullptr, ExpressionEvaluationContextRecord::ExpressionKind Type = ExpressionEvaluationContextRecord::EK_Other); enum ReuseLambdaContextDecl_t { ReuseLambdaContextDecl }; void PushExpressionEvaluationContext( ExpressionEvaluationContext NewContext, ReuseLambdaContextDecl_t, ExpressionEvaluationContextRecord::ExpressionKind Type = ExpressionEvaluationContextRecord::EK_Other); void PopExpressionEvaluationContext(); void DiscardCleanupsInEvaluationContext(); ExprResult TransformToPotentiallyEvaluated(Expr *E); ExprResult HandleExprEvaluationContextForTypeof(Expr *E); ExprResult CheckUnevaluatedOperand(Expr *E); void CheckUnusedVolatileAssignment(Expr *E); ExprResult ActOnConstantExpression(ExprResult Res); // Functions for marking a declaration referenced. These functions also // contain the relevant logic for marking if a reference to a function or // variable is an odr-use (in the C++11 sense). There are separate variants // for expressions referring to a decl; these exist because odr-use marking // needs to be delayed for some constant variables when we build one of the // named expressions. // // MightBeOdrUse indicates whether the use could possibly be an odr-use, and // should usually be true. 
This only needs to be set to false if the lack of // odr-use cannot be determined from the current context (for instance, // because the name denotes a virtual function and was written without an // explicit nested-name-specifier). void MarkAnyDeclReferenced(SourceLocation Loc, Decl *D, bool MightBeOdrUse); void MarkFunctionReferenced(SourceLocation Loc, FunctionDecl *Func, bool MightBeOdrUse = true); void MarkVariableReferenced(SourceLocation Loc, VarDecl *Var); void MarkDeclRefReferenced(DeclRefExpr *E, const Expr *Base = nullptr); void MarkMemberReferenced(MemberExpr *E); void MarkFunctionParmPackReferenced(FunctionParmPackExpr *E); void MarkCaptureUsedInEnclosingContext(VarDecl *Capture, SourceLocation Loc, unsigned CapturingScopeIndex); ExprResult CheckLValueToRValueConversionOperand(Expr *E); void CleanupVarDeclMarking(); enum TryCaptureKind { TryCapture_Implicit, TryCapture_ExplicitByVal, TryCapture_ExplicitByRef }; /// Try to capture the given variable. /// /// \param Var The variable to capture. /// /// \param Loc The location at which the capture occurs. /// /// \param Kind The kind of capture, which may be implicit (for either a /// block or a lambda), or explicit by-value or by-reference (for a lambda). /// /// \param EllipsisLoc The location of the ellipsis, if one is provided in /// an explicit lambda capture. /// /// \param BuildAndDiagnose Whether we are actually supposed to add the /// captures or diagnose errors. If false, this routine merely check whether /// the capture can occur without performing the capture itself or complaining /// if the variable cannot be captured. /// /// \param CaptureType Will be set to the type of the field used to capture /// this variable in the innermost block or lambda. Only valid when the /// variable can be captured. /// /// \param DeclRefType Will be set to the type of a reference to the capture /// from within the current scope. Only valid when the variable can be /// captured. 
/// /// \param FunctionScopeIndexToStopAt If non-null, it points to the index /// of the FunctionScopeInfo stack beyond which we do not attempt to capture. /// This is useful when enclosing lambdas must speculatively capture /// variables that may or may not be used in certain specializations of /// a nested generic lambda. /// /// \returns true if an error occurred (i.e., the variable cannot be /// captured) and false if the capture succeeded. bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc, TryCaptureKind Kind, SourceLocation EllipsisLoc, bool BuildAndDiagnose, QualType &CaptureType, QualType &DeclRefType, const unsigned *const FunctionScopeIndexToStopAt); /// Try to capture the given variable. bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc, TryCaptureKind Kind = TryCapture_Implicit, SourceLocation EllipsisLoc = SourceLocation()); /// Checks if the variable must be captured. bool NeedToCaptureVariable(VarDecl *Var, SourceLocation Loc); /// Given a variable, determine the type that a reference to that /// variable will have in the given scope. QualType getCapturedDeclRefType(VarDecl *Var, SourceLocation Loc); /// Mark all of the declarations referenced within a particular AST node as /// referenced. Used when template instantiation instantiates a non-dependent /// type -- entities referenced by the type are now referenced. void MarkDeclarationsReferencedInType(SourceLocation Loc, QualType T); void MarkDeclarationsReferencedInExpr(Expr *E, bool SkipLocalVariables = false); /// Try to recover by turning the given expression into a /// call. Returns true if recovery was attempted or an error was /// emitted; this may also leave the ExprResult invalid. bool tryToRecoverWithCall(ExprResult &E, const PartialDiagnostic &PD, bool ForceComplain = false, bool (*IsPlausibleResult)(QualType) = nullptr); /// Figure out if an expression could be turned into a call. 
bool tryExprAsCall(Expr &E, QualType &ZeroArgCallReturnTy, UnresolvedSetImpl &NonTemplateOverloads); /// Conditionally issue a diagnostic based on the current /// evaluation context. /// /// \param Statement If Statement is non-null, delay reporting the /// diagnostic until the function body is parsed, and then do a basic /// reachability analysis to determine if the statement is reachable. /// If it is unreachable, the diagnostic will not be emitted. bool DiagRuntimeBehavior(SourceLocation Loc, const Stmt *Statement, const PartialDiagnostic &PD); /// Similar, but diagnostic is only produced if all the specified statements /// are reachable. bool DiagRuntimeBehavior(SourceLocation Loc, ArrayRef<const Stmt*> Stmts, const PartialDiagnostic &PD); // Primary Expressions. SourceRange getExprRange(Expr *E) const; ExprResult ActOnIdExpression( Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Id, bool HasTrailingLParen, bool IsAddressOfOperand, CorrectionCandidateCallback *CCC = nullptr, bool IsInlineAsmIdentifier = false, Token *KeywordReplacement = nullptr); void DecomposeUnqualifiedId(const UnqualifiedId &Id, TemplateArgumentListInfo &Buffer, DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *&TemplateArgs); bool DiagnoseEmptyLookup(Scope *S, CXXScopeSpec &SS, LookupResult &R, CorrectionCandidateCallback &CCC, TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr, ArrayRef<Expr *> Args = None, TypoExpr **Out = nullptr); DeclResult LookupIvarInObjCMethod(LookupResult &Lookup, Scope *S, IdentifierInfo *II); ExprResult BuildIvarRefExpr(Scope *S, SourceLocation Loc, ObjCIvarDecl *IV); ExprResult LookupInObjCMethod(LookupResult &LookUp, Scope *S, IdentifierInfo *II, bool AllowBuiltinCreation=false); ExprResult ActOnDependentIdExpression(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo, bool isAddressOfOperand, const TemplateArgumentListInfo *TemplateArgs); /// If \p D cannot be odr-used in the 
current expression evaluation context, /// return a reason explaining why. Otherwise, return NOUR_None. NonOdrUseReason getNonOdrUseReasonInCurrentContext(ValueDecl *D); DeclRefExpr *BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK, SourceLocation Loc, const CXXScopeSpec *SS = nullptr); DeclRefExpr * BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK, const DeclarationNameInfo &NameInfo, const CXXScopeSpec *SS = nullptr, NamedDecl *FoundD = nullptr, SourceLocation TemplateKWLoc = SourceLocation(), const TemplateArgumentListInfo *TemplateArgs = nullptr); DeclRefExpr * BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK, const DeclarationNameInfo &NameInfo, NestedNameSpecifierLoc NNS, NamedDecl *FoundD = nullptr, SourceLocation TemplateKWLoc = SourceLocation(), const TemplateArgumentListInfo *TemplateArgs = nullptr); ExprResult BuildAnonymousStructUnionMemberReference( const CXXScopeSpec &SS, SourceLocation nameLoc, IndirectFieldDecl *indirectField, DeclAccessPair FoundDecl = DeclAccessPair::make(nullptr, AS_none), Expr *baseObjectExpr = nullptr, SourceLocation opLoc = SourceLocation()); ExprResult BuildPossibleImplicitMemberExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R, const TemplateArgumentListInfo *TemplateArgs, const Scope *S); ExprResult BuildImplicitMemberExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R, const TemplateArgumentListInfo *TemplateArgs, bool IsDefiniteInstance, const Scope *S); bool UseArgumentDependentLookup(const CXXScopeSpec &SS, const LookupResult &R, bool HasTrailingLParen); ExprResult BuildQualifiedDeclarationNameExpr(CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, bool IsAddressOfOperand, const Scope *S, TypeSourceInfo **RecoveryTSI = nullptr); ExprResult BuildDependentDeclRefExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs); ExprResult 
BuildDeclarationNameExpr(const CXXScopeSpec &SS, LookupResult &R, bool NeedsADL, bool AcceptInvalidDecl = false); ExprResult BuildDeclarationNameExpr( const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, NamedDecl *D, NamedDecl *FoundD = nullptr, const TemplateArgumentListInfo *TemplateArgs = nullptr, bool AcceptInvalidDecl = false); ExprResult BuildLiteralOperatorCall(LookupResult &R, DeclarationNameInfo &SuffixInfo, ArrayRef<Expr *> Args, SourceLocation LitEndLoc, TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr); ExprResult BuildPredefinedExpr(SourceLocation Loc, PredefinedExpr::IdentKind IK); ExprResult ActOnPredefinedExpr(SourceLocation Loc, tok::TokenKind Kind); ExprResult ActOnIntegerConstant(SourceLocation Loc, uint64_t Val); bool CheckLoopHintExpr(Expr *E, SourceLocation Loc); ExprResult ActOnNumericConstant(const Token &Tok, Scope *UDLScope = nullptr); ExprResult ActOnCharacterConstant(const Token &Tok, Scope *UDLScope = nullptr); ExprResult ActOnParenExpr(SourceLocation L, SourceLocation R, Expr *E); ExprResult ActOnParenListExpr(SourceLocation L, SourceLocation R, MultiExprArg Val); /// ActOnStringLiteral - The specified tokens were lexed as pasted string /// fragments (e.g. "foo" "bar" L"baz"). ExprResult ActOnStringLiteral(ArrayRef<Token> StringToks, Scope *UDLScope = nullptr); ExprResult ActOnGenericSelectionExpr(SourceLocation KeyLoc, SourceLocation DefaultLoc, SourceLocation RParenLoc, Expr *ControllingExpr, ArrayRef<ParsedType> ArgTypes, ArrayRef<Expr *> ArgExprs); ExprResult CreateGenericSelectionExpr(SourceLocation KeyLoc, SourceLocation DefaultLoc, SourceLocation RParenLoc, Expr *ControllingExpr, ArrayRef<TypeSourceInfo *> Types, ArrayRef<Expr *> Exprs); // Binary/Unary Operators. 'Tok' is the token for the operator. 
ExprResult CreateBuiltinUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc, Expr *InputExpr); ExprResult BuildUnaryOp(Scope *S, SourceLocation OpLoc, UnaryOperatorKind Opc, Expr *Input); ExprResult ActOnUnaryOp(Scope *S, SourceLocation OpLoc, tok::TokenKind Op, Expr *Input); bool isQualifiedMemberAccess(Expr *E); QualType CheckAddressOfOperand(ExprResult &Operand, SourceLocation OpLoc); ExprResult CreateUnaryExprOrTypeTraitExpr(TypeSourceInfo *TInfo, SourceLocation OpLoc, UnaryExprOrTypeTrait ExprKind, SourceRange R); ExprResult CreateUnaryExprOrTypeTraitExpr(Expr *E, SourceLocation OpLoc, UnaryExprOrTypeTrait ExprKind); ExprResult ActOnUnaryExprOrTypeTraitExpr(SourceLocation OpLoc, UnaryExprOrTypeTrait ExprKind, bool IsType, void *TyOrEx, SourceRange ArgRange); ExprResult CheckPlaceholderExpr(Expr *E); bool CheckVecStepExpr(Expr *E); bool CheckUnaryExprOrTypeTraitOperand(Expr *E, UnaryExprOrTypeTrait ExprKind); bool CheckUnaryExprOrTypeTraitOperand(QualType ExprType, SourceLocation OpLoc, SourceRange ExprRange, UnaryExprOrTypeTrait ExprKind); ExprResult ActOnSizeofParameterPackExpr(Scope *S, SourceLocation OpLoc, IdentifierInfo &Name, SourceLocation NameLoc, SourceLocation RParenLoc); ExprResult ActOnPostfixUnaryOp(Scope *S, SourceLocation OpLoc, tok::TokenKind Kind, Expr *Input); ExprResult ActOnArraySubscriptExpr(Scope *S, Expr *Base, SourceLocation LLoc, Expr *Idx, SourceLocation RLoc); ExprResult CreateBuiltinArraySubscriptExpr(Expr *Base, SourceLocation LLoc, Expr *Idx, SourceLocation RLoc); ExprResult ActOnOMPArraySectionExpr(Expr *Base, SourceLocation LBLoc, Expr *LowerBound, SourceLocation ColonLoc, Expr *Length, SourceLocation RBLoc); // This struct is for use by ActOnMemberAccess to allow // BuildMemberReferenceExpr to be able to reinvoke ActOnMemberAccess after // changing the access operator from a '.' to a '->' (to see if that is the // change needed to fix an error about an unknown member, e.g. when the class // defines a custom operator->). 
struct ActOnMemberAccessExtraArgs { Scope *S; UnqualifiedId &Id; Decl *ObjCImpDecl; }; ExprResult BuildMemberReferenceExpr( Expr *Base, QualType BaseType, SourceLocation OpLoc, bool IsArrow, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, NamedDecl *FirstQualifierInScope, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs, const Scope *S, ActOnMemberAccessExtraArgs *ExtraArgs = nullptr); ExprResult BuildMemberReferenceExpr(Expr *Base, QualType BaseType, SourceLocation OpLoc, bool IsArrow, const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, NamedDecl *FirstQualifierInScope, LookupResult &R, const TemplateArgumentListInfo *TemplateArgs, const Scope *S, bool SuppressQualifierCheck = false, ActOnMemberAccessExtraArgs *ExtraArgs = nullptr); ExprResult BuildFieldReferenceExpr(Expr *BaseExpr, bool IsArrow, SourceLocation OpLoc, const CXXScopeSpec &SS, FieldDecl *Field, DeclAccessPair FoundDecl, const DeclarationNameInfo &MemberNameInfo); ExprResult PerformMemberExprBaseConversion(Expr *Base, bool IsArrow); bool CheckQualifiedMemberReference(Expr *BaseExpr, QualType BaseType, const CXXScopeSpec &SS, const LookupResult &R); ExprResult ActOnDependentMemberExpr(Expr *Base, QualType BaseType, bool IsArrow, SourceLocation OpLoc, const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, NamedDecl *FirstQualifierInScope, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs); ExprResult ActOnMemberAccessExpr(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Member, Decl *ObjCImpDecl); MemberExpr * BuildMemberExpr(Expr *Base, bool IsArrow, SourceLocation OpLoc, const CXXScopeSpec *SS, SourceLocation TemplateKWLoc, ValueDecl *Member, DeclAccessPair FoundDecl, bool HadMultipleCandidates, const DeclarationNameInfo &MemberNameInfo, QualType Ty, ExprValueKind VK, ExprObjectKind OK, const TemplateArgumentListInfo *TemplateArgs = nullptr); MemberExpr * 
BuildMemberExpr(Expr *Base, bool IsArrow, SourceLocation OpLoc, NestedNameSpecifierLoc NNS, SourceLocation TemplateKWLoc, ValueDecl *Member, DeclAccessPair FoundDecl, bool HadMultipleCandidates, const DeclarationNameInfo &MemberNameInfo, QualType Ty, ExprValueKind VK, ExprObjectKind OK, const TemplateArgumentListInfo *TemplateArgs = nullptr); void ActOnDefaultCtorInitializers(Decl *CDtorDecl); bool ConvertArgumentsForCall(CallExpr *Call, Expr *Fn, FunctionDecl *FDecl, const FunctionProtoType *Proto, ArrayRef<Expr *> Args, SourceLocation RParenLoc, bool ExecConfig = false); void CheckStaticArrayArgument(SourceLocation CallLoc, ParmVarDecl *Param, const Expr *ArgExpr); /// ActOnCallExpr - Handle a call to Fn with the specified array of arguments. /// This provides the location of the left/right parens and a list of comma /// locations. ExprResult ActOnCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc, MultiExprArg ArgExprs, SourceLocation RParenLoc, Expr *ExecConfig = nullptr); ExprResult BuildCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc, MultiExprArg ArgExprs, SourceLocation RParenLoc, Expr *ExecConfig = nullptr, bool IsExecConfig = false); enum class AtomicArgumentOrder { API, AST }; ExprResult BuildAtomicExpr(SourceRange CallRange, SourceRange ExprRange, SourceLocation RParenLoc, MultiExprArg Args, AtomicExpr::AtomicOp Op, AtomicArgumentOrder ArgOrder = AtomicArgumentOrder::API); ExprResult BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl, SourceLocation LParenLoc, ArrayRef<Expr *> Arg, SourceLocation RParenLoc, Expr *Config = nullptr, bool IsExecConfig = false, ADLCallKind UsesADL = ADLCallKind::NotADL); ExprResult ActOnCUDAExecConfigExpr(Scope *S, SourceLocation LLLLoc, MultiExprArg ExecConfig, SourceLocation GGGLoc); ExprResult ActOnCastExpr(Scope *S, SourceLocation LParenLoc, Declarator &D, ParsedType &Ty, SourceLocation RParenLoc, Expr *CastExpr); ExprResult BuildCStyleCastExpr(SourceLocation LParenLoc, TypeSourceInfo *Ty, SourceLocation 
RParenLoc, Expr *Op); CastKind PrepareScalarCast(ExprResult &src, QualType destType); /// Build an altivec or OpenCL literal. ExprResult BuildVectorLiteral(SourceLocation LParenLoc, SourceLocation RParenLoc, Expr *E, TypeSourceInfo *TInfo); ExprResult MaybeConvertParenListExprToParenExpr(Scope *S, Expr *ME); ExprResult ActOnCompoundLiteral(SourceLocation LParenLoc, ParsedType Ty, SourceLocation RParenLoc, Expr *InitExpr); ExprResult BuildCompoundLiteralExpr(SourceLocation LParenLoc, TypeSourceInfo *TInfo, SourceLocation RParenLoc, Expr *LiteralExpr); ExprResult ActOnInitList(SourceLocation LBraceLoc, MultiExprArg InitArgList, SourceLocation RBraceLoc); ExprResult BuildInitList(SourceLocation LBraceLoc, MultiExprArg InitArgList, SourceLocation RBraceLoc); ExprResult ActOnDesignatedInitializer(Designation &Desig, SourceLocation EqualOrColonLoc, bool GNUSyntax, ExprResult Init); private: static BinaryOperatorKind ConvertTokenKindToBinaryOpcode(tok::TokenKind Kind); public: ExprResult ActOnBinOp(Scope *S, SourceLocation TokLoc, tok::TokenKind Kind, Expr *LHSExpr, Expr *RHSExpr); ExprResult BuildBinOp(Scope *S, SourceLocation OpLoc, BinaryOperatorKind Opc, Expr *LHSExpr, Expr *RHSExpr); ExprResult CreateBuiltinBinOp(SourceLocation OpLoc, BinaryOperatorKind Opc, Expr *LHSExpr, Expr *RHSExpr); void DiagnoseCommaOperator(const Expr *LHS, SourceLocation Loc); /// ActOnConditionalOp - Parse a ?: operation. Note that 'LHS' may be null /// in the case of a the GNU conditional expr extension. ExprResult ActOnConditionalOp(SourceLocation QuestionLoc, SourceLocation ColonLoc, Expr *CondExpr, Expr *LHSExpr, Expr *RHSExpr); /// ActOnAddrLabel - Parse the GNU address of label extension: "&&foo". ExprResult ActOnAddrLabel(SourceLocation OpLoc, SourceLocation LabLoc, LabelDecl *TheDecl); void ActOnStartStmtExpr(); ExprResult ActOnStmtExpr(SourceLocation LPLoc, Stmt *SubStmt, SourceLocation RPLoc); // "({..})" // Handle the final expression in a statement expression. 
ExprResult ActOnStmtExprResult(ExprResult E); void ActOnStmtExprError(); // __builtin_offsetof(type, identifier(.identifier|[expr])*) struct OffsetOfComponent { SourceLocation LocStart, LocEnd; bool isBrackets; // true if [expr], false if .ident union { IdentifierInfo *IdentInfo; Expr *E; } U; }; /// __builtin_offsetof(type, a.b[123][456].c) ExprResult BuildBuiltinOffsetOf(SourceLocation BuiltinLoc, TypeSourceInfo *TInfo, ArrayRef<OffsetOfComponent> Components, SourceLocation RParenLoc); ExprResult ActOnBuiltinOffsetOf(Scope *S, SourceLocation BuiltinLoc, SourceLocation TypeLoc, ParsedType ParsedArgTy, ArrayRef<OffsetOfComponent> Components, SourceLocation RParenLoc); // __builtin_choose_expr(constExpr, expr1, expr2) ExprResult ActOnChooseExpr(SourceLocation BuiltinLoc, Expr *CondExpr, Expr *LHSExpr, Expr *RHSExpr, SourceLocation RPLoc); // __builtin_va_arg(expr, type) ExprResult ActOnVAArg(SourceLocation BuiltinLoc, Expr *E, ParsedType Ty, SourceLocation RPLoc); ExprResult BuildVAArgExpr(SourceLocation BuiltinLoc, Expr *E, TypeSourceInfo *TInfo, SourceLocation RPLoc); // __builtin_LINE(), __builtin_FUNCTION(), __builtin_FILE(), // __builtin_COLUMN() ExprResult ActOnSourceLocExpr(SourceLocExpr::IdentKind Kind, SourceLocation BuiltinLoc, SourceLocation RPLoc); // Build a potentially resolved SourceLocExpr. ExprResult BuildSourceLocExpr(SourceLocExpr::IdentKind Kind, SourceLocation BuiltinLoc, SourceLocation RPLoc, DeclContext *ParentContext); // __null ExprResult ActOnGNUNullExpr(SourceLocation TokenLoc); bool CheckCaseExpression(Expr *E); /// Describes the result of an "if-exists" condition check. enum IfExistsResult { /// The symbol exists. IER_Exists, /// The symbol does not exist. IER_DoesNotExist, /// The name is a dependent name, so the results will differ /// from one instantiation to the next. IER_Dependent, /// An error occurred. 
IER_Error }; IfExistsResult CheckMicrosoftIfExistsSymbol(Scope *S, CXXScopeSpec &SS, const DeclarationNameInfo &TargetNameInfo); IfExistsResult CheckMicrosoftIfExistsSymbol(Scope *S, SourceLocation KeywordLoc, bool IsIfExists, CXXScopeSpec &SS, UnqualifiedId &Name); StmtResult BuildMSDependentExistsStmt(SourceLocation KeywordLoc, bool IsIfExists, NestedNameSpecifierLoc QualifierLoc, DeclarationNameInfo NameInfo, Stmt *Nested); StmtResult ActOnMSDependentExistsStmt(SourceLocation KeywordLoc, bool IsIfExists, CXXScopeSpec &SS, UnqualifiedId &Name, Stmt *Nested); //===------------------------- "Block" Extension ------------------------===// /// ActOnBlockStart - This callback is invoked when a block literal is /// started. void ActOnBlockStart(SourceLocation CaretLoc, Scope *CurScope); /// ActOnBlockArguments - This callback allows processing of block arguments. /// If there are no arguments, this is still invoked. void ActOnBlockArguments(SourceLocation CaretLoc, Declarator &ParamInfo, Scope *CurScope); /// ActOnBlockError - If there is an error parsing a block, this callback /// is invoked to pop the information about the block from the action impl. void ActOnBlockError(SourceLocation CaretLoc, Scope *CurScope); /// ActOnBlockStmtExpr - This is called when the body of a block statement /// literal was successfully completed. ^(int x){...} ExprResult ActOnBlockStmtExpr(SourceLocation CaretLoc, Stmt *Body, Scope *CurScope); //===---------------------------- Clang Extensions ----------------------===// /// __builtin_convertvector(...) ExprResult ActOnConvertVectorExpr(Expr *E, ParsedType ParsedDestTy, SourceLocation BuiltinLoc, SourceLocation RParenLoc); //===---------------------------- OpenCL Features -----------------------===// /// __builtin_astype(...) 
  ExprResult ActOnAsTypeExpr(Expr *E, ParsedType ParsedDestTy,
                             SourceLocation BuiltinLoc,
                             SourceLocation RParenLoc);

  //===---------------------------- C++ Features --------------------------===//

  // Act on C++ namespaces
  Decl *ActOnStartNamespaceDef(Scope *S, SourceLocation InlineLoc,
                               SourceLocation NamespaceLoc,
                               SourceLocation IdentLoc, IdentifierInfo *Ident,
                               SourceLocation LBrace,
                               const ParsedAttributesView &AttrList,
                               UsingDirectiveDecl *&UsingDecl);
  void ActOnFinishNamespaceDef(Decl *Dcl, SourceLocation RBrace);

  NamespaceDecl *getStdNamespace() const;
  NamespaceDecl *getOrCreateStdNamespace();
  NamespaceDecl *lookupStdExperimentalNamespace();

  CXXRecordDecl *getStdBadAlloc() const;
  EnumDecl *getStdAlignValT() const;

private:
  // A cache representing if we've fully checked the various comparison category
  // types stored in ASTContext. The bit-index corresponds to the integer value
  // of a ComparisonCategoryType enumerator.
  llvm::SmallBitVector FullyCheckedComparisonCategories;

  ValueDecl *tryLookupCtorInitMemberDecl(CXXRecordDecl *ClassDecl,
                                         CXXScopeSpec &SS,
                                         ParsedType TemplateTypeTy,
                                         IdentifierInfo *MemberOrBase);

public:
  /// Lookup the specified comparison category types in the standard
  /// library, and check the VarDecls possibly returned by the operator<=>
  /// builtins for that type.
  ///
  /// \return The type of the comparison category type corresponding to the
  /// specified Kind, or a null type if an error occurs
  QualType CheckComparisonCategoryType(ComparisonCategoryType Kind,
                                       SourceLocation Loc);

  /// Tests whether Ty is an instance of std::initializer_list and, if
  /// it is and Element is not NULL, assigns the element type to Element.
  bool isStdInitializerList(QualType Ty, QualType *Element);

  /// Looks for the std::initializer_list template and instantiates it
  /// with Element, or emits an error if it's not found.
  ///
  /// \returns The instantiated template, or null on error.
  QualType BuildStdInitializerList(QualType Element, SourceLocation Loc);

  /// Determine whether Ctor is an initializer-list constructor, as
  /// defined in [dcl.init.list]p2.
  bool isInitListConstructor(const FunctionDecl *Ctor);

  Decl *ActOnUsingDirective(Scope *CurScope, SourceLocation UsingLoc,
                            SourceLocation NamespcLoc, CXXScopeSpec &SS,
                            SourceLocation IdentLoc,
                            IdentifierInfo *NamespcName,
                            const ParsedAttributesView &AttrList);

  void PushUsingDirective(Scope *S, UsingDirectiveDecl *UDir);

  Decl *ActOnNamespaceAliasDef(Scope *CurScope, SourceLocation NamespaceLoc,
                               SourceLocation AliasLoc, IdentifierInfo *Alias,
                               CXXScopeSpec &SS, SourceLocation IdentLoc,
                               IdentifierInfo *Ident);

  void HideUsingShadowDecl(Scope *S, UsingShadowDecl *Shadow);
  bool CheckUsingShadowDecl(UsingDecl *UD, NamedDecl *Target,
                            const LookupResult &PreviousDecls,
                            UsingShadowDecl *&PrevShadow);
  UsingShadowDecl *BuildUsingShadowDecl(Scope *S, UsingDecl *UD,
                                        NamedDecl *Target,
                                        UsingShadowDecl *PrevDecl);

  bool CheckUsingDeclRedeclaration(SourceLocation UsingLoc,
                                   bool HasTypenameKeyword,
                                   const CXXScopeSpec &SS,
                                   SourceLocation NameLoc,
                                   const LookupResult &Previous);
  bool CheckUsingDeclQualifier(SourceLocation UsingLoc, bool HasTypename,
                               const CXXScopeSpec &SS,
                               const DeclarationNameInfo &NameInfo,
                               SourceLocation NameLoc);

  NamedDecl *BuildUsingDeclaration(
      Scope *S, AccessSpecifier AS, SourceLocation UsingLoc,
      bool HasTypenameKeyword, SourceLocation TypenameLoc, CXXScopeSpec &SS,
      DeclarationNameInfo NameInfo, SourceLocation EllipsisLoc,
      const ParsedAttributesView &AttrList, bool IsInstantiation);
  NamedDecl *BuildUsingPackDecl(NamedDecl *InstantiatedFrom,
                                ArrayRef<NamedDecl *> Expansions);

  bool CheckInheritingConstructorUsingDecl(UsingDecl *UD);

  /// Given a derived-class using shadow declaration for a constructor and the
  /// corresponding base class constructor, find or create the implicit
  /// synthesized derived class constructor to use for this initialization.
CXXConstructorDecl * findInheritingConstructor(SourceLocation Loc, CXXConstructorDecl *BaseCtor, ConstructorUsingShadowDecl *DerivedShadow); Decl *ActOnUsingDeclaration(Scope *CurScope, AccessSpecifier AS, SourceLocation UsingLoc, SourceLocation TypenameLoc, CXXScopeSpec &SS, UnqualifiedId &Name, SourceLocation EllipsisLoc, const ParsedAttributesView &AttrList); Decl *ActOnAliasDeclaration(Scope *CurScope, AccessSpecifier AS, MultiTemplateParamsArg TemplateParams, SourceLocation UsingLoc, UnqualifiedId &Name, const ParsedAttributesView &AttrList, TypeResult Type, Decl *DeclFromDeclSpec); /// BuildCXXConstructExpr - Creates a complete call to a constructor, /// including handling of its default argument expressions. /// /// \param ConstructKind - a CXXConstructExpr::ConstructionKind ExprResult BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType, NamedDecl *FoundDecl, CXXConstructorDecl *Constructor, MultiExprArg Exprs, bool HadMultipleCandidates, bool IsListInitialization, bool IsStdInitListInitialization, bool RequiresZeroInit, unsigned ConstructKind, SourceRange ParenRange); /// Build a CXXConstructExpr whose constructor has already been resolved if /// it denotes an inherited constructor. ExprResult BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType, CXXConstructorDecl *Constructor, bool Elidable, MultiExprArg Exprs, bool HadMultipleCandidates, bool IsListInitialization, bool IsStdInitListInitialization, bool RequiresZeroInit, unsigned ConstructKind, SourceRange ParenRange); // FIXME: Can we remove this and have the above BuildCXXConstructExpr check if // the constructor can be elidable? 
  ExprResult BuildCXXConstructExpr(SourceLocation ConstructLoc,
                                   QualType DeclInitType,
                                   NamedDecl *FoundDecl,
                                   CXXConstructorDecl *Constructor,
                                   bool Elidable,
                                   MultiExprArg Exprs,
                                   bool HadMultipleCandidates,
                                   bool IsListInitialization,
                                   bool IsStdInitListInitialization,
                                   bool RequiresZeroInit,
                                   unsigned ConstructKind,
                                   SourceRange ParenRange);

  ExprResult BuildCXXDefaultInitExpr(SourceLocation Loc, FieldDecl *Field);

  /// Instantiate or parse a C++ default argument expression as necessary.
  /// Return true on error.
  bool CheckCXXDefaultArgExpr(SourceLocation CallLoc, FunctionDecl *FD,
                              ParmVarDecl *Param);

  /// BuildCXXDefaultArgExpr - Creates a CXXDefaultArgExpr, instantiating
  /// the default expr if needed.
  ExprResult BuildCXXDefaultArgExpr(SourceLocation CallLoc,
                                    FunctionDecl *FD,
                                    ParmVarDecl *Param);

  /// FinalizeVarWithDestructor - Prepare for calling destructor on the
  /// constructed variable.
  void FinalizeVarWithDestructor(VarDecl *VD, const RecordType *DeclInitType);

  /// Helper class that collects exception specifications for
  /// implicitly-declared special member functions.
  class ImplicitExceptionSpecification {
    // Pointer to allow copying
    Sema *Self;
    // We order exception specifications thus:
    // noexcept is the most restrictive, but is only used in C++11.
    // throw() comes next.
    // Then a throw(collected exceptions)
    // Finally no specification, which is expressed as noexcept(false).
    // throw(...) is used instead if any called function uses it.
    ExceptionSpecificationType ComputedEST;
    llvm::SmallPtrSet<CanQualType, 4> ExceptionsSeen;
    SmallVector<QualType, 4> Exceptions;

    void ClearExceptions() {
      ExceptionsSeen.clear();
      Exceptions.clear();
    }

  public:
    explicit ImplicitExceptionSpecification(Sema &Self)
        : Self(&Self), ComputedEST(EST_BasicNoexcept) {
      if (!Self.getLangOpts().CPlusPlus11)
        ComputedEST = EST_DynamicNone;
    }

    /// Get the computed exception specification type.
    ExceptionSpecificationType getExceptionSpecType() const {
      assert(!isComputedNoexcept(ComputedEST) &&
             "noexcept(expr) should not be a possible result");
      return ComputedEST;
    }

    /// The number of exceptions in the exception specification.
    unsigned size() const { return Exceptions.size(); }

    /// The set of exceptions in the exception specification.
    const QualType *data() const { return Exceptions.data(); }

    /// Integrate another called method into the collected data.
    void CalledDecl(SourceLocation CallLoc, const CXXMethodDecl *Method);

    /// Integrate an invoked expression into the collected data.
    void CalledExpr(Expr *E);

    /// Overwrite an EPI's exception specification with this
    /// computed exception specification.
    FunctionProtoType::ExceptionSpecInfo getExceptionSpec() const {
      FunctionProtoType::ExceptionSpecInfo ESI;
      ESI.Type = getExceptionSpecType();
      if (ESI.Type == EST_Dynamic) {
        ESI.Exceptions = Exceptions;
      } else if (ESI.Type == EST_None) {
        /// C++11 [except.spec]p14:
        ///   The exception-specification is noexcept(false) if the set of
        ///   potential exceptions of the special member function contains "any"
        ESI.Type = EST_NoexceptFalse;
        ESI.NoexceptExpr = Self->ActOnCXXBoolLiteral(SourceLocation(),
                                                     tok::kw_false).get();
      }
      return ESI;
    }
  };

  /// Determine what sort of exception specification a defaulted
  /// default constructor of a class will have.
  ImplicitExceptionSpecification
  ComputeDefaultedDefaultCtorExceptionSpec(SourceLocation Loc,
                                           CXXMethodDecl *MD);

  /// Determine what sort of exception specification a defaulted
  /// copy constructor of a class will have, and whether the parameter
  /// will be const.
  ImplicitExceptionSpecification
  ComputeDefaultedCopyCtorExceptionSpec(CXXMethodDecl *MD);

  /// Determine what sort of exception specification a defaulted
  /// copy assignment operator of a class will have, and whether the
  /// parameter will be const.
ImplicitExceptionSpecification ComputeDefaultedCopyAssignmentExceptionSpec(CXXMethodDecl *MD); /// Determine what sort of exception specification a defaulted move /// constructor of a class will have. ImplicitExceptionSpecification ComputeDefaultedMoveCtorExceptionSpec(CXXMethodDecl *MD); /// Determine what sort of exception specification a defaulted move /// assignment operator of a class will have. ImplicitExceptionSpecification ComputeDefaultedMoveAssignmentExceptionSpec(CXXMethodDecl *MD); /// Determine what sort of exception specification a defaulted /// destructor of a class will have. ImplicitExceptionSpecification ComputeDefaultedDtorExceptionSpec(CXXMethodDecl *MD); /// Determine what sort of exception specification an inheriting /// constructor of a class will have. ImplicitExceptionSpecification ComputeInheritingCtorExceptionSpec(SourceLocation Loc, CXXConstructorDecl *CD); /// Evaluate the implicit exception specification for a defaulted /// special member function. void EvaluateImplicitExceptionSpec(SourceLocation Loc, CXXMethodDecl *MD); /// Check the given noexcept-specifier, convert its expression, and compute /// the appropriate ExceptionSpecificationType. ExprResult ActOnNoexceptSpec(SourceLocation NoexceptLoc, Expr *NoexceptExpr, ExceptionSpecificationType &EST); /// Check the given exception-specification and update the /// exception specification information with the results. void checkExceptionSpecification(bool IsTopLevel, ExceptionSpecificationType EST, ArrayRef<ParsedType> DynamicExceptions, ArrayRef<SourceRange> DynamicExceptionRanges, Expr *NoexceptExpr, SmallVectorImpl<QualType> &Exceptions, FunctionProtoType::ExceptionSpecInfo &ESI); /// Determine if we're in a case where we need to (incorrectly) eagerly /// parse an exception specification to work around a libstdc++ bug. bool isLibstdcxxEagerExceptionSpecHack(const Declarator &D); /// Add an exception-specification to the given member function /// (or member function template). 
  /// The exception-specification was parsed
  /// after the method itself was declared.
  void actOnDelayedExceptionSpecification(Decl *Method,
         ExceptionSpecificationType EST,
         SourceRange SpecificationRange,
         ArrayRef<ParsedType> DynamicExceptions,
         ArrayRef<SourceRange> DynamicExceptionRanges,
         Expr *NoexceptExpr);

  class InheritedConstructorInfo;

  /// Determine if a special member function should have a deleted
  /// definition when it is defaulted.
  bool ShouldDeleteSpecialMember(CXXMethodDecl *MD, CXXSpecialMember CSM,
                                 InheritedConstructorInfo *ICI = nullptr,
                                 bool Diagnose = false);

  /// Declare the implicit default constructor for the given class.
  ///
  /// \param ClassDecl The class declaration into which the implicit
  /// default constructor will be added.
  ///
  /// \returns The implicitly-declared default constructor.
  CXXConstructorDecl *DeclareImplicitDefaultConstructor(
                                                     CXXRecordDecl *ClassDecl);

  /// DefineImplicitDefaultConstructor - Checks for feasibility of
  /// defining this constructor as the default constructor.
  void DefineImplicitDefaultConstructor(SourceLocation CurrentLocation,
                                        CXXConstructorDecl *Constructor);

  /// Declare the implicit destructor for the given class.
  ///
  /// \param ClassDecl The class declaration into which the implicit
  /// destructor will be added.
  ///
  /// \returns The implicitly-declared destructor.
  CXXDestructorDecl *DeclareImplicitDestructor(CXXRecordDecl *ClassDecl);

  /// DefineImplicitDestructor - Checks for feasibility of
  /// defining this destructor as the default destructor.
  void DefineImplicitDestructor(SourceLocation CurrentLocation,
                                CXXDestructorDecl *Destructor);

  /// Build an exception spec for destructors that don't have one.
  ///
  /// C++11 says that user-defined destructors with no exception spec get one
  /// that looks as if the destructor was implicitly declared.
  void AdjustDestructorExceptionSpec(CXXDestructorDecl *Destructor);

  /// Define the specified inheriting constructor.
void DefineInheritingConstructor(SourceLocation UseLoc, CXXConstructorDecl *Constructor); /// Declare the implicit copy constructor for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// copy constructor will be added. /// /// \returns The implicitly-declared copy constructor. CXXConstructorDecl *DeclareImplicitCopyConstructor(CXXRecordDecl *ClassDecl); /// DefineImplicitCopyConstructor - Checks for feasibility of /// defining this constructor as the copy constructor. void DefineImplicitCopyConstructor(SourceLocation CurrentLocation, CXXConstructorDecl *Constructor); /// Declare the implicit move constructor for the given class. /// /// \param ClassDecl The Class declaration into which the implicit /// move constructor will be added. /// /// \returns The implicitly-declared move constructor, or NULL if it wasn't /// declared. CXXConstructorDecl *DeclareImplicitMoveConstructor(CXXRecordDecl *ClassDecl); /// DefineImplicitMoveConstructor - Checks for feasibility of /// defining this constructor as the move constructor. void DefineImplicitMoveConstructor(SourceLocation CurrentLocation, CXXConstructorDecl *Constructor); /// Declare the implicit copy assignment operator for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// copy assignment operator will be added. /// /// \returns The implicitly-declared copy assignment operator. CXXMethodDecl *DeclareImplicitCopyAssignment(CXXRecordDecl *ClassDecl); /// Defines an implicitly-declared copy assignment operator. void DefineImplicitCopyAssignment(SourceLocation CurrentLocation, CXXMethodDecl *MethodDecl); /// Declare the implicit move assignment operator for the given class. /// /// \param ClassDecl The Class declaration into which the implicit /// move assignment operator will be added. /// /// \returns The implicitly-declared move assignment operator, or NULL if it /// wasn't declared. 
  CXXMethodDecl *DeclareImplicitMoveAssignment(CXXRecordDecl *ClassDecl);

  /// Defines an implicitly-declared move assignment operator.
  void DefineImplicitMoveAssignment(SourceLocation CurrentLocation,
                                    CXXMethodDecl *MethodDecl);

  /// Force the declaration of any implicitly-declared members of this
  /// class.
  void ForceDeclarationOfImplicitMembers(CXXRecordDecl *Class);

  /// Check a completed declaration of an implicit special member.
  void CheckImplicitSpecialMemberDeclaration(Scope *S, FunctionDecl *FD);

  /// Determine whether the given function is an implicitly-deleted
  /// special member function.
  bool isImplicitlyDeleted(FunctionDecl *FD);

  /// Check whether 'this' shows up in the type of a static member
  /// function after the (naturally empty) cv-qualifier-seq would be.
  ///
  /// \returns true if an error occurred.
  bool checkThisInStaticMemberFunctionType(CXXMethodDecl *Method);

  /// Whether 'this' shows up in the exception specification of a static
  /// member function.
  bool checkThisInStaticMemberFunctionExceptionSpec(CXXMethodDecl *Method);

  /// Check whether 'this' shows up in the attributes of the given
  /// static member function.
  ///
  /// \returns true if an error occurred.
  bool checkThisInStaticMemberFunctionAttributes(CXXMethodDecl *Method);

  /// MaybeBindToTemporary - If the passed in expression has a record type with
  /// a non-trivial destructor, this will return CXXBindTemporaryExpr. Otherwise
  /// it simply returns the passed in expression.
ExprResult MaybeBindToTemporary(Expr *E); bool CompleteConstructorCall(CXXConstructorDecl *Constructor, MultiExprArg ArgsPtr, SourceLocation Loc, SmallVectorImpl<Expr*> &ConvertedArgs, bool AllowExplicit = false, bool IsListInitialization = false); ParsedType getInheritingConstructorName(CXXScopeSpec &SS, SourceLocation NameLoc, IdentifierInfo &Name); ParsedType getConstructorName(IdentifierInfo &II, SourceLocation NameLoc, Scope *S, CXXScopeSpec &SS, bool EnteringContext); ParsedType getDestructorName(SourceLocation TildeLoc, IdentifierInfo &II, SourceLocation NameLoc, Scope *S, CXXScopeSpec &SS, ParsedType ObjectType, bool EnteringContext); ParsedType getDestructorTypeForDecltype(const DeclSpec &DS, ParsedType ObjectType); // Checks that reinterpret casts don't have undefined behavior. void CheckCompatibleReinterpretCast(QualType SrcType, QualType DestType, bool IsDereference, SourceRange Range); /// ActOnCXXNamedCast - Parse {dynamic,static,reinterpret,const}_cast's. ExprResult ActOnCXXNamedCast(SourceLocation OpLoc, tok::TokenKind Kind, SourceLocation LAngleBracketLoc, Declarator &D, SourceLocation RAngleBracketLoc, SourceLocation LParenLoc, Expr *E, SourceLocation RParenLoc); ExprResult BuildCXXNamedCast(SourceLocation OpLoc, tok::TokenKind Kind, TypeSourceInfo *Ty, Expr *E, SourceRange AngleBrackets, SourceRange Parens); ExprResult ActOnBuiltinBitCastExpr(SourceLocation KWLoc, Declarator &Dcl, ExprResult Operand, SourceLocation RParenLoc); ExprResult BuildBuiltinBitCastExpr(SourceLocation KWLoc, TypeSourceInfo *TSI, Expr *Operand, SourceLocation RParenLoc); ExprResult BuildCXXTypeId(QualType TypeInfoType, SourceLocation TypeidLoc, TypeSourceInfo *Operand, SourceLocation RParenLoc); ExprResult BuildCXXTypeId(QualType TypeInfoType, SourceLocation TypeidLoc, Expr *Operand, SourceLocation RParenLoc); /// ActOnCXXTypeid - Parse typeid( something ). 
ExprResult ActOnCXXTypeid(SourceLocation OpLoc, SourceLocation LParenLoc, bool isType, void *TyOrExpr, SourceLocation RParenLoc); ExprResult BuildCXXUuidof(QualType TypeInfoType, SourceLocation TypeidLoc, TypeSourceInfo *Operand, SourceLocation RParenLoc); ExprResult BuildCXXUuidof(QualType TypeInfoType, SourceLocation TypeidLoc, Expr *Operand, SourceLocation RParenLoc); /// ActOnCXXUuidof - Parse __uuidof( something ). ExprResult ActOnCXXUuidof(SourceLocation OpLoc, SourceLocation LParenLoc, bool isType, void *TyOrExpr, SourceLocation RParenLoc); /// Handle a C++1z fold-expression: ( expr op ... op expr ). ExprResult ActOnCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS, tok::TokenKind Operator, SourceLocation EllipsisLoc, Expr *RHS, SourceLocation RParenLoc); ExprResult BuildCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS, BinaryOperatorKind Operator, SourceLocation EllipsisLoc, Expr *RHS, SourceLocation RParenLoc, Optional<unsigned> NumExpansions); ExprResult BuildEmptyCXXFoldExpr(SourceLocation EllipsisLoc, BinaryOperatorKind Operator); //// ActOnCXXThis - Parse 'this' pointer. ExprResult ActOnCXXThis(SourceLocation loc); /// Build a CXXThisExpr and mark it referenced in the current context. Expr *BuildCXXThisExpr(SourceLocation Loc, QualType Type, bool IsImplicit); void MarkThisReferenced(CXXThisExpr *This); /// Try to retrieve the type of the 'this' pointer. /// /// \returns The type of 'this', if possible. Otherwise, returns a NULL type. QualType getCurrentThisType(); /// When non-NULL, the C++ 'this' expression is allowed despite the /// current context not being a non-static member function. In such cases, /// this provides the type used for 'this'. QualType CXXThisTypeOverride; /// RAII object used to temporarily allow the C++ 'this' expression /// to be used, with the given qualifiers on the current class type. 
class CXXThisScopeRAII { Sema &S; QualType OldCXXThisTypeOverride; bool Enabled; public: /// Introduce a new scope where 'this' may be allowed (when enabled), /// using the given declaration (which is either a class template or a /// class) along with the given qualifiers. /// along with the qualifiers placed on '*this'. CXXThisScopeRAII(Sema &S, Decl *ContextDecl, Qualifiers CXXThisTypeQuals, bool Enabled = true); ~CXXThisScopeRAII(); }; /// Make sure the value of 'this' is actually available in the current /// context, if it is a potentially evaluated context. /// /// \param Loc The location at which the capture of 'this' occurs. /// /// \param Explicit Whether 'this' is explicitly captured in a lambda /// capture list. /// /// \param FunctionScopeIndexToStopAt If non-null, it points to the index /// of the FunctionScopeInfo stack beyond which we do not attempt to capture. /// This is useful when enclosing lambdas must speculatively capture /// 'this' that may or may not be used in certain specializations of /// a nested generic lambda (depending on whether the name resolves to /// a non-static member function or a static function). /// \return returns 'true' if failed, 'false' if success. bool CheckCXXThisCapture(SourceLocation Loc, bool Explicit = false, bool BuildAndDiagnose = true, const unsigned *const FunctionScopeIndexToStopAt = nullptr, bool ByCopy = false); /// Determine whether the given type is the type of *this that is used /// outside of the body of a member function for a type that is currently /// being defined. bool isThisOutsideMemberFunctionBody(QualType BaseType); /// ActOnCXXBoolLiteral - Parse {true,false} literals. ExprResult ActOnCXXBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind); /// ActOnObjCBoolLiteral - Parse {__objc_yes,__objc_no} literals. 
ExprResult ActOnObjCBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind); ExprResult ActOnObjCAvailabilityCheckExpr(llvm::ArrayRef<AvailabilitySpec> AvailSpecs, SourceLocation AtLoc, SourceLocation RParen); /// ActOnCXXNullPtrLiteral - Parse 'nullptr'. ExprResult ActOnCXXNullPtrLiteral(SourceLocation Loc); //// ActOnCXXThrow - Parse throw expressions. ExprResult ActOnCXXThrow(Scope *S, SourceLocation OpLoc, Expr *expr); ExprResult BuildCXXThrow(SourceLocation OpLoc, Expr *Ex, bool IsThrownVarInScope); bool CheckCXXThrowOperand(SourceLocation ThrowLoc, QualType ThrowTy, Expr *E); /// ActOnCXXTypeConstructExpr - Parse construction of a specified type. /// Can be interpreted either as function-style casting ("int(x)") /// or class type construction ("ClassType(x,y,z)") /// or creation of a value-initialized type ("int()"). ExprResult ActOnCXXTypeConstructExpr(ParsedType TypeRep, SourceLocation LParenOrBraceLoc, MultiExprArg Exprs, SourceLocation RParenOrBraceLoc, bool ListInitialization); ExprResult BuildCXXTypeConstructExpr(TypeSourceInfo *Type, SourceLocation LParenLoc, MultiExprArg Exprs, SourceLocation RParenLoc, bool ListInitialization); /// ActOnCXXNew - Parsed a C++ 'new' expression. ExprResult ActOnCXXNew(SourceLocation StartLoc, bool UseGlobal, SourceLocation PlacementLParen, MultiExprArg PlacementArgs, SourceLocation PlacementRParen, SourceRange TypeIdParens, Declarator &D, Expr *Initializer); ExprResult BuildCXXNew(SourceRange Range, bool UseGlobal, SourceLocation PlacementLParen, MultiExprArg PlacementArgs, SourceLocation PlacementRParen, SourceRange TypeIdParens, QualType AllocType, TypeSourceInfo *AllocTypeInfo, Optional<Expr *> ArraySize, SourceRange DirectInitRange, Expr *Initializer); /// Determine whether \p FD is an aligned allocation or deallocation /// function that is unavailable. 
bool isUnavailableAlignedAllocationFunction(const FunctionDecl &FD) const; /// Produce diagnostics if \p FD is an aligned allocation or deallocation /// function that is unavailable. void diagnoseUnavailableAlignedAllocation(const FunctionDecl &FD, SourceLocation Loc); bool CheckAllocatedType(QualType AllocType, SourceLocation Loc, SourceRange R); /// The scope in which to find allocation functions. enum AllocationFunctionScope { /// Only look for allocation functions in the global scope. AFS_Global, /// Only look for allocation functions in the scope of the /// allocated class. AFS_Class, /// Look for allocation functions in both the global scope /// and in the scope of the allocated class. AFS_Both }; /// Finds the overloads of operator new and delete that are appropriate /// for the allocation. bool FindAllocationFunctions(SourceLocation StartLoc, SourceRange Range, AllocationFunctionScope NewScope, AllocationFunctionScope DeleteScope, QualType AllocType, bool IsArray, bool &PassAlignment, MultiExprArg PlaceArgs, FunctionDecl *&OperatorNew, FunctionDecl *&OperatorDelete, bool Diagnose = true); void DeclareGlobalNewDelete(); void DeclareGlobalAllocationFunction(DeclarationName Name, QualType Return, ArrayRef<QualType> Params); bool FindDeallocationFunction(SourceLocation StartLoc, CXXRecordDecl *RD, DeclarationName Name, FunctionDecl* &Operator, bool Diagnose = true); FunctionDecl *FindUsualDeallocationFunction(SourceLocation StartLoc, bool CanProvideSize, bool Overaligned, DeclarationName Name); FunctionDecl *FindDeallocationFunctionForDestructor(SourceLocation StartLoc, CXXRecordDecl *RD); /// ActOnCXXDelete - Parsed a C++ 'delete' expression ExprResult ActOnCXXDelete(SourceLocation StartLoc, bool UseGlobal, bool ArrayForm, Expr *Operand); void CheckVirtualDtorCall(CXXDestructorDecl *dtor, SourceLocation Loc, bool IsDelete, bool CallCanBeVirtual, bool WarnOnNonAbstractTypes, SourceLocation DtorLoc); ExprResult ActOnNoexceptExpr(SourceLocation KeyLoc, 
SourceLocation LParen, Expr *Operand, SourceLocation RParen); ExprResult BuildCXXNoexceptExpr(SourceLocation KeyLoc, Expr *Operand, SourceLocation RParen); /// Parsed one of the type trait support pseudo-functions. ExprResult ActOnTypeTrait(TypeTrait Kind, SourceLocation KWLoc, ArrayRef<ParsedType> Args, SourceLocation RParenLoc); ExprResult BuildTypeTrait(TypeTrait Kind, SourceLocation KWLoc, ArrayRef<TypeSourceInfo *> Args, SourceLocation RParenLoc); /// ActOnArrayTypeTrait - Parsed one of the binary type trait support /// pseudo-functions. ExprResult ActOnArrayTypeTrait(ArrayTypeTrait ATT, SourceLocation KWLoc, ParsedType LhsTy, Expr *DimExpr, SourceLocation RParen); ExprResult BuildArrayTypeTrait(ArrayTypeTrait ATT, SourceLocation KWLoc, TypeSourceInfo *TSInfo, Expr *DimExpr, SourceLocation RParen); /// ActOnExpressionTrait - Parsed one of the unary type trait support /// pseudo-functions. ExprResult ActOnExpressionTrait(ExpressionTrait OET, SourceLocation KWLoc, Expr *Queried, SourceLocation RParen); ExprResult BuildExpressionTrait(ExpressionTrait OET, SourceLocation KWLoc, Expr *Queried, SourceLocation RParen); ExprResult ActOnStartCXXMemberReference(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, ParsedType &ObjectType, bool &MayBePseudoDestructor); ExprResult BuildPseudoDestructorExpr(Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, const CXXScopeSpec &SS, TypeSourceInfo *ScopeType, SourceLocation CCLoc, SourceLocation TildeLoc, PseudoDestructorTypeStorage DestroyedType); ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, CXXScopeSpec &SS, UnqualifiedId &FirstTypeName, SourceLocation CCLoc, SourceLocation TildeLoc, UnqualifiedId &SecondTypeName); ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, SourceLocation TildeLoc, const DeclSpec& DS); /// MaybeCreateExprWithCleanups - If the current full-expression /// requires any 
cleanups, surround it with a ExprWithCleanups node. /// Otherwise, just returns the passed-in expression. Expr *MaybeCreateExprWithCleanups(Expr *SubExpr); Stmt *MaybeCreateStmtWithCleanups(Stmt *SubStmt); ExprResult MaybeCreateExprWithCleanups(ExprResult SubExpr); MaterializeTemporaryExpr * CreateMaterializeTemporaryExpr(QualType T, Expr *Temporary, bool BoundToLvalueReference); ExprResult ActOnFinishFullExpr(Expr *Expr, bool DiscardedValue) { return ActOnFinishFullExpr( Expr, Expr ? Expr->getExprLoc() : SourceLocation(), DiscardedValue); } ExprResult ActOnFinishFullExpr(Expr *Expr, SourceLocation CC, bool DiscardedValue, bool IsConstexpr = false); StmtResult ActOnFinishFullStmt(Stmt *Stmt); // Marks SS invalid if it represents an incomplete type. bool RequireCompleteDeclContext(CXXScopeSpec &SS, DeclContext *DC); DeclContext *computeDeclContext(QualType T); DeclContext *computeDeclContext(const CXXScopeSpec &SS, bool EnteringContext = false); bool isDependentScopeSpecifier(const CXXScopeSpec &SS); CXXRecordDecl *getCurrentInstantiationOf(NestedNameSpecifier *NNS); /// The parser has parsed a global nested-name-specifier '::'. /// /// \param CCLoc The location of the '::'. /// /// \param SS The nested-name-specifier, which will be updated in-place /// to reflect the parsed nested-name-specifier. /// /// \returns true if an error occurred, false otherwise. bool ActOnCXXGlobalScopeSpecifier(SourceLocation CCLoc, CXXScopeSpec &SS); /// The parser has parsed a '__super' nested-name-specifier. /// /// \param SuperLoc The location of the '__super' keyword. /// /// \param ColonColonLoc The location of the '::'. /// /// \param SS The nested-name-specifier, which will be updated in-place /// to reflect the parsed nested-name-specifier. /// /// \returns true if an error occurred, false otherwise. 
bool ActOnSuperScopeSpecifier(SourceLocation SuperLoc, SourceLocation ColonColonLoc, CXXScopeSpec &SS); bool isAcceptableNestedNameSpecifier(const NamedDecl *SD, bool *CanCorrect = nullptr); NamedDecl *FindFirstQualifierInScope(Scope *S, NestedNameSpecifier *NNS); /// Keeps information about an identifier in a nested-name-spec. /// struct NestedNameSpecInfo { /// The type of the object, if we're parsing nested-name-specifier in /// a member access expression. ParsedType ObjectType; /// The identifier preceding the '::'. IdentifierInfo *Identifier; /// The location of the identifier. SourceLocation IdentifierLoc; /// The location of the '::'. SourceLocation CCLoc; /// Creates info object for the most typical case. NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc, SourceLocation ColonColonLoc, ParsedType ObjectType = ParsedType()) : ObjectType(ObjectType), Identifier(II), IdentifierLoc(IdLoc), CCLoc(ColonColonLoc) { } NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc, SourceLocation ColonColonLoc, QualType ObjectType) : ObjectType(ParsedType::make(ObjectType)), Identifier(II), IdentifierLoc(IdLoc), CCLoc(ColonColonLoc) { } }; bool isNonTypeNestedNameSpecifier(Scope *S, CXXScopeSpec &SS, NestedNameSpecInfo &IdInfo); bool BuildCXXNestedNameSpecifier(Scope *S, NestedNameSpecInfo &IdInfo, bool EnteringContext, CXXScopeSpec &SS, NamedDecl *ScopeLookupResult, bool ErrorRecoveryLookup, bool *IsCorrectedToColon = nullptr, bool OnlyNamespace = false); /// The parser has parsed a nested-name-specifier 'identifier::'. /// /// \param S The scope in which this nested-name-specifier occurs. /// /// \param IdInfo Parser information about an identifier in the /// nested-name-spec. /// /// \param EnteringContext Whether we're entering the context nominated by /// this nested-name-specifier. 
/// /// \param SS The nested-name-specifier, which is both an input /// parameter (the nested-name-specifier before this type) and an /// output parameter (containing the full nested-name-specifier, /// including this new type). /// /// \param ErrorRecoveryLookup If true, then this method is called to improve /// error recovery. In this case do not emit error message. /// /// \param IsCorrectedToColon If not null, suggestions to replace '::' -> ':' /// are allowed. The bool value pointed by this parameter is set to 'true' /// if the identifier is treated as if it was followed by ':', not '::'. /// /// \param OnlyNamespace If true, only considers namespaces in lookup. /// /// \returns true if an error occurred, false otherwise. bool ActOnCXXNestedNameSpecifier(Scope *S, NestedNameSpecInfo &IdInfo, bool EnteringContext, CXXScopeSpec &SS, bool ErrorRecoveryLookup = false, bool *IsCorrectedToColon = nullptr, bool OnlyNamespace = false); ExprResult ActOnDecltypeExpression(Expr *E); bool ActOnCXXNestedNameSpecifierDecltype(CXXScopeSpec &SS, const DeclSpec &DS, SourceLocation ColonColonLoc); bool IsInvalidUnlessNestedName(Scope *S, CXXScopeSpec &SS, NestedNameSpecInfo &IdInfo, bool EnteringContext); /// The parser has parsed a nested-name-specifier /// 'template[opt] template-name < template-args >::'. /// /// \param S The scope in which this nested-name-specifier occurs. /// /// \param SS The nested-name-specifier, which is both an input /// parameter (the nested-name-specifier before this type) and an /// output parameter (containing the full nested-name-specifier, /// including this new type). /// /// \param TemplateKWLoc the location of the 'template' keyword, if any. /// \param TemplateName the template name. /// \param TemplateNameLoc The location of the template name. /// \param LAngleLoc The location of the opening angle bracket ('<'). /// \param TemplateArgs The template arguments. /// \param RAngleLoc The location of the closing angle bracket ('>'). 
/// \param CCLoc The location of the '::'. /// /// \param EnteringContext Whether we're entering the context of the /// nested-name-specifier. /// /// /// \returns true if an error occurred, false otherwise. bool ActOnCXXNestedNameSpecifier(Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy TemplateName, SourceLocation TemplateNameLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc, SourceLocation CCLoc, bool EnteringContext); /// Given a C++ nested-name-specifier, produce an annotation value /// that the parser can use later to reconstruct the given /// nested-name-specifier. /// /// \param SS A nested-name-specifier. /// /// \returns A pointer containing all of the information in the /// nested-name-specifier \p SS. void *SaveNestedNameSpecifierAnnotation(CXXScopeSpec &SS); /// Given an annotation pointer for a nested-name-specifier, restore /// the nested-name-specifier structure. /// /// \param Annotation The annotation pointer, produced by /// \c SaveNestedNameSpecifierAnnotation(). /// /// \param AnnotationRange The source range corresponding to the annotation. /// /// \param SS The nested-name-specifier that will be updated with the contents /// of the annotation pointer. void RestoreNestedNameSpecifierAnnotation(void *Annotation, SourceRange AnnotationRange, CXXScopeSpec &SS); bool ShouldEnterDeclaratorScope(Scope *S, const CXXScopeSpec &SS); /// ActOnCXXEnterDeclaratorScope - Called when a C++ scope specifier (global /// scope or nested-name-specifier) is parsed, part of a declarator-id. /// After this method is called, according to [C++ 3.4.3p3], names should be /// looked up in the declarator-id's scope, until the declarator is parsed and /// ActOnCXXExitDeclaratorScope is called. /// The 'SS' should be a non-empty valid CXXScopeSpec. 
bool ActOnCXXEnterDeclaratorScope(Scope *S, CXXScopeSpec &SS); /// ActOnCXXExitDeclaratorScope - Called when a declarator that previously /// invoked ActOnCXXEnterDeclaratorScope(), is finished. 'SS' is the same /// CXXScopeSpec that was passed to ActOnCXXEnterDeclaratorScope as well. /// Used to indicate that names should revert to being looked up in the /// defining scope. void ActOnCXXExitDeclaratorScope(Scope *S, const CXXScopeSpec &SS); /// ActOnCXXEnterDeclInitializer - Invoked when we are about to parse an /// initializer for the declaration 'Dcl'. /// After this method is called, according to [C++ 3.4.1p13], if 'Dcl' is a /// static data member of class X, names should be looked up in the scope of /// class X. void ActOnCXXEnterDeclInitializer(Scope *S, Decl *Dcl); /// ActOnCXXExitDeclInitializer - Invoked after we are finished parsing an /// initializer for the declaration 'Dcl'. void ActOnCXXExitDeclInitializer(Scope *S, Decl *Dcl); /// Create a new lambda closure type. CXXRecordDecl *createLambdaClosureType(SourceRange IntroducerRange, TypeSourceInfo *Info, bool KnownDependent, LambdaCaptureDefault CaptureDefault); /// Start the definition of a lambda expression. CXXMethodDecl * startLambdaDefinition(CXXRecordDecl *Class, SourceRange IntroducerRange, TypeSourceInfo *MethodType, SourceLocation EndLoc, ArrayRef<ParmVarDecl *> Params, ConstexprSpecKind ConstexprKind, Optional<std::pair<unsigned, Decl *>> Mangling = None); /// Endow the lambda scope info with the relevant properties. void buildLambdaScope(sema::LambdaScopeInfo *LSI, CXXMethodDecl *CallOperator, SourceRange IntroducerRange, LambdaCaptureDefault CaptureDefault, SourceLocation CaptureDefaultLoc, bool ExplicitParams, bool ExplicitResultType, bool Mutable); /// Perform initialization analysis of the init-capture and perform /// any implicit conversions such as an lvalue-to-rvalue conversion if /// not being used to initialize a reference. 
ParsedType actOnLambdaInitCaptureInitialization(
    SourceLocation Loc, bool ByRef, SourceLocation EllipsisLoc,
    IdentifierInfo *Id, LambdaCaptureInitKind InitKind, Expr *&Init) {
  // Parser-facing wrapper around buildLambdaInitCaptureInitialization:
  // no pack-expansion count is known at this point (None), and every
  // init-kind other than plain copy-init ('= expr') is treated as
  // direct-initialization.
  return ParsedType::make(buildLambdaInitCaptureInitialization(
      Loc, ByRef, EllipsisLoc, None, Id,
      InitKind != LambdaCaptureInitKind::CopyInit, Init));
}

/// Compute the type of an init-capture variable from its initializer.
///
/// \param NumExpansions The pack-expansion count, if already known.
/// \param DirectInit Whether to use direct- (true) or copy-initialization
/// (false) semantics for \p Init.
QualType buildLambdaInitCaptureInitialization(
    SourceLocation Loc, bool ByRef, SourceLocation EllipsisLoc,
    Optional<unsigned> NumExpansions, IdentifierInfo *Id, bool DirectInit,
    Expr *&Init);

/// Create a dummy variable within the declcontext of the lambda's
/// call operator, for name lookup purposes for a lambda init capture.
///
/// CodeGen handles emission of lambda captures, ignoring these dummy
/// variables appropriately.
VarDecl *createLambdaInitCaptureVarDecl(SourceLocation Loc,
                                        QualType InitCaptureType,
                                        SourceLocation EllipsisLoc,
                                        IdentifierInfo *Id,
                                        unsigned InitStyle, Expr *Init);

/// Add an init-capture to a lambda scope.
void addInitCapture(sema::LambdaScopeInfo *LSI, VarDecl *Var);

/// Note that we have finished the explicit captures for the
/// given lambda.
void finishLambdaExplicitCaptures(sema::LambdaScopeInfo *LSI);

/// \brief This is called after parsing the explicit template parameter list
/// on a lambda (if it exists) in C++2a.
void ActOnLambdaExplicitTemplateParameterList(SourceLocation LAngleLoc,
                                              ArrayRef<NamedDecl *> TParams,
                                              SourceLocation RAngleLoc);

/// Introduce the lambda parameters into scope.
void addLambdaParameters(
    ArrayRef<LambdaIntroducer::LambdaCapture> Captures,
    CXXMethodDecl *CallOperator, Scope *CurScope);

/// Deduce a block or lambda's return type based on the return
/// statements present in the body.
void deduceClosureReturnType(sema::CapturingScopeInfo &CSI);

/// ActOnStartOfLambdaDefinition - This is called just before we start
/// parsing the body of a lambda; it analyzes the explicit captures and
/// arguments, and sets up various data-structures for the body of the
/// lambda.
void ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro, Declarator &ParamInfo, Scope *CurScope); /// ActOnLambdaError - If there is an error parsing a lambda, this callback /// is invoked to pop the information about the lambda. void ActOnLambdaError(SourceLocation StartLoc, Scope *CurScope, bool IsInstantiation = false); /// ActOnLambdaExpr - This is called when the body of a lambda expression /// was successfully completed. ExprResult ActOnLambdaExpr(SourceLocation StartLoc, Stmt *Body, Scope *CurScope); /// Does copying/destroying the captured variable have side effects? bool CaptureHasSideEffects(const sema::Capture &From); /// Diagnose if an explicit lambda capture is unused. Returns true if a /// diagnostic is emitted. bool DiagnoseUnusedLambdaCapture(SourceRange CaptureRange, const sema::Capture &From); /// Build a FieldDecl suitable to hold the given capture. FieldDecl *BuildCaptureField(RecordDecl *RD, const sema::Capture &Capture); /// Initialize the given capture with a suitable expression. ExprResult BuildCaptureInit(const sema::Capture &Capture, SourceLocation ImplicitCaptureLoc, bool IsOpenMPMapping = false); /// Complete a lambda-expression having processed and attached the /// lambda body. ExprResult BuildLambdaExpr(SourceLocation StartLoc, SourceLocation EndLoc, sema::LambdaScopeInfo *LSI); /// Get the return type to use for a lambda's conversion function(s) to /// function pointer type, given the type of the call operator. QualType getLambdaConversionFunctionResultType(const FunctionProtoType *CallOpType); /// Define the "body" of the conversion from a lambda object to a /// function pointer. /// /// This routine doesn't actually define a sensible body; rather, it fills /// in the initialization expression needed to copy the lambda object into /// the block, and IR generation actually generates the real body of the /// block pointer conversion. 
void DefineImplicitLambdaToFunctionPointerConversion( SourceLocation CurrentLoc, CXXConversionDecl *Conv); /// Define the "body" of the conversion from a lambda object to a /// block pointer. /// /// This routine doesn't actually define a sensible body; rather, it fills /// in the initialization expression needed to copy the lambda object into /// the block, and IR generation actually generates the real body of the /// block pointer conversion. void DefineImplicitLambdaToBlockPointerConversion(SourceLocation CurrentLoc, CXXConversionDecl *Conv); ExprResult BuildBlockForLambdaConversion(SourceLocation CurrentLocation, SourceLocation ConvLocation, CXXConversionDecl *Conv, Expr *Src); /// Check whether the given expression is a valid constraint expression. /// A diagnostic is emitted if it is not, and false is returned. bool CheckConstraintExpression(Expr *CE); bool CalculateConstraintSatisfaction(ConceptDecl *NamedConcept, MultiLevelTemplateArgumentList &MLTAL, Expr *ConstraintExpr, bool &IsSatisfied); /// Check that the associated constraints of a template declaration match the /// associated constraints of an older declaration of which it is a /// redeclaration. bool CheckRedeclarationConstraintMatch(TemplateParameterList *Old, TemplateParameterList *New); // ParseObjCStringLiteral - Parse Objective-C string literals. ExprResult ParseObjCStringLiteral(SourceLocation *AtLocs, ArrayRef<Expr *> Strings); ExprResult BuildObjCStringLiteral(SourceLocation AtLoc, StringLiteral *S); /// BuildObjCNumericLiteral - builds an ObjCBoxedExpr AST node for the /// numeric literal expression. Type of the expression will be "NSNumber *" /// or "id" if NSNumber is unavailable. 
ExprResult BuildObjCNumericLiteral(SourceLocation AtLoc, Expr *Number); ExprResult ActOnObjCBoolLiteral(SourceLocation AtLoc, SourceLocation ValueLoc, bool Value); ExprResult BuildObjCArrayLiteral(SourceRange SR, MultiExprArg Elements); /// BuildObjCBoxedExpr - builds an ObjCBoxedExpr AST node for the /// '@' prefixed parenthesized expression. The type of the expression will /// either be "NSNumber *", "NSString *" or "NSValue *" depending on the type /// of ValueType, which is allowed to be a built-in numeric type, "char *", /// "const char *" or C structure with attribute 'objc_boxable'. ExprResult BuildObjCBoxedExpr(SourceRange SR, Expr *ValueExpr); ExprResult BuildObjCSubscriptExpression(SourceLocation RB, Expr *BaseExpr, Expr *IndexExpr, ObjCMethodDecl *getterMethod, ObjCMethodDecl *setterMethod); ExprResult BuildObjCDictionaryLiteral(SourceRange SR, MutableArrayRef<ObjCDictionaryElement> Elements); ExprResult BuildObjCEncodeExpression(SourceLocation AtLoc, TypeSourceInfo *EncodedTypeInfo, SourceLocation RParenLoc); ExprResult BuildCXXMemberCallExpr(Expr *Exp, NamedDecl *FoundDecl, CXXConversionDecl *Method, bool HadMultipleCandidates); ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc, SourceLocation EncodeLoc, SourceLocation LParenLoc, ParsedType Ty, SourceLocation RParenLoc); /// ParseObjCSelectorExpression - Build selector expression for \@selector ExprResult ParseObjCSelectorExpression(Selector Sel, SourceLocation AtLoc, SourceLocation SelLoc, SourceLocation LParenLoc, SourceLocation RParenLoc, bool WarnMultipleSelectors); /// ParseObjCProtocolExpression - Build protocol expression for \@protocol ExprResult ParseObjCProtocolExpression(IdentifierInfo * ProtocolName, SourceLocation AtLoc, SourceLocation ProtoLoc, SourceLocation LParenLoc, SourceLocation ProtoIdLoc, SourceLocation RParenLoc); //===--------------------------------------------------------------------===// // C++ Declarations // Decl *ActOnStartLinkageSpecification(Scope *S, 
SourceLocation ExternLoc, Expr *LangStr, SourceLocation LBraceLoc); Decl *ActOnFinishLinkageSpecification(Scope *S, Decl *LinkageSpec, SourceLocation RBraceLoc); //===--------------------------------------------------------------------===// // C++ Classes // CXXRecordDecl *getCurrentClass(Scope *S, const CXXScopeSpec *SS); bool isCurrentClassName(const IdentifierInfo &II, Scope *S, const CXXScopeSpec *SS = nullptr); bool isCurrentClassNameTypo(IdentifierInfo *&II, const CXXScopeSpec *SS); bool ActOnAccessSpecifier(AccessSpecifier Access, SourceLocation ASLoc, SourceLocation ColonLoc, const ParsedAttributesView &Attrs); NamedDecl *ActOnCXXMemberDeclarator(Scope *S, AccessSpecifier AS, Declarator &D, MultiTemplateParamsArg TemplateParameterLists, Expr *BitfieldWidth, const VirtSpecifiers &VS, InClassInitStyle InitStyle); void ActOnStartCXXInClassMemberInitializer(); void ActOnFinishCXXInClassMemberInitializer(Decl *VarDecl, SourceLocation EqualLoc, Expr *Init); MemInitResult ActOnMemInitializer(Decl *ConstructorD, Scope *S, CXXScopeSpec &SS, IdentifierInfo *MemberOrBase, ParsedType TemplateTypeTy, const DeclSpec &DS, SourceLocation IdLoc, SourceLocation LParenLoc, ArrayRef<Expr *> Args, SourceLocation RParenLoc, SourceLocation EllipsisLoc); MemInitResult ActOnMemInitializer(Decl *ConstructorD, Scope *S, CXXScopeSpec &SS, IdentifierInfo *MemberOrBase, ParsedType TemplateTypeTy, const DeclSpec &DS, SourceLocation IdLoc, Expr *InitList, SourceLocation EllipsisLoc); MemInitResult BuildMemInitializer(Decl *ConstructorD, Scope *S, CXXScopeSpec &SS, IdentifierInfo *MemberOrBase, ParsedType TemplateTypeTy, const DeclSpec &DS, SourceLocation IdLoc, Expr *Init, SourceLocation EllipsisLoc); MemInitResult BuildMemberInitializer(ValueDecl *Member, Expr *Init, SourceLocation IdLoc); MemInitResult BuildBaseInitializer(QualType BaseType, TypeSourceInfo *BaseTInfo, Expr *Init, CXXRecordDecl *ClassDecl, SourceLocation EllipsisLoc); MemInitResult 
BuildDelegatingInitializer(TypeSourceInfo *TInfo, Expr *Init, CXXRecordDecl *ClassDecl); bool SetDelegatingInitializer(CXXConstructorDecl *Constructor, CXXCtorInitializer *Initializer); bool SetCtorInitializers(CXXConstructorDecl *Constructor, bool AnyErrors, ArrayRef<CXXCtorInitializer *> Initializers = None); void SetIvarInitializers(ObjCImplementationDecl *ObjCImplementation); /// MarkBaseAndMemberDestructorsReferenced - Given a record decl, /// mark all the non-trivial destructors of its members and bases as /// referenced. void MarkBaseAndMemberDestructorsReferenced(SourceLocation Loc, CXXRecordDecl *Record); /// The list of classes whose vtables have been used within /// this translation unit, and the source locations at which the /// first use occurred. typedef std::pair<CXXRecordDecl*, SourceLocation> VTableUse; /// The list of vtables that are required but have not yet been /// materialized. SmallVector<VTableUse, 16> VTableUses; /// The set of classes whose vtables have been used within /// this translation unit, and a bit that will be true if the vtable is /// required to be emitted (otherwise, it should be emitted only if needed /// by code generation). llvm::DenseMap<CXXRecordDecl *, bool> VTablesUsed; /// Load any externally-stored vtable uses. void LoadExternalVTableUses(); /// Note that the vtable for the given class was used at the /// given location. void MarkVTableUsed(SourceLocation Loc, CXXRecordDecl *Class, bool DefinitionRequired = false); /// Mark the exception specifications of all virtual member functions /// in the given class as needed. void MarkVirtualMemberExceptionSpecsNeeded(SourceLocation Loc, const CXXRecordDecl *RD); /// MarkVirtualMembersReferenced - Will mark all members of the given /// CXXRecordDecl referenced. 
void MarkVirtualMembersReferenced(SourceLocation Loc, const CXXRecordDecl *RD, bool ConstexprOnly = false); /// Define all of the vtables that have been used in this /// translation unit and reference any virtual members used by those /// vtables. /// /// \returns true if any work was done, false otherwise. bool DefineUsedVTables(); void AddImplicitlyDeclaredMembersToClass(CXXRecordDecl *ClassDecl); void ActOnMemInitializers(Decl *ConstructorDecl, SourceLocation ColonLoc, ArrayRef<CXXCtorInitializer*> MemInits, bool AnyErrors); /// Check class-level dllimport/dllexport attribute. The caller must /// ensure that referenceDLLExportedClassMethods is called some point later /// when all outer classes of Class are complete. void checkClassLevelDLLAttribute(CXXRecordDecl *Class); void checkClassLevelCodeSegAttribute(CXXRecordDecl *Class); void referenceDLLExportedClassMethods(); void propagateDLLAttrToBaseClassTemplate( CXXRecordDecl *Class, Attr *ClassAttr, ClassTemplateSpecializationDecl *BaseTemplateSpec, SourceLocation BaseLoc); /// Add gsl::Pointer attribute to std::container::iterator /// \param ND The declaration that introduces the name /// std::container::iterator. \param UnderlyingRecord The record named by ND. void inferGslPointerAttribute(NamedDecl *ND, CXXRecordDecl *UnderlyingRecord); /// Add [[gsl::Owner]] and [[gsl::Pointer]] attributes for std:: types. void inferGslOwnerPointerAttribute(CXXRecordDecl *Record); /// Add [[gsl::Pointer]] attributes for std:: types. void inferGslPointerAttribute(TypedefNameDecl *TD); void CheckCompletedCXXClass(CXXRecordDecl *Record); /// Check that the C++ class annoated with "trivial_abi" satisfies all the /// conditions that are needed for the attribute to have an effect. 
void checkIllFormedTrivialABIStruct(CXXRecordDecl &RD); void ActOnFinishCXXMemberSpecification(Scope *S, SourceLocation RLoc, Decl *TagDecl, SourceLocation LBrac, SourceLocation RBrac, const ParsedAttributesView &AttrList); void ActOnFinishCXXMemberDecls(); void ActOnFinishCXXNonNestedClass(Decl *D); void ActOnReenterCXXMethodParameter(Scope *S, ParmVarDecl *Param); unsigned ActOnReenterTemplateScope(Scope *S, Decl *Template); void ActOnStartDelayedMemberDeclarations(Scope *S, Decl *Record); void ActOnStartDelayedCXXMethodDeclaration(Scope *S, Decl *Method); void ActOnDelayedCXXMethodParameter(Scope *S, Decl *Param); void ActOnFinishDelayedMemberDeclarations(Scope *S, Decl *Record); void ActOnFinishDelayedCXXMethodDeclaration(Scope *S, Decl *Method); void ActOnFinishDelayedMemberInitializers(Decl *Record); void MarkAsLateParsedTemplate(FunctionDecl *FD, Decl *FnD, CachedTokens &Toks); void UnmarkAsLateParsedTemplate(FunctionDecl *FD); bool IsInsideALocalClassWithinATemplateFunction(); Decl *ActOnStaticAssertDeclaration(SourceLocation StaticAssertLoc, Expr *AssertExpr, Expr *AssertMessageExpr, SourceLocation RParenLoc); Decl *BuildStaticAssertDeclaration(SourceLocation StaticAssertLoc, Expr *AssertExpr, StringLiteral *AssertMessageExpr, SourceLocation RParenLoc, bool Failed); FriendDecl *CheckFriendTypeDecl(SourceLocation LocStart, SourceLocation FriendLoc, TypeSourceInfo *TSInfo); Decl *ActOnFriendTypeDecl(Scope *S, const DeclSpec &DS, MultiTemplateParamsArg TemplateParams); NamedDecl *ActOnFriendFunctionDecl(Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParams); QualType CheckConstructorDeclarator(Declarator &D, QualType R, StorageClass& SC); void CheckConstructor(CXXConstructorDecl *Constructor); QualType CheckDestructorDeclarator(Declarator &D, QualType R, StorageClass& SC); bool CheckDestructor(CXXDestructorDecl *Destructor); void CheckConversionDeclarator(Declarator &D, QualType &R, StorageClass& SC); Decl *ActOnConversionDeclarator(CXXConversionDecl 
*Conversion); void CheckDeductionGuideDeclarator(Declarator &D, QualType &R, StorageClass &SC); void CheckDeductionGuideTemplate(FunctionTemplateDecl *TD); void CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD); void CheckDelayedMemberExceptionSpecs(); //===--------------------------------------------------------------------===// // C++ Derived Classes // /// ActOnBaseSpecifier - Parsed a base specifier CXXBaseSpecifier *CheckBaseSpecifier(CXXRecordDecl *Class, SourceRange SpecifierRange, bool Virtual, AccessSpecifier Access, TypeSourceInfo *TInfo, SourceLocation EllipsisLoc); BaseResult ActOnBaseSpecifier(Decl *classdecl, SourceRange SpecifierRange, ParsedAttributes &Attrs, bool Virtual, AccessSpecifier Access, ParsedType basetype, SourceLocation BaseLoc, SourceLocation EllipsisLoc); bool AttachBaseSpecifiers(CXXRecordDecl *Class, MutableArrayRef<CXXBaseSpecifier *> Bases); void ActOnBaseSpecifiers(Decl *ClassDecl, MutableArrayRef<CXXBaseSpecifier *> Bases); bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base); bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base, CXXBasePaths &Paths); // FIXME: I don't like this name. void BuildBasePathArray(const CXXBasePaths &Paths, CXXCastPath &BasePath); bool CheckDerivedToBaseConversion(QualType Derived, QualType Base, SourceLocation Loc, SourceRange Range, CXXCastPath *BasePath = nullptr, bool IgnoreAccess = false); bool CheckDerivedToBaseConversion(QualType Derived, QualType Base, unsigned InaccessibleBaseID, unsigned AmbigiousBaseConvID, SourceLocation Loc, SourceRange Range, DeclarationName Name, CXXCastPath *BasePath, bool IgnoreAccess = false); std::string getAmbiguousPathsDisplayString(CXXBasePaths &Paths); bool CheckOverridingFunctionAttributes(const CXXMethodDecl *New, const CXXMethodDecl *Old); /// CheckOverridingFunctionReturnType - Checks whether the return types are /// covariant, according to C++ [class.virtual]p5. 
bool CheckOverridingFunctionReturnType(const CXXMethodDecl *New, const CXXMethodDecl *Old); /// CheckOverridingFunctionExceptionSpec - Checks whether the exception /// spec is a subset of base spec. bool CheckOverridingFunctionExceptionSpec(const CXXMethodDecl *New, const CXXMethodDecl *Old); bool CheckPureMethod(CXXMethodDecl *Method, SourceRange InitRange); /// CheckOverrideControl - Check C++11 override control semantics. void CheckOverrideControl(NamedDecl *D); /// DiagnoseAbsenceOfOverrideControl - Diagnose if 'override' keyword was /// not used in the declaration of an overriding method. void DiagnoseAbsenceOfOverrideControl(NamedDecl *D); /// CheckForFunctionMarkedFinal - Checks whether a virtual member function /// overrides a virtual member function marked 'final', according to /// C++11 [class.virtual]p4. bool CheckIfOverriddenFunctionIsMarkedFinal(const CXXMethodDecl *New, const CXXMethodDecl *Old); //===--------------------------------------------------------------------===// // C++ Access Control // enum AccessResult { AR_accessible, AR_inaccessible, AR_dependent, AR_delayed }; bool SetMemberAccessSpecifier(NamedDecl *MemberDecl, NamedDecl *PrevMemberDecl, AccessSpecifier LexicalAS); AccessResult CheckUnresolvedMemberAccess(UnresolvedMemberExpr *E, DeclAccessPair FoundDecl); AccessResult CheckUnresolvedLookupAccess(UnresolvedLookupExpr *E, DeclAccessPair FoundDecl); AccessResult CheckAllocationAccess(SourceLocation OperatorLoc, SourceRange PlacementRange, CXXRecordDecl *NamingClass, DeclAccessPair FoundDecl, bool Diagnose = true); AccessResult CheckConstructorAccess(SourceLocation Loc, CXXConstructorDecl *D, DeclAccessPair FoundDecl, const InitializedEntity &Entity, bool IsCopyBindingRefToTemp = false); AccessResult CheckConstructorAccess(SourceLocation Loc, CXXConstructorDecl *D, DeclAccessPair FoundDecl, const InitializedEntity &Entity, const PartialDiagnostic &PDiag); AccessResult CheckDestructorAccess(SourceLocation Loc, CXXDestructorDecl *Dtor, 
const PartialDiagnostic &PDiag, QualType objectType = QualType()); AccessResult CheckFriendAccess(NamedDecl *D); AccessResult CheckMemberAccess(SourceLocation UseLoc, CXXRecordDecl *NamingClass, DeclAccessPair Found); AccessResult CheckStructuredBindingMemberAccess(SourceLocation UseLoc, CXXRecordDecl *DecomposedClass, DeclAccessPair Field); AccessResult CheckMemberOperatorAccess(SourceLocation Loc, Expr *ObjectExpr, Expr *ArgExpr, DeclAccessPair FoundDecl); AccessResult CheckAddressOfMemberAccess(Expr *OvlExpr, DeclAccessPair FoundDecl); AccessResult CheckBaseClassAccess(SourceLocation AccessLoc, QualType Base, QualType Derived, const CXXBasePath &Path, unsigned DiagID, bool ForceCheck = false, bool ForceUnprivileged = false); void CheckLookupAccess(const LookupResult &R); bool IsSimplyAccessible(NamedDecl *Decl, CXXRecordDecl *NamingClass, QualType BaseType); bool isSpecialMemberAccessibleForDeletion(CXXMethodDecl *decl, AccessSpecifier access, QualType objectType); void HandleDependentAccessCheck(const DependentDiagnostic &DD, const MultiLevelTemplateArgumentList &TemplateArgs); void PerformDependentDiagnostics(const DeclContext *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs); void HandleDelayedAccessCheck(sema::DelayedDiagnostic &DD, Decl *Ctx); /// When true, access checking violations are treated as SFINAE /// failures rather than hard errors. bool AccessCheckingSFINAE; enum AbstractDiagSelID { AbstractNone = -1, AbstractReturnType, AbstractParamType, AbstractVariableType, AbstractFieldType, AbstractIvarType, AbstractSynthesizedIvarType, AbstractArrayType }; bool isAbstractType(SourceLocation Loc, QualType T); bool RequireNonAbstractType(SourceLocation Loc, QualType T, TypeDiagnoser &Diagnoser); template <typename... 
Ts>
bool RequireNonAbstractType(SourceLocation Loc, QualType T, unsigned DiagID,
                            const Ts &...Args) {
  // Convenience overload: bundle the diagnostic ID and its format arguments
  // into a BoundTypeDiagnoser and forward to the TypeDiagnoser-based
  // overload declared above.
  BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
  return RequireNonAbstractType(Loc, T, Diagnoser);
}

void DiagnoseAbstractType(const CXXRecordDecl *RD);

//===--------------------------------------------------------------------===//
// C++ Overloaded Operators [C++ 13.5]
//
bool CheckOverloadedOperatorDeclaration(FunctionDecl *FnDecl);
bool CheckLiteralOperatorDeclaration(FunctionDecl *FnDecl);

//===--------------------------------------------------------------------===//
// C++ Templates [C++ 14]
//
void FilterAcceptableTemplateNames(LookupResult &R,
                                   bool AllowFunctionTemplates = true,
                                   bool AllowDependent = true);
bool hasAnyAcceptableTemplateNames(LookupResult &R,
                                   bool AllowFunctionTemplates = true,
                                   bool AllowDependent = true,
                                   bool AllowNonTemplateFunctions = false);

/// Try to interpret the lookup result D as a template-name.
///
/// \param D A declaration found by name lookup.
/// \param AllowFunctionTemplates Whether function templates should be
/// considered valid results.
/// \param AllowDependent Whether unresolved using declarations (that might
/// name templates) should be considered valid results.
NamedDecl *getAsTemplateNameDecl(NamedDecl *D,
                                 bool AllowFunctionTemplates = true,
                                 bool AllowDependent = true);

enum class AssumedTemplateKind {
  /// This is not assumed to be a template name.
  None,
  /// This is assumed to be a template name because lookup found nothing.
  FoundNothing,
  /// This is assumed to be a template name because lookup found one or more
  /// functions (but no function templates).
FoundFunctions, }; bool LookupTemplateName(LookupResult &R, Scope *S, CXXScopeSpec &SS, QualType ObjectType, bool EnteringContext, bool &MemberOfUnknownSpecialization, SourceLocation TemplateKWLoc = SourceLocation(), AssumedTemplateKind *ATK = nullptr); TemplateNameKind isTemplateName(Scope *S, CXXScopeSpec &SS, bool hasTemplateKeyword, const UnqualifiedId &Name, ParsedType ObjectType, bool EnteringContext, TemplateTy &Template, bool &MemberOfUnknownSpecialization); /// Try to resolve an undeclared template name as a type template. /// /// Sets II to the identifier corresponding to the template name, and updates /// Name to a corresponding (typo-corrected) type template name and TNK to /// the corresponding kind, if possible. void ActOnUndeclaredTypeTemplateName(Scope *S, TemplateTy &Name, TemplateNameKind &TNK, SourceLocation NameLoc, IdentifierInfo *&II); bool resolveAssumedTemplateNameAsType(Scope *S, TemplateName &Name, SourceLocation NameLoc, bool Diagnose = true); /// Determine whether a particular identifier might be the name in a C++1z /// deduction-guide declaration. 
bool isDeductionGuideName(Scope *S, const IdentifierInfo &Name, SourceLocation NameLoc, ParsedTemplateTy *Template = nullptr); bool DiagnoseUnknownTemplateName(const IdentifierInfo &II, SourceLocation IILoc, Scope *S, const CXXScopeSpec *SS, TemplateTy &SuggestedTemplate, TemplateNameKind &SuggestedKind); bool DiagnoseUninstantiableTemplate(SourceLocation PointOfInstantiation, NamedDecl *Instantiation, bool InstantiatedFromMember, const NamedDecl *Pattern, const NamedDecl *PatternDef, TemplateSpecializationKind TSK, bool Complain = true); void DiagnoseTemplateParameterShadow(SourceLocation Loc, Decl *PrevDecl); TemplateDecl *AdjustDeclIfTemplate(Decl *&Decl); NamedDecl *ActOnTypeParameter(Scope *S, bool Typename, SourceLocation EllipsisLoc, SourceLocation KeyLoc, IdentifierInfo *ParamName, SourceLocation ParamNameLoc, unsigned Depth, unsigned Position, SourceLocation EqualLoc, ParsedType DefaultArg); QualType CheckNonTypeTemplateParameterType(TypeSourceInfo *&TSI, SourceLocation Loc); QualType CheckNonTypeTemplateParameterType(QualType T, SourceLocation Loc); NamedDecl *ActOnNonTypeTemplateParameter(Scope *S, Declarator &D, unsigned Depth, unsigned Position, SourceLocation EqualLoc, Expr *DefaultArg); NamedDecl *ActOnTemplateTemplateParameter(Scope *S, SourceLocation TmpLoc, TemplateParameterList *Params, SourceLocation EllipsisLoc, IdentifierInfo *ParamName, SourceLocation ParamNameLoc, unsigned Depth, unsigned Position, SourceLocation EqualLoc, ParsedTemplateArgument DefaultArg); TemplateParameterList * ActOnTemplateParameterList(unsigned Depth, SourceLocation ExportLoc, SourceLocation TemplateLoc, SourceLocation LAngleLoc, ArrayRef<NamedDecl *> Params, SourceLocation RAngleLoc, Expr *RequiresClause); /// The context in which we are checking a template parameter list. 
enum TemplateParamListContext { TPC_ClassTemplate, TPC_VarTemplate, TPC_FunctionTemplate, TPC_ClassTemplateMember, TPC_FriendClassTemplate, TPC_FriendFunctionTemplate, TPC_FriendFunctionTemplateDefinition, TPC_TypeAliasTemplate }; bool CheckTemplateParameterList(TemplateParameterList *NewParams, TemplateParameterList *OldParams, TemplateParamListContext TPC, SkipBodyInfo *SkipBody = nullptr); TemplateParameterList *MatchTemplateParametersToScopeSpecifier( SourceLocation DeclStartLoc, SourceLocation DeclLoc, const CXXScopeSpec &SS, TemplateIdAnnotation *TemplateId, ArrayRef<TemplateParameterList *> ParamLists, bool IsFriend, bool &IsMemberSpecialization, bool &Invalid); DeclResult CheckClassTemplate( Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, const ParsedAttributesView &Attr, TemplateParameterList *TemplateParams, AccessSpecifier AS, SourceLocation ModulePrivateLoc, SourceLocation FriendLoc, unsigned NumOuterTemplateParamLists, TemplateParameterList **OuterTemplateParamLists, SkipBodyInfo *SkipBody = nullptr); TemplateArgumentLoc getTrivialTemplateArgumentLoc(const TemplateArgument &Arg, QualType NTTPType, SourceLocation Loc); void translateTemplateArguments(const ASTTemplateArgsPtr &In, TemplateArgumentListInfo &Out); ParsedTemplateArgument ActOnTemplateTypeArgument(TypeResult ParsedType); void NoteAllFoundTemplates(TemplateName Name); QualType CheckTemplateIdType(TemplateName Template, SourceLocation TemplateLoc, TemplateArgumentListInfo &TemplateArgs); TypeResult ActOnTemplateIdType(Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy Template, IdentifierInfo *TemplateII, SourceLocation TemplateIILoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc, bool IsCtorOrDtorName = false, bool IsClassName = false); /// Parsed an elaborated-type-specifier that refers to a template-id, /// such as \c class T::template apply<U>. 
TypeResult ActOnTagTemplateIdType(TagUseKind TUK, TypeSpecifierType TagSpec, SourceLocation TagLoc, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy TemplateD, SourceLocation TemplateLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgsIn, SourceLocation RAngleLoc); DeclResult ActOnVarTemplateSpecialization( Scope *S, Declarator &D, TypeSourceInfo *DI, SourceLocation TemplateKWLoc, TemplateParameterList *TemplateParams, StorageClass SC, bool IsPartialSpecialization); DeclResult CheckVarTemplateId(VarTemplateDecl *Template, SourceLocation TemplateLoc, SourceLocation TemplateNameLoc, const TemplateArgumentListInfo &TemplateArgs); ExprResult CheckVarTemplateId(const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, VarTemplateDecl *Template, SourceLocation TemplateLoc, const TemplateArgumentListInfo *TemplateArgs); ExprResult CheckConceptTemplateId(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, SourceLocation ConceptNameLoc, NamedDecl *FoundDecl, ConceptDecl *NamedConcept, const TemplateArgumentListInfo *TemplateArgs); void diagnoseMissingTemplateArguments(TemplateName Name, SourceLocation Loc); ExprResult BuildTemplateIdExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R, bool RequiresADL, const TemplateArgumentListInfo *TemplateArgs); ExprResult BuildQualifiedTemplateIdExpr(CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs); TemplateNameKind ActOnDependentTemplateName( Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const UnqualifiedId &Name, ParsedType ObjectType, bool EnteringContext, TemplateTy &Template, bool AllowInjectedClassName = false); DeclResult ActOnClassTemplateSpecialization( Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc, SourceLocation ModulePrivateLoc, TemplateIdAnnotation &TemplateId, const ParsedAttributesView &Attr, MultiTemplateParamsArg TemplateParameterLists, SkipBodyInfo *SkipBody = 
nullptr); bool CheckTemplatePartialSpecializationArgs(SourceLocation Loc, TemplateDecl *PrimaryTemplate, unsigned NumExplicitArgs, ArrayRef<TemplateArgument> Args); void CheckTemplatePartialSpecialization( ClassTemplatePartialSpecializationDecl *Partial); void CheckTemplatePartialSpecialization( VarTemplatePartialSpecializationDecl *Partial); Decl *ActOnTemplateDeclarator(Scope *S, MultiTemplateParamsArg TemplateParameterLists, Declarator &D); bool CheckSpecializationInstantiationRedecl(SourceLocation NewLoc, TemplateSpecializationKind NewTSK, NamedDecl *PrevDecl, TemplateSpecializationKind PrevTSK, SourceLocation PrevPtOfInstantiation, bool &SuppressNew); bool CheckDependentFunctionTemplateSpecialization(FunctionDecl *FD, const TemplateArgumentListInfo &ExplicitTemplateArgs, LookupResult &Previous); bool CheckFunctionTemplateSpecialization( FunctionDecl *FD, TemplateArgumentListInfo *ExplicitTemplateArgs, LookupResult &Previous, bool QualifiedFriend = false); bool CheckMemberSpecialization(NamedDecl *Member, LookupResult &Previous); void CompleteMemberSpecialization(NamedDecl *Member, LookupResult &Previous); DeclResult ActOnExplicitInstantiation( Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc, unsigned TagSpec, SourceLocation KWLoc, const CXXScopeSpec &SS, TemplateTy Template, SourceLocation TemplateNameLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc, const ParsedAttributesView &Attr); DeclResult ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc, unsigned TagSpec, SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, const ParsedAttributesView &Attr); DeclResult ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc, Declarator &D); TemplateArgumentLoc SubstDefaultTemplateArgumentIfAvailable(TemplateDecl *Template, SourceLocation TemplateLoc, SourceLocation RAngleLoc, Decl *Param, 
SmallVectorImpl<TemplateArgument> &Converted, bool &HasDefaultArg);

/// Specifies the context in which a particular template
/// argument is being checked.
enum CheckTemplateArgumentKind {
  /// The template argument was specified in the code or was
  /// instantiated with some deduced template arguments.
  CTAK_Specified,

  /// The template argument was deduced via template argument
  /// deduction.
  CTAK_Deduced,

  /// The template argument was deduced from an array bound
  /// via template argument deduction.
  CTAK_DeducedFromArrayBound
};

bool CheckTemplateArgument(NamedDecl *Param, TemplateArgumentLoc &Arg,
                           NamedDecl *Template, SourceLocation TemplateLoc,
                           SourceLocation RAngleLoc, unsigned ArgumentPackIndex,
                           SmallVectorImpl<TemplateArgument> &Converted,
                           CheckTemplateArgumentKind CTAK = CTAK_Specified);

/// Check that the given template arguments can be provided to
/// the given template, converting the arguments along the way.
///
/// \param Template The template to which the template arguments are being
/// provided.
///
/// \param TemplateLoc The location of the template name in the source.
///
/// \param TemplateArgs The list of template arguments. If the template is
/// a template template parameter, this function may extend the set of
/// template arguments to also include substituted, defaulted template
/// arguments.
///
/// \param PartialTemplateArgs True if the list of template arguments is
/// intentionally partial, e.g., because we're checking just the initial
/// set of template arguments.
///
/// \param Converted Will receive the converted, canonicalized template
/// arguments.
///
/// \param UpdateArgsWithConversions If \c true, update \p TemplateArgs to
/// contain the converted forms of the template arguments as written.
/// Otherwise, \p TemplateArgs will not be modified.
///
/// \returns true if an error occurred, false otherwise.
bool CheckTemplateArgumentList(TemplateDecl *Template, SourceLocation TemplateLoc, TemplateArgumentListInfo &TemplateArgs, bool PartialTemplateArgs, SmallVectorImpl<TemplateArgument> &Converted, bool UpdateArgsWithConversions = true); bool CheckTemplateTypeArgument(TemplateTypeParmDecl *Param, TemplateArgumentLoc &Arg, SmallVectorImpl<TemplateArgument> &Converted); bool CheckTemplateArgument(TemplateTypeParmDecl *Param, TypeSourceInfo *Arg); ExprResult CheckTemplateArgument(NonTypeTemplateParmDecl *Param, QualType InstantiatedParamType, Expr *Arg, TemplateArgument &Converted, CheckTemplateArgumentKind CTAK = CTAK_Specified); bool CheckTemplateTemplateArgument(TemplateParameterList *Params, TemplateArgumentLoc &Arg); ExprResult BuildExpressionFromDeclTemplateArgument(const TemplateArgument &Arg, QualType ParamType, SourceLocation Loc); ExprResult BuildExpressionFromIntegralTemplateArgument(const TemplateArgument &Arg, SourceLocation Loc); /// Enumeration describing how template parameter lists are compared /// for equality. enum TemplateParameterListEqualKind { /// We are matching the template parameter lists of two templates /// that might be redeclarations. /// /// \code /// template<typename T> struct X; /// template<typename T> struct X; /// \endcode TPL_TemplateMatch, /// We are matching the template parameter lists of two template /// template parameters as part of matching the template parameter lists /// of two templates that might be redeclarations. /// /// \code /// template<template<int I> class TT> struct X; /// template<template<int Value> class Other> struct X; /// \endcode TPL_TemplateTemplateParmMatch, /// We are matching the template parameter lists of a template /// template argument against the template parameter lists of a template /// template parameter. 
/// /// \code /// template<template<int Value> class Metafun> struct X; /// template<int Value> struct integer_c; /// X<integer_c> xic; /// \endcode TPL_TemplateTemplateArgumentMatch }; bool TemplateParameterListsAreEqual(TemplateParameterList *New, TemplateParameterList *Old, bool Complain, TemplateParameterListEqualKind Kind, SourceLocation TemplateArgLoc = SourceLocation()); bool CheckTemplateDeclScope(Scope *S, TemplateParameterList *TemplateParams); /// Called when the parser has parsed a C++ typename /// specifier, e.g., "typename T::type". /// /// \param S The scope in which this typename type occurs. /// \param TypenameLoc the location of the 'typename' keyword /// \param SS the nested-name-specifier following the typename (e.g., 'T::'). /// \param II the identifier we're retrieving (e.g., 'type' in the example). /// \param IdLoc the location of the identifier. TypeResult ActOnTypenameType(Scope *S, SourceLocation TypenameLoc, const CXXScopeSpec &SS, const IdentifierInfo &II, SourceLocation IdLoc); /// Called when the parser has parsed a C++ typename /// specifier that ends in a template-id, e.g., /// "typename MetaFun::template apply<T1, T2>". /// /// \param S The scope in which this typename type occurs. /// \param TypenameLoc the location of the 'typename' keyword /// \param SS the nested-name-specifier following the typename (e.g., 'T::'). /// \param TemplateLoc the location of the 'template' keyword, if any. /// \param TemplateName The template name. /// \param TemplateII The identifier used to name the template. /// \param TemplateIILoc The location of the template name. /// \param LAngleLoc The location of the opening angle bracket ('<'). /// \param TemplateArgs The template arguments. /// \param RAngleLoc The location of the closing angle bracket ('>'). 
TypeResult ActOnTypenameType(Scope *S, SourceLocation TypenameLoc, const CXXScopeSpec &SS, SourceLocation TemplateLoc, TemplateTy TemplateName, IdentifierInfo *TemplateII, SourceLocation TemplateIILoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc); QualType CheckTypenameType(ElaboratedTypeKeyword Keyword, SourceLocation KeywordLoc, NestedNameSpecifierLoc QualifierLoc, const IdentifierInfo &II, SourceLocation IILoc); TypeSourceInfo *RebuildTypeInCurrentInstantiation(TypeSourceInfo *T, SourceLocation Loc, DeclarationName Name); bool RebuildNestedNameSpecifierInCurrentInstantiation(CXXScopeSpec &SS); ExprResult RebuildExprInCurrentInstantiation(Expr *E); bool RebuildTemplateParamsInCurrentInstantiation( TemplateParameterList *Params); std::string getTemplateArgumentBindingsText(const TemplateParameterList *Params, const TemplateArgumentList &Args); std::string getTemplateArgumentBindingsText(const TemplateParameterList *Params, const TemplateArgument *Args, unsigned NumArgs); // Concepts Decl *ActOnConceptDefinition( Scope *S, MultiTemplateParamsArg TemplateParameterLists, IdentifierInfo *Name, SourceLocation NameLoc, Expr *ConstraintExpr); //===--------------------------------------------------------------------===// // C++ Variadic Templates (C++0x [temp.variadic]) //===--------------------------------------------------------------------===// /// Determine whether an unexpanded parameter pack might be permitted in this /// location. Useful for error recovery. bool isUnexpandedParameterPackPermitted(); /// The context in which an unexpanded parameter pack is /// being diagnosed. /// /// Note that the values of this enumeration line up with the first /// argument to the \c err_unexpanded_parameter_pack diagnostic. enum UnexpandedParameterPackContext { /// An arbitrary expression. UPPC_Expression = 0, /// The base type of a class type. UPPC_BaseType, /// The type of an arbitrary declaration. 
UPPC_DeclarationType,

/// The type of a data member.
UPPC_DataMemberType,

/// The size of a bit-field.
UPPC_BitFieldWidth,

/// The expression in a static assertion.
UPPC_StaticAssertExpression,

/// The fixed underlying type of an enumeration.
UPPC_FixedUnderlyingType,

/// The enumerator value.
UPPC_EnumeratorValue,

/// A using declaration.
UPPC_UsingDeclaration,

/// A friend declaration.
UPPC_FriendDeclaration,

/// A declaration qualifier.
UPPC_DeclarationQualifier,

/// An initializer.
UPPC_Initializer,

/// A default argument.
UPPC_DefaultArgument,

/// The type of a non-type template parameter.
UPPC_NonTypeTemplateParameterType,

/// The type of an exception.
UPPC_ExceptionType,

/// Partial specialization.
UPPC_PartialSpecialization,

/// Microsoft __if_exists.
UPPC_IfExists,

/// Microsoft __if_not_exists.
UPPC_IfNotExists,

/// Lambda expression.
UPPC_Lambda,

/// Block expression.
UPPC_Block
};

/// Diagnose unexpanded parameter packs.
///
/// \param Loc The location at which we should emit the diagnostic.
///
/// \param UPPC The context in which we are diagnosing unexpanded
/// parameter packs.
///
/// \param Unexpanded the set of unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPacks(SourceLocation Loc,
                                      UnexpandedParameterPackContext UPPC,
                                      ArrayRef<UnexpandedParameterPack> Unexpanded);

/// If the given type contains an unexpanded parameter pack,
/// diagnose the error.
///
/// \param Loc The source location where a diagnostic should be emitted.
///
/// \param T The type that is being checked for unexpanded parameter
/// packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(SourceLocation Loc, TypeSourceInfo *T,
                                     UnexpandedParameterPackContext UPPC);

/// If the given expression contains an unexpanded parameter
/// pack, diagnose the error.
///
/// \param E The expression that is being checked for unexpanded
/// parameter packs.
/// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(Expr *E, UnexpandedParameterPackContext UPPC = UPPC_Expression); /// If the given nested-name-specifier contains an unexpanded /// parameter pack, diagnose the error. /// /// \param SS The nested-name-specifier that is being checked for /// unexpanded parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(const CXXScopeSpec &SS, UnexpandedParameterPackContext UPPC); /// If the given name contains an unexpanded parameter pack, /// diagnose the error. /// /// \param NameInfo The name (with source location information) that /// is being checked for unexpanded parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(const DeclarationNameInfo &NameInfo, UnexpandedParameterPackContext UPPC); /// If the given template name contains an unexpanded parameter pack, /// diagnose the error. /// /// \param Loc The location of the template name. /// /// \param Template The template name that is being checked for unexpanded /// parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(SourceLocation Loc, TemplateName Template, UnexpandedParameterPackContext UPPC); /// If the given template argument contains an unexpanded parameter /// pack, diagnose the error. /// /// \param Arg The template argument that is being checked for unexpanded /// parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(TemplateArgumentLoc Arg, UnexpandedParameterPackContext UPPC); /// Collect the set of unexpanded parameter packs within the given /// template argument. /// /// \param Arg The template argument that will be traversed to find /// unexpanded parameter packs. 
void collectUnexpandedParameterPacks(TemplateArgument Arg, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// template argument. /// /// \param Arg The template argument that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(TemplateArgumentLoc Arg, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// type. /// /// \param T The type that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(QualType T, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// type. /// /// \param TL The type that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(TypeLoc TL, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// nested-name-specifier. /// /// \param NNS The nested-name-specifier that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(NestedNameSpecifierLoc NNS, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// name. /// /// \param NameInfo The name that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(const DeclarationNameInfo &NameInfo, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Invoked when parsing a template argument followed by an /// ellipsis, which creates a pack expansion. /// /// \param Arg The template argument preceding the ellipsis, which /// may already be invalid. /// /// \param EllipsisLoc The location of the ellipsis. 
ParsedTemplateArgument ActOnPackExpansion(const ParsedTemplateArgument &Arg, SourceLocation EllipsisLoc); /// Invoked when parsing a type followed by an ellipsis, which /// creates a pack expansion. /// /// \param Type The type preceding the ellipsis, which will become /// the pattern of the pack expansion. /// /// \param EllipsisLoc The location of the ellipsis. TypeResult ActOnPackExpansion(ParsedType Type, SourceLocation EllipsisLoc); /// Construct a pack expansion type from the pattern of the pack /// expansion. TypeSourceInfo *CheckPackExpansion(TypeSourceInfo *Pattern, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions); /// Construct a pack expansion type from the pattern of the pack /// expansion. QualType CheckPackExpansion(QualType Pattern, SourceRange PatternRange, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions); /// Invoked when parsing an expression followed by an ellipsis, which /// creates a pack expansion. /// /// \param Pattern The expression preceding the ellipsis, which will become /// the pattern of the pack expansion. /// /// \param EllipsisLoc The location of the ellipsis. ExprResult ActOnPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc); /// Invoked when parsing an expression followed by an ellipsis, which /// creates a pack expansion. /// /// \param Pattern The expression preceding the ellipsis, which will become /// the pattern of the pack expansion. /// /// \param EllipsisLoc The location of the ellipsis. ExprResult CheckPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions); /// Determine whether we could expand a pack expansion with the /// given set of parameter packs into separate arguments by repeatedly /// transforming the pattern. /// /// \param EllipsisLoc The location of the ellipsis that identifies the /// pack expansion. /// /// \param PatternRange The source range that covers the entire pattern of /// the pack expansion. 
/// /// \param Unexpanded The set of unexpanded parameter packs within the /// pattern. /// /// \param ShouldExpand Will be set to \c true if the transformer should /// expand the corresponding pack expansions into separate arguments. When /// set, \c NumExpansions must also be set. /// /// \param RetainExpansion Whether the caller should add an unexpanded /// pack expansion after all of the expanded arguments. This is used /// when extending explicitly-specified template argument packs per /// C++0x [temp.arg.explicit]p9. /// /// \param NumExpansions The number of separate arguments that will be in /// the expanded form of the corresponding pack expansion. This is both an /// input and an output parameter, which can be set by the caller if the /// number of expansions is known a priori (e.g., due to a prior substitution) /// and will be set by the callee when the number of expansions is known. /// The callee must set this value when \c ShouldExpand is \c true; it may /// set this value in other cases. /// /// \returns true if an error occurred (e.g., because the parameter packs /// are to be instantiated with arguments of different lengths), false /// otherwise. If false, \c ShouldExpand (and possibly \c NumExpansions) /// must be set. bool CheckParameterPacksForExpansion(SourceLocation EllipsisLoc, SourceRange PatternRange, ArrayRef<UnexpandedParameterPack> Unexpanded, const MultiLevelTemplateArgumentList &TemplateArgs, bool &ShouldExpand, bool &RetainExpansion, Optional<unsigned> &NumExpansions); /// Determine the number of arguments in the given pack expansion /// type. /// /// This routine assumes that the number of arguments in the expansion is /// consistent across all of the unexpanded parameter packs in its pattern. /// /// Returns an empty Optional if the type can't be expanded. 
Optional<unsigned> getNumArgumentsInExpansion(QualType T, const MultiLevelTemplateArgumentList &TemplateArgs); /// Determine whether the given declarator contains any unexpanded /// parameter packs. /// /// This routine is used by the parser to disambiguate function declarators /// with an ellipsis prior to the ')', e.g., /// /// \code /// void f(T...); /// \endcode /// /// To determine whether we have an (unnamed) function parameter pack or /// a variadic function. /// /// \returns true if the declarator contains any unexpanded parameter packs, /// false otherwise. bool containsUnexpandedParameterPacks(Declarator &D); /// Returns the pattern of the pack expansion for a template argument. /// /// \param OrigLoc The template argument to expand. /// /// \param Ellipsis Will be set to the location of the ellipsis. /// /// \param NumExpansions Will be set to the number of expansions that will /// be generated from this pack expansion, if known a priori. TemplateArgumentLoc getTemplateArgumentPackExpansionPattern( TemplateArgumentLoc OrigLoc, SourceLocation &Ellipsis, Optional<unsigned> &NumExpansions) const; /// Given a template argument that contains an unexpanded parameter pack, but /// which has already been substituted, attempt to determine the number of /// elements that will be produced once this argument is fully-expanded. /// /// This is intended for use when transforming 'sizeof...(Arg)' in order to /// avoid actually expanding the pack where possible. Optional<unsigned> getFullyPackExpandedSize(TemplateArgument Arg); //===--------------------------------------------------------------------===// // C++ Template Argument Deduction (C++ [temp.deduct]) //===--------------------------------------------------------------------===// /// Adjust the type \p ArgFunctionType to match the calling convention, /// noreturn, and optionally the exception specification of \p FunctionType. /// Deduction often wants to ignore these properties when matching function /// types. 
QualType adjustCCAndNoReturn(QualType ArgFunctionType, QualType FunctionType, bool AdjustExceptionSpec = false); /// Describes the result of template argument deduction. /// /// The TemplateDeductionResult enumeration describes the result of /// template argument deduction, as returned from /// DeduceTemplateArguments(). The separate TemplateDeductionInfo /// structure provides additional information about the results of /// template argument deduction, e.g., the deduced template argument /// list (if successful) or the specific template parameters or /// deduced arguments that were involved in the failure. enum TemplateDeductionResult { /// Template argument deduction was successful. TDK_Success = 0, /// The declaration was invalid; do nothing. TDK_Invalid, /// Template argument deduction exceeded the maximum template /// instantiation depth (which has already been diagnosed). TDK_InstantiationDepth, /// Template argument deduction did not deduce a value /// for every template parameter. TDK_Incomplete, /// Template argument deduction did not deduce a value for every /// expansion of an expanded template parameter pack. TDK_IncompletePack, /// Template argument deduction produced inconsistent /// deduced values for the given template parameter. TDK_Inconsistent, /// Template argument deduction failed due to inconsistent /// cv-qualifiers on a template parameter type that would /// otherwise be deduced, e.g., we tried to deduce T in "const T" /// but were given a non-const "X". TDK_Underqualified, /// Substitution of the deduced template argument values /// resulted in an error. TDK_SubstitutionFailure, /// After substituting deduced template arguments, a dependent /// parameter type did not match the corresponding argument. TDK_DeducedMismatch, /// After substituting deduced template arguments, an element of /// a dependent parameter type did not match the corresponding element /// of the corresponding argument (when deducing from an initializer list). 
TDK_DeducedMismatchNested,

/// A non-dependent component of the parameter did not match the
/// corresponding component of the argument.
TDK_NonDeducedMismatch,

/// When performing template argument deduction for a function
/// template, there were too many call arguments.
TDK_TooManyArguments,

/// When performing template argument deduction for a function
/// template, there were too few call arguments.
TDK_TooFewArguments,

/// The explicitly-specified template arguments were not valid
/// template arguments for the given template.
TDK_InvalidExplicitArguments,

/// Checking non-dependent argument conversions failed.
TDK_NonDependentConversionFailure,

/// Deduction failed; that's all we know.
TDK_MiscellaneousDeductionFailure,

/// CUDA Target attributes do not match.
TDK_CUDATargetMismatch
};

TemplateDeductionResult
DeduceTemplateArguments(ClassTemplatePartialSpecializationDecl *Partial,
                        const TemplateArgumentList &TemplateArgs,
                        sema::TemplateDeductionInfo &Info);

TemplateDeductionResult
DeduceTemplateArguments(VarTemplatePartialSpecializationDecl *Partial,
                        const TemplateArgumentList &TemplateArgs,
                        sema::TemplateDeductionInfo &Info);

TemplateDeductionResult SubstituteExplicitTemplateArguments(
    FunctionTemplateDecl *FunctionTemplate,
    TemplateArgumentListInfo &ExplicitTemplateArgs,
    SmallVectorImpl<DeducedTemplateArgument> &Deduced,
    SmallVectorImpl<QualType> &ParamTypes, QualType *FunctionType,
    sema::TemplateDeductionInfo &Info);

/// \brief A function argument from which we performed template argument
/// deduction for a call.
struct OriginalCallArg { OriginalCallArg(QualType OriginalParamType, bool DecomposedParam, unsigned ArgIdx, QualType OriginalArgType) : OriginalParamType(OriginalParamType), DecomposedParam(DecomposedParam), ArgIdx(ArgIdx), OriginalArgType(OriginalArgType) {} QualType OriginalParamType; bool DecomposedParam; unsigned ArgIdx; QualType OriginalArgType; }; TemplateDeductionResult FinishTemplateArgumentDeduction( FunctionTemplateDecl *FunctionTemplate, SmallVectorImpl<DeducedTemplateArgument> &Deduced, unsigned NumExplicitlySpecified, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, SmallVectorImpl<OriginalCallArg> const *OriginalCallArgs = nullptr, bool PartialOverloading = false, llvm::function_ref<bool()> CheckNonDependent = []{ return false; }); TemplateDeductionResult DeduceTemplateArguments( FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, bool PartialOverloading, llvm::function_ref<bool(ArrayRef<QualType>)> CheckNonDependent); TemplateDeductionResult DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo *ExplicitTemplateArgs, QualType ArgFunctionType, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, bool IsAddressOfFunction = false); TemplateDeductionResult DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate, QualType ToType, CXXConversionDecl *&Specialization, sema::TemplateDeductionInfo &Info); TemplateDeductionResult DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo *ExplicitTemplateArgs, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, bool IsAddressOfFunction = false); /// Substitute Replacement for \p auto in \p TypeWithAuto QualType SubstAutoType(QualType TypeWithAuto, QualType Replacement); /// Substitute Replacement for auto in TypeWithAuto TypeSourceInfo* SubstAutoTypeSourceInfo(TypeSourceInfo 
*TypeWithAuto, QualType Replacement); /// Completely replace the \c auto in \p TypeWithAuto by /// \p Replacement. This does not retain any \c auto type sugar. QualType ReplaceAutoType(QualType TypeWithAuto, QualType Replacement); /// Result type of DeduceAutoType. enum DeduceAutoResult { DAR_Succeeded, DAR_Failed, DAR_FailedAlreadyDiagnosed }; DeduceAutoResult DeduceAutoType(TypeSourceInfo *AutoType, Expr *&Initializer, QualType &Result, Optional<unsigned> DependentDeductionDepth = None); DeduceAutoResult DeduceAutoType(TypeLoc AutoTypeLoc, Expr *&Initializer, QualType &Result, Optional<unsigned> DependentDeductionDepth = None); void DiagnoseAutoDeductionFailure(VarDecl *VDecl, Expr *Init); bool DeduceReturnType(FunctionDecl *FD, SourceLocation Loc, bool Diagnose = true); /// Declare implicit deduction guides for a class template if we've /// not already done so. void DeclareImplicitDeductionGuides(TemplateDecl *Template, SourceLocation Loc); QualType DeduceTemplateSpecializationFromInitializer( TypeSourceInfo *TInfo, const InitializedEntity &Entity, const InitializationKind &Kind, MultiExprArg Init); QualType deduceVarTypeFromInitializer(VarDecl *VDecl, DeclarationName Name, QualType Type, TypeSourceInfo *TSI, SourceRange Range, bool DirectInit, Expr *Init); TypeLoc getReturnTypeLoc(FunctionDecl *FD) const; bool DeduceFunctionTypeFromReturnExpr(FunctionDecl *FD, SourceLocation ReturnLoc, Expr *&RetExpr, AutoType *AT); FunctionTemplateDecl *getMoreSpecializedTemplate(FunctionTemplateDecl *FT1, FunctionTemplateDecl *FT2, SourceLocation Loc, TemplatePartialOrderingContext TPOC, unsigned NumCallArguments1, unsigned NumCallArguments2); UnresolvedSetIterator getMostSpecialized(UnresolvedSetIterator SBegin, UnresolvedSetIterator SEnd, TemplateSpecCandidateSet &FailedCandidates, SourceLocation Loc, const PartialDiagnostic &NoneDiag, const PartialDiagnostic &AmbigDiag, const PartialDiagnostic &CandidateDiag, bool Complain = true, QualType TargetType = QualType()); 
ClassTemplatePartialSpecializationDecl * getMoreSpecializedPartialSpecialization( ClassTemplatePartialSpecializationDecl *PS1, ClassTemplatePartialSpecializationDecl *PS2, SourceLocation Loc); bool isMoreSpecializedThanPrimary(ClassTemplatePartialSpecializationDecl *T, sema::TemplateDeductionInfo &Info); VarTemplatePartialSpecializationDecl *getMoreSpecializedPartialSpecialization( VarTemplatePartialSpecializationDecl *PS1, VarTemplatePartialSpecializationDecl *PS2, SourceLocation Loc); bool isMoreSpecializedThanPrimary(VarTemplatePartialSpecializationDecl *T, sema::TemplateDeductionInfo &Info); bool isTemplateTemplateParameterAtLeastAsSpecializedAs( TemplateParameterList *P, TemplateDecl *AArg, SourceLocation Loc); void MarkUsedTemplateParameters(const TemplateArgumentList &TemplateArgs, bool OnlyDeduced, unsigned Depth, llvm::SmallBitVector &Used); void MarkDeducedTemplateParameters( const FunctionTemplateDecl *FunctionTemplate, llvm::SmallBitVector &Deduced) { return MarkDeducedTemplateParameters(Context, FunctionTemplate, Deduced); } static void MarkDeducedTemplateParameters(ASTContext &Ctx, const FunctionTemplateDecl *FunctionTemplate, llvm::SmallBitVector &Deduced); //===--------------------------------------------------------------------===// // C++ Template Instantiation // MultiLevelTemplateArgumentList getTemplateInstantiationArgs(NamedDecl *D, const TemplateArgumentList *Innermost = nullptr, bool RelativeToPrimary = false, const FunctionDecl *Pattern = nullptr); /// A context in which code is being synthesized (where a source location /// alone is not sufficient to identify the context). This covers template /// instantiation and various forms of implicitly-generated functions. struct CodeSynthesisContext { /// The kind of template instantiation we are performing enum SynthesisKind { /// We are instantiating a template declaration. The entity is /// the declaration we're instantiating (e.g., a CXXRecordDecl). 
TemplateInstantiation, /// We are instantiating a default argument for a template /// parameter. The Entity is the template parameter whose argument is /// being instantiated, the Template is the template, and the /// TemplateArgs/NumTemplateArguments provide the template arguments as /// specified. DefaultTemplateArgumentInstantiation, /// We are instantiating a default argument for a function. /// The Entity is the ParmVarDecl, and TemplateArgs/NumTemplateArgs /// provides the template arguments as specified. DefaultFunctionArgumentInstantiation, /// We are substituting explicit template arguments provided for /// a function template. The entity is a FunctionTemplateDecl. ExplicitTemplateArgumentSubstitution, /// We are substituting template argument determined as part of /// template argument deduction for either a class template /// partial specialization or a function template. The /// Entity is either a {Class|Var}TemplatePartialSpecializationDecl or /// a TemplateDecl. DeducedTemplateArgumentSubstitution, /// We are substituting prior template arguments into a new /// template parameter. The template parameter itself is either a /// NonTypeTemplateParmDecl or a TemplateTemplateParmDecl. PriorTemplateArgumentSubstitution, /// We are checking the validity of a default template argument that /// has been used when naming a template-id. DefaultTemplateArgumentChecking, /// We are computing the exception specification for a defaulted special /// member function. ExceptionSpecEvaluation, /// We are instantiating the exception specification for a function /// template which was deferred until it was needed. ExceptionSpecInstantiation, /// We are declaring an implicit special member function. DeclaringSpecialMember, /// We are defining a synthesized function (such as a defaulted special /// member). DefiningSynthesizedFunction, // We are checking the constraints associated with a constrained entity or // the constraint expression of a concept. 
This includes the checks that // atomic constraints have the type 'bool' and that they can be constant // evaluated. ConstraintsCheck, // We are substituting template arguments into a constraint expression. ConstraintSubstitution, /// Added for Template instantiation observation. /// Memoization means we are _not_ instantiating a template because /// it is already instantiated (but we entered a context where we /// would have had to if it was not already instantiated). Memoization } Kind; /// Was the enclosing context a non-instantiation SFINAE context? bool SavedInNonInstantiationSFINAEContext; /// The point of instantiation or synthesis within the source code. SourceLocation PointOfInstantiation; /// The entity that is being synthesized. Decl *Entity; /// The template (or partial specialization) in which we are /// performing the instantiation, for substitutions of prior template /// arguments. NamedDecl *Template; /// The list of template arguments we are substituting, if they /// are not part of the entity. const TemplateArgument *TemplateArgs; // FIXME: Wrap this union around more members, or perhaps store the // kind-specific members in the RAII object owning the context. union { /// The number of template arguments in TemplateArgs. unsigned NumTemplateArgs; /// The special member being declared or defined. CXXSpecialMember SpecialMember; }; ArrayRef<TemplateArgument> template_arguments() const { assert(Kind != DeclaringSpecialMember); return {TemplateArgs, NumTemplateArgs}; } /// The template deduction info object associated with the /// substitution or checking of explicit or deduced template arguments. sema::TemplateDeductionInfo *DeductionInfo; /// The source range that covers the construct that cause /// the instantiation, e.g., the template-id that causes a class /// template instantiation. 
SourceRange InstantiationRange; CodeSynthesisContext() : Kind(TemplateInstantiation), SavedInNonInstantiationSFINAEContext(false), Entity(nullptr), Template(nullptr), TemplateArgs(nullptr), NumTemplateArgs(0), DeductionInfo(nullptr) {} /// Determines whether this template is an actual instantiation /// that should be counted toward the maximum instantiation depth. bool isInstantiationRecord() const; }; /// List of active code synthesis contexts. /// /// This vector is treated as a stack. As synthesis of one entity requires /// synthesis of another, additional contexts are pushed onto the stack. SmallVector<CodeSynthesisContext, 16> CodeSynthesisContexts; /// Specializations whose definitions are currently being instantiated. llvm::DenseSet<std::pair<Decl *, unsigned>> InstantiatingSpecializations; /// Non-dependent types used in templates that have already been instantiated /// by some template instantiation. llvm::DenseSet<QualType> InstantiatedNonDependentTypes; /// Extra modules inspected when performing a lookup during a template /// instantiation. Computed lazily. SmallVector<Module*, 16> CodeSynthesisContextLookupModules; /// Cache of additional modules that should be used for name lookup /// within the current template instantiation. Computed lazily; use /// getLookupModules() to get a complete set. llvm::DenseSet<Module*> LookupModulesCache; /// Get the set of additional modules that should be checked during /// name lookup. A module and its imports become visible when instanting a /// template defined within it. llvm::DenseSet<Module*> &getLookupModules(); /// Map from the most recent declaration of a namespace to the most /// recent visible declaration of that namespace. llvm::DenseMap<NamedDecl*, NamedDecl*> VisibleNamespaceCache; /// Whether we are in a SFINAE context that is not associated with /// template instantiation. 
/// /// This is used when setting up a SFINAE trap (\c see SFINAETrap) outside /// of a template instantiation or template argument deduction. bool InNonInstantiationSFINAEContext; /// The number of \p CodeSynthesisContexts that are not template /// instantiations and, therefore, should not be counted as part of the /// instantiation depth. /// /// When the instantiation depth reaches the user-configurable limit /// \p LangOptions::InstantiationDepth we will abort instantiation. // FIXME: Should we have a similar limit for other forms of synthesis? unsigned NonInstantiationEntries; /// The depth of the context stack at the point when the most recent /// error or warning was produced. /// /// This value is used to suppress printing of redundant context stacks /// when there are multiple errors or warnings in the same instantiation. // FIXME: Does this belong in Sema? It's tough to implement it anywhere else. unsigned LastEmittedCodeSynthesisContextDepth = 0; /// The template instantiation callbacks to trace or track /// instantiations (objects can be chained). /// /// This callbacks is used to print, trace or track template /// instantiations as they are being constructed. std::vector<std::unique_ptr<TemplateInstantiationCallback>> TemplateInstCallbacks; /// The current index into pack expansion arguments that will be /// used for substitution of parameter packs. /// /// The pack expansion index will be -1 to indicate that parameter packs /// should be instantiated as themselves. Otherwise, the index specifies /// which argument within the parameter pack will be used for substitution. int ArgumentPackSubstitutionIndex; /// RAII object used to change the argument pack substitution index /// within a \c Sema object. /// /// See \c ArgumentPackSubstitutionIndex for more information. 
class ArgumentPackSubstitutionIndexRAII { Sema &Self; int OldSubstitutionIndex; public: ArgumentPackSubstitutionIndexRAII(Sema &Self, int NewSubstitutionIndex) : Self(Self), OldSubstitutionIndex(Self.ArgumentPackSubstitutionIndex) { Self.ArgumentPackSubstitutionIndex = NewSubstitutionIndex; } ~ArgumentPackSubstitutionIndexRAII() { Self.ArgumentPackSubstitutionIndex = OldSubstitutionIndex; } }; friend class ArgumentPackSubstitutionRAII; /// For each declaration that involved template argument deduction, the /// set of diagnostics that were suppressed during that template argument /// deduction. /// /// FIXME: Serialize this structure to the AST file. typedef llvm::DenseMap<Decl *, SmallVector<PartialDiagnosticAt, 1> > SuppressedDiagnosticsMap; SuppressedDiagnosticsMap SuppressedDiagnostics; /// A stack object to be created when performing template /// instantiation. /// /// Construction of an object of type \c InstantiatingTemplate /// pushes the current instantiation onto the stack of active /// instantiations. If the size of this stack exceeds the maximum /// number of recursive template instantiations, construction /// produces an error and evaluates true. /// /// Destruction of this object will pop the named instantiation off /// the stack. struct InstantiatingTemplate { /// Note that we are instantiating a class template, /// function template, variable template, alias template, /// or a member thereof. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, Decl *Entity, SourceRange InstantiationRange = SourceRange()); struct ExceptionSpecification {}; /// Note that we are instantiating an exception specification /// of a function template. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, FunctionDecl *Entity, ExceptionSpecification, SourceRange InstantiationRange = SourceRange()); /// Note that we are instantiating a default argument in a /// template-id. 
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, TemplateParameter Param, TemplateDecl *Template, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange = SourceRange()); /// Note that we are substituting either explicitly-specified or /// deduced template arguments during function template argument deduction. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, FunctionTemplateDecl *FunctionTemplate, ArrayRef<TemplateArgument> TemplateArgs, CodeSynthesisContext::SynthesisKind Kind, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// Note that we are instantiating as part of template /// argument deduction for a class template declaration. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, TemplateDecl *Template, ArrayRef<TemplateArgument> TemplateArgs, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// Note that we are instantiating as part of template /// argument deduction for a class template partial /// specialization. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, ClassTemplatePartialSpecializationDecl *PartialSpec, ArrayRef<TemplateArgument> TemplateArgs, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// Note that we are instantiating as part of template /// argument deduction for a variable template partial /// specialization. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, VarTemplatePartialSpecializationDecl *PartialSpec, ArrayRef<TemplateArgument> TemplateArgs, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// Note that we are instantiating a default argument for a function /// parameter. 
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, ParmVarDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange = SourceRange()); /// Note that we are substituting prior template arguments into a /// non-type parameter. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, NamedDecl *Template, NonTypeTemplateParmDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange); /// Note that we are substituting prior template arguments into a /// template template parameter. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, NamedDecl *Template, TemplateTemplateParmDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange); /// Note that we are checking the default template argument /// against the template parameter for a given template-id. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, TemplateDecl *Template, NamedDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange); struct ConstraintsCheck {}; /// \brief Note that we are checking the constraints associated with some /// constrained entity (a concept declaration or a template with associated /// constraints). InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, ConstraintsCheck, TemplateDecl *Template, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange); struct ConstraintSubstitution {}; /// \brief Note that we are checking a constraint expression associated /// with a template declaration or as part of the satisfaction check of a /// concept. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, ConstraintSubstitution, TemplateDecl *Template, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange); /// Note that we have finished instantiating this template. 
void Clear(); ~InstantiatingTemplate() { Clear(); } /// Determines whether we have exceeded the maximum /// recursive template instantiations. bool isInvalid() const { return Invalid; } /// Determine whether we are already instantiating this /// specialization in some surrounding active instantiation. bool isAlreadyInstantiating() const { return AlreadyInstantiating; } private: Sema &SemaRef; bool Invalid; bool AlreadyInstantiating; bool CheckInstantiationDepth(SourceLocation PointOfInstantiation, SourceRange InstantiationRange); InstantiatingTemplate( Sema &SemaRef, CodeSynthesisContext::SynthesisKind Kind, SourceLocation PointOfInstantiation, SourceRange InstantiationRange, Decl *Entity, NamedDecl *Template = nullptr, ArrayRef<TemplateArgument> TemplateArgs = None, sema::TemplateDeductionInfo *DeductionInfo = nullptr); InstantiatingTemplate(const InstantiatingTemplate&) = delete; InstantiatingTemplate& operator=(const InstantiatingTemplate&) = delete; }; void pushCodeSynthesisContext(CodeSynthesisContext Ctx); void popCodeSynthesisContext(); /// Determine whether we are currently performing template instantiation. bool inTemplateInstantiation() const { return CodeSynthesisContexts.size() > NonInstantiationEntries; } void PrintContextStack() { if (!CodeSynthesisContexts.empty() && CodeSynthesisContexts.size() != LastEmittedCodeSynthesisContextDepth) { PrintInstantiationStack(); LastEmittedCodeSynthesisContextDepth = CodeSynthesisContexts.size(); } if (PragmaAttributeCurrentTargetDecl) PrintPragmaAttributeInstantiationPoint(); } void PrintInstantiationStack(); void PrintPragmaAttributeInstantiationPoint(); /// Determines whether we are currently in a context where /// template argument substitution failures are not considered /// errors. /// /// \returns An empty \c Optional if we're not in a SFINAE context. 
/// Otherwise, contains a pointer that, if non-NULL, contains the nearest /// template-deduction context object, which can be used to capture /// diagnostics that will be suppressed. Optional<sema::TemplateDeductionInfo *> isSFINAEContext() const; /// Determines whether we are currently in a context that /// is not evaluated as per C++ [expr] p5. bool isUnevaluatedContext() const { assert(!ExprEvalContexts.empty() && "Must be in an expression evaluation context"); return ExprEvalContexts.back().isUnevaluated(); } /// RAII class used to determine whether SFINAE has /// trapped any errors that occur during template argument /// deduction. class SFINAETrap { Sema &SemaRef; unsigned PrevSFINAEErrors; bool PrevInNonInstantiationSFINAEContext; bool PrevAccessCheckingSFINAE; bool PrevLastDiagnosticIgnored; public: explicit SFINAETrap(Sema &SemaRef, bool AccessCheckingSFINAE = false) : SemaRef(SemaRef), PrevSFINAEErrors(SemaRef.NumSFINAEErrors), PrevInNonInstantiationSFINAEContext( SemaRef.InNonInstantiationSFINAEContext), PrevAccessCheckingSFINAE(SemaRef.AccessCheckingSFINAE), PrevLastDiagnosticIgnored( SemaRef.getDiagnostics().isLastDiagnosticIgnored()) { if (!SemaRef.isSFINAEContext()) SemaRef.InNonInstantiationSFINAEContext = true; SemaRef.AccessCheckingSFINAE = AccessCheckingSFINAE; } ~SFINAETrap() { SemaRef.NumSFINAEErrors = PrevSFINAEErrors; SemaRef.InNonInstantiationSFINAEContext = PrevInNonInstantiationSFINAEContext; SemaRef.AccessCheckingSFINAE = PrevAccessCheckingSFINAE; SemaRef.getDiagnostics().setLastDiagnosticIgnored( PrevLastDiagnosticIgnored); } /// Determine whether any SFINAE errors have been trapped. bool hasErrorOccurred() const { return SemaRef.NumSFINAEErrors > PrevSFINAEErrors; } }; /// RAII class used to indicate that we are performing provisional /// semantic analysis to determine the validity of a construct, so /// typo-correction and diagnostics in the immediate context (not within /// implicitly-instantiated templates) should be suppressed. 
class TentativeAnalysisScope { Sema &SemaRef; // FIXME: Using a SFINAETrap for this is a hack. SFINAETrap Trap; bool PrevDisableTypoCorrection; public: explicit TentativeAnalysisScope(Sema &SemaRef) : SemaRef(SemaRef), Trap(SemaRef, true), PrevDisableTypoCorrection(SemaRef.DisableTypoCorrection) { SemaRef.DisableTypoCorrection = true; } ~TentativeAnalysisScope() { SemaRef.DisableTypoCorrection = PrevDisableTypoCorrection; } }; /// The current instantiation scope used to store local /// variables. LocalInstantiationScope *CurrentInstantiationScope; /// Tracks whether we are in a context where typo correction is /// disabled. bool DisableTypoCorrection; /// The number of typos corrected by CorrectTypo. unsigned TyposCorrected; typedef llvm::SmallSet<SourceLocation, 2> SrcLocSet; typedef llvm::DenseMap<IdentifierInfo *, SrcLocSet> IdentifierSourceLocations; /// A cache containing identifiers for which typo correction failed and /// their locations, so that repeated attempts to correct an identifier in a /// given location are ignored if typo correction already failed for it. IdentifierSourceLocations TypoCorrectionFailures; /// Worker object for performing CFG-based warnings. sema::AnalysisBasedWarnings AnalysisWarnings; threadSafety::BeforeSet *ThreadSafetyDeclCache; /// An entity for which implicit template instantiation is required. /// /// The source location associated with the declaration is the first place in /// the source code where the declaration was "used". It is not necessarily /// the point of instantiation (which will be either before or after the /// namespace-scope declaration that triggered this implicit instantiation), /// However, it is the location that diagnostics should generally refer to, /// because users will need to know what code triggered the instantiation. typedef std::pair<ValueDecl *, SourceLocation> PendingImplicitInstantiation; /// The queue of implicit template instantiations that are required /// but have not yet been performed. 
std::deque<PendingImplicitInstantiation> PendingInstantiations;

/// Queue of implicit template instantiations that cannot be performed
/// eagerly.
SmallVector<PendingImplicitInstantiation, 1> LateParsedInstantiations;

/// RAII scope that (when \p Enabled) stashes away the current global
/// queues of pending instantiations and vtable uses on construction,
/// so that work queued inside the scope can be performed via perform()
/// before the destructor restores the saved queues. The destructor
/// asserts that all work queued within the scope was drained.
class GlobalEagerInstantiationScope {
public:
  GlobalEagerInstantiationScope(Sema &S, bool Enabled)
      : S(S), Enabled(Enabled) {
    if (!Enabled) return;

    // Save (and empty) the global queues; they collect only work
    // queued while this scope is active.
    SavedPendingInstantiations.swap(S.PendingInstantiations);
    SavedVTableUses.swap(S.VTableUses);
  }

  void perform() {
    if (Enabled) {
      // Drain the work queued during this scope.
      S.DefineUsedVTables();
      S.PerformPendingInstantiations();
    }
  }

  ~GlobalEagerInstantiationScope() {
    if (!Enabled) return;

    // Restore the set of pending vtables.
    assert(S.VTableUses.empty() &&
           "VTableUses should be empty before it is discarded.");
    S.VTableUses.swap(SavedVTableUses);

    // Restore the set of pending implicit instantiations.
    assert(S.PendingInstantiations.empty() &&
           "PendingInstantiations should be empty before it is discarded.");
    S.PendingInstantiations.swap(SavedPendingInstantiations);
  }

private:
  Sema &S;
  SmallVector<VTableUse, 16> SavedVTableUses;
  std::deque<PendingImplicitInstantiation> SavedPendingInstantiations;
  bool Enabled;
};

/// The queue of implicit template instantiations that are required
/// and must be performed within the current local scope.
///
/// This queue is only used for member functions of local classes in
/// templates, which must be instantiated in the same scope as their
/// enclosing function, so that they can reference function-local
/// types, static variables, enumerators, etc.
std::deque<PendingImplicitInstantiation> PendingLocalImplicitInstantiations; class LocalEagerInstantiationScope { public: LocalEagerInstantiationScope(Sema &S) : S(S) { SavedPendingLocalImplicitInstantiations.swap( S.PendingLocalImplicitInstantiations); } void perform() { S.PerformPendingInstantiations(/*LocalOnly=*/true); } ~LocalEagerInstantiationScope() { assert(S.PendingLocalImplicitInstantiations.empty() && "there shouldn't be any pending local implicit instantiations"); SavedPendingLocalImplicitInstantiations.swap( S.PendingLocalImplicitInstantiations); } private: Sema &S; std::deque<PendingImplicitInstantiation> SavedPendingLocalImplicitInstantiations; }; /// A helper class for building up ExtParameterInfos. class ExtParameterInfoBuilder { SmallVector<FunctionProtoType::ExtParameterInfo, 16> Infos; bool HasInteresting = false; public: /// Set the ExtParameterInfo for the parameter at the given index, /// void set(unsigned index, FunctionProtoType::ExtParameterInfo info) { assert(Infos.size() <= index); Infos.resize(index); Infos.push_back(info); if (!HasInteresting) HasInteresting = (info != FunctionProtoType::ExtParameterInfo()); } /// Return a pointer (suitable for setting in an ExtProtoInfo) to the /// ExtParameterInfo array we've built up. 
const FunctionProtoType::ExtParameterInfo * getPointerOrNull(unsigned numParams) { if (!HasInteresting) return nullptr; Infos.resize(numParams); return Infos.data(); } }; void PerformPendingInstantiations(bool LocalOnly = false); TypeSourceInfo *SubstType(TypeSourceInfo *T, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity, bool AllowDeducedTST = false); QualType SubstType(QualType T, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity); TypeSourceInfo *SubstType(TypeLoc TL, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity); TypeSourceInfo *SubstFunctionDeclType(TypeSourceInfo *T, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity, CXXRecordDecl *ThisContext, Qualifiers ThisTypeQuals); void SubstExceptionSpec(FunctionDecl *New, const FunctionProtoType *Proto, const MultiLevelTemplateArgumentList &Args); bool SubstExceptionSpec(SourceLocation Loc, FunctionProtoType::ExceptionSpecInfo &ESI, SmallVectorImpl<QualType> &ExceptionStorage, const MultiLevelTemplateArgumentList &Args); ParmVarDecl *SubstParmVarDecl(ParmVarDecl *D, const MultiLevelTemplateArgumentList &TemplateArgs, int indexAdjustment, Optional<unsigned> NumExpansions, bool ExpectParameterPack); bool SubstParmTypes(SourceLocation Loc, ArrayRef<ParmVarDecl *> Params, const FunctionProtoType::ExtParameterInfo *ExtParamInfos, const MultiLevelTemplateArgumentList &TemplateArgs, SmallVectorImpl<QualType> &ParamTypes, SmallVectorImpl<ParmVarDecl *> *OutParams, ExtParameterInfoBuilder &ParamInfos); ExprResult SubstExpr(Expr *E, const MultiLevelTemplateArgumentList &TemplateArgs); /// Substitute the given template arguments into a list of /// expressions, expanding pack expansions if required. /// /// \param Exprs The list of expressions to substitute into. 
/// /// \param IsCall Whether this is some form of call, in which case /// default arguments will be dropped. /// /// \param TemplateArgs The set of template arguments to substitute. /// /// \param Outputs Will receive all of the substituted arguments. /// /// \returns true if an error occurred, false otherwise. bool SubstExprs(ArrayRef<Expr *> Exprs, bool IsCall, const MultiLevelTemplateArgumentList &TemplateArgs, SmallVectorImpl<Expr *> &Outputs); StmtResult SubstStmt(Stmt *S, const MultiLevelTemplateArgumentList &TemplateArgs); TemplateParameterList * SubstTemplateParams(TemplateParameterList *Params, DeclContext *Owner, const MultiLevelTemplateArgumentList &TemplateArgs); Decl *SubstDecl(Decl *D, DeclContext *Owner, const MultiLevelTemplateArgumentList &TemplateArgs); ExprResult SubstInitializer(Expr *E, const MultiLevelTemplateArgumentList &TemplateArgs, bool CXXDirectInit); bool SubstBaseSpecifiers(CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs); bool InstantiateClass(SourceLocation PointOfInstantiation, CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateSpecializationKind TSK, bool Complain = true); bool InstantiateEnum(SourceLocation PointOfInstantiation, EnumDecl *Instantiation, EnumDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateSpecializationKind TSK); bool InstantiateInClassInitializer( SourceLocation PointOfInstantiation, FieldDecl *Instantiation, FieldDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs); struct LateInstantiatedAttribute { const Attr *TmplAttr; LocalInstantiationScope *Scope; Decl *NewDecl; LateInstantiatedAttribute(const Attr *A, LocalInstantiationScope *S, Decl *D) : TmplAttr(A), Scope(S), NewDecl(D) { } }; typedef SmallVector<LateInstantiatedAttribute, 16> LateInstantiatedAttrVec; void InstantiateAttrs(const MultiLevelTemplateArgumentList &TemplateArgs, const Decl *Pattern, 
Decl *Inst, LateInstantiatedAttrVec *LateAttrs = nullptr, LocalInstantiationScope *OuterMostScope = nullptr); void InstantiateAttrsForDecl(const MultiLevelTemplateArgumentList &TemplateArgs, const Decl *Pattern, Decl *Inst, LateInstantiatedAttrVec *LateAttrs = nullptr, LocalInstantiationScope *OuterMostScope = nullptr); bool usesPartialOrExplicitSpecialization( SourceLocation Loc, ClassTemplateSpecializationDecl *ClassTemplateSpec); bool InstantiateClassTemplateSpecialization(SourceLocation PointOfInstantiation, ClassTemplateSpecializationDecl *ClassTemplateSpec, TemplateSpecializationKind TSK, bool Complain = true); void InstantiateClassMembers(SourceLocation PointOfInstantiation, CXXRecordDecl *Instantiation, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateSpecializationKind TSK); void InstantiateClassTemplateSpecializationMembers( SourceLocation PointOfInstantiation, ClassTemplateSpecializationDecl *ClassTemplateSpec, TemplateSpecializationKind TSK); NestedNameSpecifierLoc SubstNestedNameSpecifierLoc(NestedNameSpecifierLoc NNS, const MultiLevelTemplateArgumentList &TemplateArgs); DeclarationNameInfo SubstDeclarationNameInfo(const DeclarationNameInfo &NameInfo, const MultiLevelTemplateArgumentList &TemplateArgs); TemplateName SubstTemplateName(NestedNameSpecifierLoc QualifierLoc, TemplateName Name, SourceLocation Loc, const MultiLevelTemplateArgumentList &TemplateArgs); bool Subst(const TemplateArgumentLoc *Args, unsigned NumArgs, TemplateArgumentListInfo &Result, const MultiLevelTemplateArgumentList &TemplateArgs); void InstantiateExceptionSpec(SourceLocation PointOfInstantiation, FunctionDecl *Function); FunctionDecl *InstantiateFunctionDeclaration(FunctionTemplateDecl *FTD, const TemplateArgumentList *Args, SourceLocation Loc); void InstantiateFunctionDefinition(SourceLocation PointOfInstantiation, FunctionDecl *Function, bool Recursive = false, bool DefinitionRequired = false, bool AtEndOfTU = false); VarTemplateSpecializationDecl 
*BuildVarTemplateInstantiation( VarTemplateDecl *VarTemplate, VarDecl *FromVar, const TemplateArgumentList &TemplateArgList, const TemplateArgumentListInfo &TemplateArgsInfo, SmallVectorImpl<TemplateArgument> &Converted, SourceLocation PointOfInstantiation, void *InsertPos, LateInstantiatedAttrVec *LateAttrs = nullptr, LocalInstantiationScope *StartingScope = nullptr); VarTemplateSpecializationDecl *CompleteVarTemplateSpecializationDecl( VarTemplateSpecializationDecl *VarSpec, VarDecl *PatternDecl, const MultiLevelTemplateArgumentList &TemplateArgs); void BuildVariableInstantiation(VarDecl *NewVar, VarDecl *OldVar, const MultiLevelTemplateArgumentList &TemplateArgs, LateInstantiatedAttrVec *LateAttrs, DeclContext *Owner, LocalInstantiationScope *StartingScope, bool InstantiatingVarTemplate = false, VarTemplateSpecializationDecl *PrevVTSD = nullptr); VarDecl *getVarTemplateSpecialization( VarTemplateDecl *VarTempl, const TemplateArgumentListInfo *TemplateArgs, const DeclarationNameInfo &MemberNameInfo, SourceLocation TemplateKWLoc); void InstantiateVariableInitializer( VarDecl *Var, VarDecl *OldVar, const MultiLevelTemplateArgumentList &TemplateArgs); void InstantiateVariableDefinition(SourceLocation PointOfInstantiation, VarDecl *Var, bool Recursive = false, bool DefinitionRequired = false, bool AtEndOfTU = false); void InstantiateMemInitializers(CXXConstructorDecl *New, const CXXConstructorDecl *Tmpl, const MultiLevelTemplateArgumentList &TemplateArgs); NamedDecl *FindInstantiatedDecl(SourceLocation Loc, NamedDecl *D, const MultiLevelTemplateArgumentList &TemplateArgs, bool FindingInstantiatedContext = false); DeclContext *FindInstantiatedContext(SourceLocation Loc, DeclContext *DC, const MultiLevelTemplateArgumentList &TemplateArgs); // Objective-C declarations. 
/// Kinds of Objective-C container context the semantic analyzer can be in.
enum ObjCContainerKind {
  OCK_None = -1,
  OCK_Interface = 0,
  OCK_Protocol,
  OCK_Category,
  OCK_ClassExtension,
  OCK_Implementation,
  OCK_CategoryImplementation
};
/// Returns the kind of Objective-C container currently being processed.
ObjCContainerKind getObjCContainerKind() const;

/// Called on a single parsed Objective-C type parameter.
DeclResult actOnObjCTypeParam(Scope *S, ObjCTypeParamVariance variance,
                              SourceLocation varianceLoc, unsigned index,
                              IdentifierInfo *paramName,
                              SourceLocation paramLoc,
                              SourceLocation colonLoc, ParsedType typeBound);

/// Called on a complete parsed Objective-C type parameter list.
ObjCTypeParamList *actOnObjCTypeParamList(Scope *S, SourceLocation lAngleLoc,
                                          ArrayRef<Decl *> typeParams,
                                          SourceLocation rAngleLoc);

/// Pops the scope associated with the given Objective-C type parameter list.
void popObjCTypeParamList(Scope *S, ObjCTypeParamList *typeParamList);

/// Called on a well-formed '@interface' for a class.
Decl *ActOnStartClassInterface(
    Scope *S, SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName,
    SourceLocation ClassLoc, ObjCTypeParamList *typeParamList,
    IdentifierInfo *SuperName, SourceLocation SuperLoc,
    ArrayRef<ParsedType> SuperTypeArgs, SourceRange SuperTypeArgsRange,
    Decl *const *ProtoRefs, unsigned NumProtoRefs,
    const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc,
    const ParsedAttributesView &AttrList);

/// Checks and records the superclass named in a class '@interface'.
void ActOnSuperClassOfClassInterface(Scope *S, SourceLocation AtInterfaceLoc,
                                     ObjCInterfaceDecl *IDecl,
                                     IdentifierInfo *ClassName,
                                     SourceLocation ClassLoc,
                                     IdentifierInfo *SuperName,
                                     SourceLocation SuperLoc,
                                     ArrayRef<ParsedType> SuperTypeArgs,
                                     SourceRange SuperTypeArgsRange);

/// Collects the protocols referenced through a typedef'd superclass name.
void ActOnTypedefedProtocols(SmallVectorImpl<Decl *> &ProtocolRefs,
                             SmallVectorImpl<SourceLocation> &ProtocolLocs,
                             IdentifierInfo *SuperName,
                             SourceLocation SuperLoc);

/// Called on a well-formed '@compatibility_alias' declaration.
Decl *ActOnCompatibilityAlias(SourceLocation AtCompatibilityAliasLoc,
                              IdentifierInfo *AliasName,
                              SourceLocation AliasLocation,
                              IdentifierInfo *ClassName,
                              SourceLocation ClassLocation);

/// Checks a forward '@protocol' declaration for a circular dependency
/// against the given protocol list.
bool CheckForwardProtocolDeclarationForCircularDependency(
    IdentifierInfo *PName, SourceLocation &PLoc, SourceLocation PrevLoc,
    const ObjCList<ObjCProtocolDecl> &PList);

/// Called on a well-formed '@protocol' interface.
Decl *ActOnStartProtocolInterface(
    SourceLocation AtProtoInterfaceLoc, IdentifierInfo *ProtocolName,
    SourceLocation ProtocolLoc, Decl *const *ProtoRefNames,
    unsigned NumProtoRefs, const SourceLocation *ProtoLocs,
    SourceLocation EndProtoLoc, const ParsedAttributesView &AttrList);

/// Called on a well-formed '@interface' for a category.
Decl *ActOnStartCategoryInterface(
    SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName,
    SourceLocation ClassLoc, ObjCTypeParamList *typeParamList,
    IdentifierInfo *CategoryName, SourceLocation CategoryLoc,
    Decl *const *ProtoRefs, unsigned NumProtoRefs,
    const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc,
    const ParsedAttributesView &AttrList);

/// Called on a well-formed '@implementation' for a class.
Decl *ActOnStartClassImplementation(SourceLocation AtClassImplLoc,
                                    IdentifierInfo *ClassName,
                                    SourceLocation ClassLoc,
                                    IdentifierInfo *SuperClassname,
                                    SourceLocation SuperClassLoc,
                                    const ParsedAttributesView &AttrList);

/// Called on a well-formed '@implementation' for a category.
Decl *ActOnStartCategoryImplementation(SourceLocation AtCatImplLoc,
                                       IdentifierInfo *ClassName,
                                       SourceLocation ClassLoc,
                                       IdentifierInfo *CatName,
                                       SourceLocation CatLoc,
                                       const ParsedAttributesView &AttrList);

/// Called at the end of an Objective-C '@implementation'.
DeclGroupPtrTy ActOnFinishObjCImplementation(Decl *ObjCImpDecl,
                                             ArrayRef<Decl *> Decls);

/// Called on a forward class declaration, e.g. '@class A, B;'.
DeclGroupPtrTy ActOnForwardClassDeclaration(
    SourceLocation Loc, IdentifierInfo **IdentList, SourceLocation *IdentLocs,
    ArrayRef<ObjCTypeParamList *> TypeParamLists, unsigned NumElts);

/// Called on a forward protocol declaration, e.g. '@protocol A, B;'.
DeclGroupPtrTy
ActOnForwardProtocolDeclaration(SourceLocation AtProtoclLoc,
                                ArrayRef<IdentifierLocPair> IdentList,
                                const ParsedAttributesView &attrList);

/// Resolves the given protocol names to their declarations, appending them
/// to \p Protocols.
void FindProtocolDeclaration(bool WarnOnDeclarations, bool ForObjCContainer,
                             ArrayRef<IdentifierLocPair> ProtocolId,
                             SmallVectorImpl<Decl *> &Protocols);

/// Diagnoses a name that could be either a protocol qualifier or a type
/// argument.
void DiagnoseTypeArgsAndProtocols(IdentifierInfo *ProtocolId,
                                  SourceLocation ProtocolLoc,
                                  IdentifierInfo *TypeArgId,
                                  SourceLocation TypeArgLoc,
                                  bool SelectProtocolFirst = false);

/// Given a list of identifiers (and their locations), resolve the
/// names to either Objective-C protocol qualifiers or type
/// arguments, as appropriate.
void actOnObjCTypeArgsOrProtocolQualifiers( Scope *S, ParsedType baseType, SourceLocation lAngleLoc, ArrayRef<IdentifierInfo *> identifiers, ArrayRef<SourceLocation> identifierLocs, SourceLocation rAngleLoc, SourceLocation &typeArgsLAngleLoc, SmallVectorImpl<ParsedType> &typeArgs, SourceLocation &typeArgsRAngleLoc, SourceLocation &protocolLAngleLoc, SmallVectorImpl<Decl *> &protocols, SourceLocation &protocolRAngleLoc, bool warnOnIncompleteProtocols); /// Build a an Objective-C protocol-qualified 'id' type where no /// base type was specified. TypeResult actOnObjCProtocolQualifierType( SourceLocation lAngleLoc, ArrayRef<Decl *> protocols, ArrayRef<SourceLocation> protocolLocs, SourceLocation rAngleLoc); /// Build a specialized and/or protocol-qualified Objective-C type. TypeResult actOnObjCTypeArgsAndProtocolQualifiers( Scope *S, SourceLocation Loc, ParsedType BaseType, SourceLocation TypeArgsLAngleLoc, ArrayRef<ParsedType> TypeArgs, SourceLocation TypeArgsRAngleLoc, SourceLocation ProtocolLAngleLoc, ArrayRef<Decl *> Protocols, ArrayRef<SourceLocation> ProtocolLocs, SourceLocation ProtocolRAngleLoc); /// Build an Objective-C type parameter type. QualType BuildObjCTypeParamType(const ObjCTypeParamDecl *Decl, SourceLocation ProtocolLAngleLoc, ArrayRef<ObjCProtocolDecl *> Protocols, ArrayRef<SourceLocation> ProtocolLocs, SourceLocation ProtocolRAngleLoc, bool FailOnError = false); /// Build an Objective-C object pointer type. QualType BuildObjCObjectType(QualType BaseType, SourceLocation Loc, SourceLocation TypeArgsLAngleLoc, ArrayRef<TypeSourceInfo *> TypeArgs, SourceLocation TypeArgsRAngleLoc, SourceLocation ProtocolLAngleLoc, ArrayRef<ObjCProtocolDecl *> Protocols, ArrayRef<SourceLocation> ProtocolLocs, SourceLocation ProtocolRAngleLoc, bool FailOnError = false); /// Ensure attributes are consistent with type. /// \param [in, out] Attributes The attributes to check; they will /// be modified to be consistent with \p PropertyTy. 
void CheckObjCPropertyAttributes(Decl *PropertyPtrTy, SourceLocation Loc,
                                 unsigned &Attributes,
                                 bool propertyInPrimaryClass);

/// Process the specified property declaration and create decls for the
/// setters and getters as needed.
/// \param property The property declaration being processed
void ProcessPropertyDecl(ObjCPropertyDecl *property);

/// Diagnoses a mismatch between \p Property and the \p SuperProperty it
/// redeclares or overrides.
void DiagnosePropertyMismatch(ObjCPropertyDecl *Property,
                              ObjCPropertyDecl *SuperProperty,
                              const IdentifierInfo *Name,
                              bool OverridingProtocolProperty);

/// Diagnoses methods in class extension \p CAT that duplicate methods of
/// interface \p ID.
void DiagnoseClassExtensionDupMethods(ObjCCategoryDecl *CAT,
                                      ObjCInterfaceDecl *ID);

/// Called on a well-formed '@end' closing an Objective-C container.
Decl *ActOnAtEnd(Scope *S, SourceRange AtEnd,
                 ArrayRef<Decl *> allMethods = None,
                 ArrayRef<DeclGroupPtrTy> allTUVars = None);

/// Called on a well-formed '@property' declaration.
Decl *ActOnProperty(Scope *S, SourceLocation AtLoc, SourceLocation LParenLoc,
                    FieldDeclarator &FD, ObjCDeclSpec &ODS,
                    Selector GetterSel, Selector SetterSel,
                    tok::ObjCKeywordKind MethodImplKind,
                    DeclContext *lexicalDC = nullptr);

/// Called on a well-formed property implementation declaration
/// ('@synthesize' / '@dynamic').
Decl *ActOnPropertyImplDecl(Scope *S, SourceLocation AtLoc,
                            SourceLocation PropertyLoc, bool ImplKind,
                            IdentifierInfo *PropertyId,
                            IdentifierInfo *PropertyIvar,
                            SourceLocation PropertyIvarLoc,
                            ObjCPropertyQueryKind QueryKind);

/// Objective-C method families that receive special checking.
enum ObjCSpecialMethodKind {
  OSMK_None,
  OSMK_Alloc,
  OSMK_New,
  OSMK_Copy,
  OSMK_RetainingInit,
  OSMK_NonRetainingInit
};

/// Per-argument information collected by the parser for an Objective-C
/// method declaration.
struct ObjCArgInfo {
  IdentifierInfo *Name;
  SourceLocation NameLoc;
  // The Type is null if no type was specified, and the DeclSpec is invalid
  // in this case.
  ParsedType Type;
  ObjCDeclSpec DeclSpec;

  /// ArgAttrs - Attribute list for this argument.
  ParsedAttributesView ArgAttrs;
};

/// Called on a complete parsed Objective-C method declaration.
Decl *ActOnMethodDeclaration(
    Scope *S,
    SourceLocation BeginLoc, // location of the + or -.
    SourceLocation EndLoc,   // location of the ; or {.
    tok::TokenKind MethodType, ObjCDeclSpec &ReturnQT, ParsedType ReturnType,
    ArrayRef<SourceLocation> SelectorLocs, Selector Sel,
    // optional arguments. The number of types/arguments is obtained
    // from the Sel.getNumArgs().
    ObjCArgInfo *ArgInfo, DeclaratorChunk::ParamInfo *CParamInfo,
    unsigned CNumArgs, // c-style args
    const ParsedAttributesView &AttrList, tok::ObjCKeywordKind MethodImplKind,
    bool isVariadic, bool MethodDefinition);

/// Looks up selector \p Sel among the protocols qualifying \p OPT.
ObjCMethodDecl *LookupMethodInQualifiedType(Selector Sel,
                                            const ObjCObjectPointerType *OPT,
                                            bool IsInstance);

/// Looks up selector \p Sel in the Objective-C object type \p Ty.
ObjCMethodDecl *LookupMethodInObjectType(Selector Sel, QualType Ty,
                                         bool IsInstance);

/// Performs ARC-specific checks on an Objective-C method declaration.
bool CheckARCMethodDecl(ObjCMethodDecl *method);

/// ARC ownership-lifetime inference for the given declaration.
bool inferObjCARCLifetime(ValueDecl *decl);

/// Builds a property reference expression on an object-pointer base.
ExprResult HandleExprPropertyRefExpr(const ObjCObjectPointerType *OPT,
                                     Expr *BaseExpr, SourceLocation OpLoc,
                                     DeclarationName MemberName,
                                     SourceLocation MemberLoc,
                                     SourceLocation SuperLoc,
                                     QualType SuperType, bool Super);

/// Builds a class property reference expression ('Receiver.property').
ExprResult ActOnClassPropertyRefExpr(IdentifierInfo &receiverName,
                                     IdentifierInfo &propertyName,
                                     SourceLocation receiverNameLoc,
                                     SourceLocation propertyNameLoc);

/// Attempts to capture 'self' in the current context.
ObjCMethodDecl *tryCaptureObjCSelf(SourceLocation Loc);

/// Describes the kind of message expression indicated by a message
/// send that starts with an identifier.
enum ObjCMessageKind {
  /// The message is sent to 'super'.
  ObjCSuperMessage,
  /// The message is an instance message.
  ObjCInstanceMessage,
  /// The message is a class message, and the identifier is a type
  /// name.
  ObjCClassMessage
};

/// Determines what kind of message send '[Name ...]' is.
ObjCMessageKind getObjCMessageKind(Scope *S, IdentifierInfo *Name,
                                   SourceLocation NameLoc, bool IsSuper,
                                   bool HasTrailingDot,
                                   ParsedType &ReceiverType);

/// Called on a message send to 'super'.
ExprResult ActOnSuperMessage(Scope *S, SourceLocation SuperLoc, Selector Sel,
                             SourceLocation LBracLoc,
                             ArrayRef<SourceLocation> SelectorLocs,
                             SourceLocation RBracLoc, MultiExprArg Args);

/// Builds a class message send expression.
ExprResult BuildClassMessage(TypeSourceInfo *ReceiverTypeInfo,
                             QualType ReceiverType, SourceLocation SuperLoc,
                             Selector Sel, ObjCMethodDecl *Method,
                             SourceLocation LBracLoc,
                             ArrayRef<SourceLocation> SelectorLocs,
                             SourceLocation RBracLoc, MultiExprArg Args,
                             bool isImplicit = false);

/// Builds an implicit class message send (no written bracket expression).
ExprResult BuildClassMessageImplicit(QualType ReceiverType,
                                     bool isSuperReceiver, SourceLocation Loc,
                                     Selector Sel, ObjCMethodDecl *Method,
                                     MultiExprArg Args);

/// Called on a parsed class message send.
ExprResult ActOnClassMessage(Scope *S, ParsedType Receiver, Selector Sel,
                             SourceLocation LBracLoc,
                             ArrayRef<SourceLocation> SelectorLocs,
                             SourceLocation RBracLoc, MultiExprArg Args);

/// Builds an instance message send expression.
ExprResult BuildInstanceMessage(Expr *Receiver, QualType ReceiverType,
                                SourceLocation SuperLoc, Selector Sel,
                                ObjCMethodDecl *Method,
                                SourceLocation LBracLoc,
                                ArrayRef<SourceLocation> SelectorLocs,
                                SourceLocation RBracLoc, MultiExprArg Args,
                                bool isImplicit = false);

/// Builds an implicit instance message send.
ExprResult BuildInstanceMessageImplicit(Expr *Receiver, QualType ReceiverType,
                                        SourceLocation Loc, Selector Sel,
                                        ObjCMethodDecl *Method,
                                        MultiExprArg Args);

/// Called on a parsed instance message send.
ExprResult ActOnInstanceMessage(Scope *S, Expr *Receiver, Selector Sel,
                                SourceLocation LBracLoc,
                                ArrayRef<SourceLocation> SelectorLocs,
                                SourceLocation RBracLoc, MultiExprArg Args);

/// Builds an ARC bridged cast expression, e.g. '(__bridge T)expr'.
ExprResult BuildObjCBridgedCast(SourceLocation LParenLoc,
                                ObjCBridgeCastKind Kind,
                                SourceLocation BridgeKeywordLoc,
                                TypeSourceInfo *TSInfo, Expr *SubExpr);

/// Called on a parsed ARC bridged cast.
ExprResult ActOnObjCBridgedCast(Scope *S, SourceLocation LParenLoc,
                                ObjCBridgeCastKind Kind,
                                SourceLocation BridgeKeywordLoc,
                                ParsedType Type, SourceLocation RParenLoc,
                                Expr *SubExpr);

/// Checks a toll-free-bridge cast between CF and ObjC types.
void CheckTollFreeBridgeCast(QualType castType, Expr *castExpr);
void CheckObjCBridgeRelatedCast(QualType castType, Expr *castExpr); bool CheckTollFreeBridgeStaticCast(QualType castType, Expr *castExpr, CastKind &Kind); bool checkObjCBridgeRelatedComponents(SourceLocation Loc, QualType DestType, QualType SrcType, ObjCInterfaceDecl *&RelatedClass, ObjCMethodDecl *&ClassMethod, ObjCMethodDecl *&InstanceMethod, TypedefNameDecl *&TDNDecl, bool CfToNs, bool Diagnose = true); bool CheckObjCBridgeRelatedConversions(SourceLocation Loc, QualType DestType, QualType SrcType, Expr *&SrcExpr, bool Diagnose = true); bool ConversionToObjCStringLiteralCheck(QualType DstType, Expr *&SrcExpr, bool Diagnose = true); bool checkInitMethod(ObjCMethodDecl *method, QualType receiverTypeIfCall); /// Check whether the given new method is a valid override of the /// given overridden method, and set any properties that should be inherited. void CheckObjCMethodOverride(ObjCMethodDecl *NewMethod, const ObjCMethodDecl *Overridden); /// Describes the compatibility of a result type with its method. enum ResultTypeCompatibilityKind { RTC_Compatible, RTC_Incompatible, RTC_Unknown }; /// Check whether the declared result type of the given Objective-C /// method declaration is compatible with the method's class. 
ResultTypeCompatibilityKind checkRelatedResultTypeCompatibility(const ObjCMethodDecl *Method, const ObjCInterfaceDecl *CurrentClass); void CheckObjCMethodOverrides(ObjCMethodDecl *ObjCMethod, ObjCInterfaceDecl *CurrentClass, ResultTypeCompatibilityKind RTC); enum PragmaOptionsAlignKind { POAK_Native, // #pragma options align=native POAK_Natural, // #pragma options align=natural POAK_Packed, // #pragma options align=packed POAK_Power, // #pragma options align=power POAK_Mac68k, // #pragma options align=mac68k POAK_Reset // #pragma options align=reset }; /// ActOnPragmaClangSection - Called on well formed \#pragma clang section void ActOnPragmaClangSection(SourceLocation PragmaLoc, PragmaClangSectionAction Action, PragmaClangSectionKind SecKind, StringRef SecName); /// ActOnPragmaOptionsAlign - Called on well formed \#pragma options align. void ActOnPragmaOptionsAlign(PragmaOptionsAlignKind Kind, SourceLocation PragmaLoc); /// ActOnPragmaPack - Called on well formed \#pragma pack(...). void ActOnPragmaPack(SourceLocation PragmaLoc, PragmaMsStackAction Action, StringRef SlotLabel, Expr *Alignment); enum class PragmaPackDiagnoseKind { NonDefaultStateAtInclude, ChangedStateAtExit }; void DiagnoseNonDefaultPragmaPack(PragmaPackDiagnoseKind Kind, SourceLocation IncludeLoc); void DiagnoseUnterminatedPragmaPack(); /// ActOnPragmaMSStruct - Called on well formed \#pragma ms_struct [on|off]. void ActOnPragmaMSStruct(PragmaMSStructKind Kind); /// ActOnPragmaMSComment - Called on well formed /// \#pragma comment(kind, "arg"). void ActOnPragmaMSComment(SourceLocation CommentLoc, PragmaMSCommentKind Kind, StringRef Arg); /// ActOnPragmaMSPointersToMembers - called on well formed \#pragma /// pointers_to_members(representation method[, general purpose /// representation]). void ActOnPragmaMSPointersToMembers( LangOptions::PragmaMSPointersToMembersKind Kind, SourceLocation PragmaLoc); /// Called on well formed \#pragma vtordisp(). 
void ActOnPragmaMSVtorDisp(PragmaMsStackAction Action, SourceLocation PragmaLoc, MSVtorDispAttr::Mode Value); enum PragmaSectionKind { PSK_DataSeg, PSK_BSSSeg, PSK_ConstSeg, PSK_CodeSeg, }; bool UnifySection(StringRef SectionName, int SectionFlags, DeclaratorDecl *TheDecl); bool UnifySection(StringRef SectionName, int SectionFlags, SourceLocation PragmaSectionLocation); /// Called on well formed \#pragma bss_seg/data_seg/const_seg/code_seg. void ActOnPragmaMSSeg(SourceLocation PragmaLocation, PragmaMsStackAction Action, llvm::StringRef StackSlotLabel, StringLiteral *SegmentName, llvm::StringRef PragmaName); /// Called on well formed \#pragma section(). void ActOnPragmaMSSection(SourceLocation PragmaLocation, int SectionFlags, StringLiteral *SegmentName); /// Called on well-formed \#pragma init_seg(). void ActOnPragmaMSInitSeg(SourceLocation PragmaLocation, StringLiteral *SegmentName); /// Called on #pragma clang __debug dump II void ActOnPragmaDump(Scope *S, SourceLocation Loc, IdentifierInfo *II); /// ActOnPragmaDetectMismatch - Call on well-formed \#pragma detect_mismatch void ActOnPragmaDetectMismatch(SourceLocation Loc, StringRef Name, StringRef Value); /// ActOnPragmaUnused - Called on well-formed '\#pragma unused'. void ActOnPragmaUnused(const Token &Identifier, Scope *curScope, SourceLocation PragmaLoc); /// ActOnPragmaVisibility - Called on well formed \#pragma GCC visibility... . void ActOnPragmaVisibility(const IdentifierInfo* VisType, SourceLocation PragmaLoc); NamedDecl *DeclClonePragmaWeak(NamedDecl *ND, IdentifierInfo *II, SourceLocation Loc); void DeclApplyPragmaWeak(Scope *S, NamedDecl *ND, WeakInfo &W); /// ActOnPragmaWeakID - Called on well formed \#pragma weak ident. void ActOnPragmaWeakID(IdentifierInfo* WeakName, SourceLocation PragmaLoc, SourceLocation WeakNameLoc); /// ActOnPragmaRedefineExtname - Called on well formed /// \#pragma redefine_extname oldname newname. 
void ActOnPragmaRedefineExtname(IdentifierInfo* WeakName, IdentifierInfo* AliasName, SourceLocation PragmaLoc, SourceLocation WeakNameLoc, SourceLocation AliasNameLoc); /// ActOnPragmaWeakAlias - Called on well formed \#pragma weak ident = ident. void ActOnPragmaWeakAlias(IdentifierInfo* WeakName, IdentifierInfo* AliasName, SourceLocation PragmaLoc, SourceLocation WeakNameLoc, SourceLocation AliasNameLoc); /// ActOnPragmaFPContract - Called on well formed /// \#pragma {STDC,OPENCL} FP_CONTRACT and /// \#pragma clang fp contract void ActOnPragmaFPContract(LangOptions::FPContractModeKind FPC); /// ActOnPragmaFenvAccess - Called on well formed /// \#pragma STDC FENV_ACCESS void ActOnPragmaFEnvAccess(LangOptions::FEnvAccessModeKind FPC); /// AddAlignmentAttributesForRecord - Adds any needed alignment attributes to /// a the record decl, to handle '\#pragma pack' and '\#pragma options align'. void AddAlignmentAttributesForRecord(RecordDecl *RD); /// AddMsStructLayoutForRecord - Adds ms_struct layout attribute to record. void AddMsStructLayoutForRecord(RecordDecl *RD); /// FreePackedContext - Deallocate and null out PackContext. void FreePackedContext(); /// PushNamespaceVisibilityAttr - Note that we've entered a /// namespace with a visibility attribute. void PushNamespaceVisibilityAttr(const VisibilityAttr *Attr, SourceLocation Loc); /// AddPushedVisibilityAttribute - If '\#pragma GCC visibility' was used, /// add an appropriate visibility attribute. void AddPushedVisibilityAttribute(Decl *RD); /// PopPragmaVisibility - Pop the top element of the visibility stack; used /// for '\#pragma GCC visibility' and visibility attributes on namespaces. void PopPragmaVisibility(bool IsNamespaceEnd, SourceLocation EndLoc); /// FreeVisContext - Deallocate and null out VisContext. void FreeVisContext(); /// AddCFAuditedAttribute - Check whether we're currently within /// '\#pragma clang arc_cf_code_audited' and, if so, consider adding /// the appropriate attribute. 
void AddCFAuditedAttribute(Decl *D); void ActOnPragmaAttributeAttribute(ParsedAttr &Attribute, SourceLocation PragmaLoc, attr::ParsedSubjectMatchRuleSet Rules); void ActOnPragmaAttributeEmptyPush(SourceLocation PragmaLoc, const IdentifierInfo *Namespace); /// Called on well-formed '\#pragma clang attribute pop'. void ActOnPragmaAttributePop(SourceLocation PragmaLoc, const IdentifierInfo *Namespace); /// Adds the attributes that have been specified using the /// '\#pragma clang attribute push' directives to the given declaration. void AddPragmaAttributes(Scope *S, Decl *D); void DiagnoseUnterminatedPragmaAttribute(); /// Called on well formed \#pragma clang optimize. void ActOnPragmaOptimize(bool On, SourceLocation PragmaLoc); /// Get the location for the currently active "\#pragma clang optimize /// off". If this location is invalid, then the state of the pragma is "on". SourceLocation getOptimizeOffPragmaLocation() const { return OptimizeOffPragmaLocation; } /// Only called on function definitions; if there is a pragma in scope /// with the effect of a range-based optnone, consider marking the function /// with attribute optnone. void AddRangeBasedOptnone(FunctionDecl *FD); /// Adds the 'optnone' attribute to the function declaration if there /// are no conflicts; Loc represents the location causing the 'optnone' /// attribute to be added (usually because of a pragma). void AddOptnoneAttributeIfNoConflicts(FunctionDecl *FD, SourceLocation Loc); /// AddAlignedAttr - Adds an aligned attribute to a particular declaration. void AddAlignedAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E, bool IsPackExpansion); void AddAlignedAttr(Decl *D, const AttributeCommonInfo &CI, TypeSourceInfo *T, bool IsPackExpansion); /// AddAssumeAlignedAttr - Adds an assume_aligned attribute to a particular /// declaration. 
void AddAssumeAlignedAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E, Expr *OE); /// AddAllocAlignAttr - Adds an alloc_align attribute to a particular /// declaration. void AddAllocAlignAttr(Decl *D, const AttributeCommonInfo &CI, Expr *ParamExpr); /// AddAlignValueAttr - Adds an align_value attribute to a particular /// declaration. void AddAlignValueAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E); /// AddLaunchBoundsAttr - Adds a launch_bounds attribute to a particular /// declaration. void AddLaunchBoundsAttr(Decl *D, const AttributeCommonInfo &CI, Expr *MaxThreads, Expr *MinBlocks); /// AddModeAttr - Adds a mode attribute to a particular declaration. void AddModeAttr(Decl *D, const AttributeCommonInfo &CI, IdentifierInfo *Name, bool InInstantiation = false); void AddParameterABIAttr(Decl *D, const AttributeCommonInfo &CI, ParameterABI ABI); enum class RetainOwnershipKind {NS, CF, OS}; void AddXConsumedAttr(Decl *D, const AttributeCommonInfo &CI, RetainOwnershipKind K, bool IsTemplateInstantiation); /// addAMDGPUFlatWorkGroupSizeAttr - Adds an amdgpu_flat_work_group_size /// attribute to a particular declaration. void addAMDGPUFlatWorkGroupSizeAttr(Decl *D, const AttributeCommonInfo &CI, Expr *Min, Expr *Max); /// addAMDGPUWavePersEUAttr - Adds an amdgpu_waves_per_eu attribute to a /// particular declaration. 
void addAMDGPUWavesPerEUAttr(Decl *D, const AttributeCommonInfo &CI, Expr *Min, Expr *Max); bool checkNSReturnsRetainedReturnType(SourceLocation loc, QualType type); //===--------------------------------------------------------------------===// // C++ Coroutines TS // bool ActOnCoroutineBodyStart(Scope *S, SourceLocation KwLoc, StringRef Keyword); ExprResult ActOnCoawaitExpr(Scope *S, SourceLocation KwLoc, Expr *E); ExprResult ActOnCoyieldExpr(Scope *S, SourceLocation KwLoc, Expr *E); StmtResult ActOnCoreturnStmt(Scope *S, SourceLocation KwLoc, Expr *E); ExprResult BuildResolvedCoawaitExpr(SourceLocation KwLoc, Expr *E, bool IsImplicit = false); ExprResult BuildUnresolvedCoawaitExpr(SourceLocation KwLoc, Expr *E, UnresolvedLookupExpr* Lookup); ExprResult BuildCoyieldExpr(SourceLocation KwLoc, Expr *E); StmtResult BuildCoreturnStmt(SourceLocation KwLoc, Expr *E, bool IsImplicit = false); StmtResult BuildCoroutineBodyStmt(CoroutineBodyStmt::CtorArgs); bool buildCoroutineParameterMoves(SourceLocation Loc); VarDecl *buildCoroutinePromise(SourceLocation Loc); void CheckCompletedCoroutineBody(FunctionDecl *FD, Stmt *&Body); ClassTemplateDecl *lookupCoroutineTraits(SourceLocation KwLoc, SourceLocation FuncLoc); //===--------------------------------------------------------------------===// // OpenCL extensions. // private: std::string CurrOpenCLExtension; /// Extensions required by an OpenCL type. llvm::DenseMap<const Type*, std::set<std::string>> OpenCLTypeExtMap; /// Extensions required by an OpenCL declaration. llvm::DenseMap<const Decl*, std::set<std::string>> OpenCLDeclExtMap; public: llvm::StringRef getCurrentOpenCLExtension() const { return CurrOpenCLExtension; } /// Check if a function declaration \p FD associates with any /// extensions present in OpenCLDeclExtMap and if so return the /// extension(s) name(s). 
std::string getOpenCLExtensionsFromDeclExtMap(FunctionDecl *FD); /// Check if a function type \p FT associates with any /// extensions present in OpenCLTypeExtMap and if so return the /// extension(s) name(s). std::string getOpenCLExtensionsFromTypeExtMap(FunctionType *FT); /// Find an extension in an appropriate extension map and return its name template<typename T, typename MapT> std::string getOpenCLExtensionsFromExtMap(T* FT, MapT &Map); void setCurrentOpenCLExtension(llvm::StringRef Ext) { CurrOpenCLExtension = Ext; } /// Set OpenCL extensions for a type which can only be used when these /// OpenCL extensions are enabled. If \p Exts is empty, do nothing. /// \param Exts A space separated list of OpenCL extensions. void setOpenCLExtensionForType(QualType T, llvm::StringRef Exts); /// Set OpenCL extensions for a declaration which can only be /// used when these OpenCL extensions are enabled. If \p Exts is empty, do /// nothing. /// \param Exts A space separated list of OpenCL extensions. void setOpenCLExtensionForDecl(Decl *FD, llvm::StringRef Exts); /// Set current OpenCL extensions for a type which can only be used /// when these OpenCL extensions are enabled. If current OpenCL extension is /// empty, do nothing. void setCurrentOpenCLExtensionForType(QualType T); /// Set current OpenCL extensions for a declaration which /// can only be used when these OpenCL extensions are enabled. If current /// OpenCL extension is empty, do nothing. void setCurrentOpenCLExtensionForDecl(Decl *FD); bool isOpenCLDisabledDecl(Decl *FD); /// Check if type \p T corresponding to declaration specifier \p DS /// is disabled due to required OpenCL extensions being disabled. If so, /// emit diagnostics. /// \return true if type is disabled. bool checkOpenCLDisabledTypeDeclSpec(const DeclSpec &DS, QualType T); /// Check if declaration \p D used by expression \p E /// is disabled due to required OpenCL extensions being disabled. If so, /// emit diagnostics. 
/// \return true if type is disabled. bool checkOpenCLDisabledDecl(const NamedDecl &D, const Expr &E); //===--------------------------------------------------------------------===// // OpenMP directives and clauses. // private: void *VarDataSharingAttributesStack; /// Number of nested '#pragma omp declare target' directives. unsigned DeclareTargetNestingLevel = 0; /// Initialization of data-sharing attributes stack. void InitDataSharingAttributesStack(); void DestroyDataSharingAttributesStack(); ExprResult VerifyPositiveIntegerConstantInClause(Expr *Op, OpenMPClauseKind CKind, bool StrictlyPositive = true); /// Returns OpenMP nesting level for current directive. unsigned getOpenMPNestingLevel() const; /// Adjusts the function scopes index for the target-based regions. void adjustOpenMPTargetScopeIndex(unsigned &FunctionScopesIndex, unsigned Level) const; /// Returns the number of scopes associated with the construct on the given /// OpenMP level. int getNumberOfConstructScopes(unsigned Level) const; /// Push new OpenMP function region for non-capturing function. void pushOpenMPFunctionRegion(); /// Pop OpenMP function region for non-capturing function. void popOpenMPFunctionRegion(const sema::FunctionScopeInfo *OldFSI); /// Check whether we're allowed to call Callee from the current function. void checkOpenMPDeviceFunction(SourceLocation Loc, FunctionDecl *Callee, bool CheckForDelayedContext = true); /// Check whether we're allowed to call Callee from the current function. void checkOpenMPHostFunction(SourceLocation Loc, FunctionDecl *Callee, bool CheckCaller = true); /// Check if the expression is allowed to be used in expressions for the /// OpenMP devices. void checkOpenMPDeviceExpr(const Expr *E); /// Finishes analysis of the deferred functions calls that may be declared as /// host/nohost during device/host compilation. 
void finalizeOpenMPDelayedAnalysis(); /// Checks if a type or a declaration is disabled due to the owning extension /// being disabled, and emits diagnostic messages if it is disabled. /// \param D type or declaration to be checked. /// \param DiagLoc source location for the diagnostic message. /// \param DiagInfo information to be emitted for the diagnostic message. /// \param SrcRange source range of the declaration. /// \param Map maps type or declaration to the extensions. /// \param Selector selects diagnostic message: 0 for type and 1 for /// declaration. /// \return true if the type or declaration is disabled. template <typename T, typename DiagLocT, typename DiagInfoT, typename MapT> bool checkOpenCLDisabledTypeOrDecl(T D, DiagLocT DiagLoc, DiagInfoT DiagInfo, MapT &Map, unsigned Selector = 0, SourceRange SrcRange = SourceRange()); /// Marks all the functions that might be required for the currently active /// OpenMP context. void markOpenMPDeclareVariantFuncsReferenced(SourceLocation Loc, FunctionDecl *Func, bool MightBeOdrUse); public: /// Struct to store the context selectors info for declare variant directive. struct OpenMPDeclareVariantCtsSelectorData { OMPDeclareVariantAttr::CtxSelectorSetType CtxSet = OMPDeclareVariantAttr::CtxSetUnknown; OMPDeclareVariantAttr::CtxSelectorType Ctx = OMPDeclareVariantAttr::CtxUnknown; MutableArrayRef<StringRef> ImplVendors; ExprResult CtxScore; explicit OpenMPDeclareVariantCtsSelectorData() = default; explicit OpenMPDeclareVariantCtsSelectorData( OMPDeclareVariantAttr::CtxSelectorSetType CtxSet, OMPDeclareVariantAttr::CtxSelectorType Ctx, MutableArrayRef<StringRef> ImplVendors, ExprResult CtxScore) : CtxSet(CtxSet), Ctx(Ctx), ImplVendors(ImplVendors), CtxScore(CtxScore) {} }; /// Checks if the variant/multiversion functions are compatible. 
bool areMultiversionVariantFunctionsCompatible( const FunctionDecl *OldFD, const FunctionDecl *NewFD, const PartialDiagnostic &NoProtoDiagID, const PartialDiagnosticAt &NoteCausedDiagIDAt, const PartialDiagnosticAt &NoSupportDiagIDAt, const PartialDiagnosticAt &DiffDiagIDAt, bool TemplatesSupported, bool ConstexprSupported, bool CLinkageMayDiffer); /// Function tries to capture lambda's captured variables in the OpenMP region /// before the original lambda is captured. void tryCaptureOpenMPLambdas(ValueDecl *V); /// Return true if the provided declaration \a VD should be captured by /// reference. /// \param Level Relative level of nested OpenMP construct for that the check /// is performed. /// \param OpenMPCaptureLevel Capture level within an OpenMP construct. bool isOpenMPCapturedByRef(const ValueDecl *D, unsigned Level, unsigned OpenMPCaptureLevel) const; /// Check if the specified variable is used in one of the private /// clauses (private, firstprivate, lastprivate, reduction etc.) in OpenMP /// constructs. VarDecl *isOpenMPCapturedDecl(ValueDecl *D, bool CheckScopeInfo = false, unsigned StopAt = 0); ExprResult getOpenMPCapturedExpr(VarDecl *Capture, ExprValueKind VK, ExprObjectKind OK, SourceLocation Loc); /// If the current region is a loop-based region, mark the start of the loop /// construct. void startOpenMPLoop(); /// If the current region is a range loop-based region, mark the start of the /// loop construct. void startOpenMPCXXRangeFor(); /// Check if the specified variable is used in 'private' clause. /// \param Level Relative level of nested OpenMP construct for that the check /// is performed. bool isOpenMPPrivateDecl(const ValueDecl *D, unsigned Level) const; /// Sets OpenMP capture kind (OMPC_private, OMPC_firstprivate, OMPC_map etc.) /// for \p FD based on DSA for the provided corresponding captured declaration /// \p D. 
void setOpenMPCaptureKind(FieldDecl *FD, const ValueDecl *D, unsigned Level); /// Check if the specified variable is captured by 'target' directive. /// \param Level Relative level of nested OpenMP construct for that the check /// is performed. bool isOpenMPTargetCapturedDecl(const ValueDecl *D, unsigned Level) const; ExprResult PerformOpenMPImplicitIntegerConversion(SourceLocation OpLoc, Expr *Op); /// Called on start of new data sharing attribute block. void StartOpenMPDSABlock(OpenMPDirectiveKind K, const DeclarationNameInfo &DirName, Scope *CurScope, SourceLocation Loc); /// Start analysis of clauses. void StartOpenMPClause(OpenMPClauseKind K); /// End analysis of clauses. void EndOpenMPClause(); /// Called on end of data sharing attribute block. void EndOpenMPDSABlock(Stmt *CurDirective); /// Check if the current region is an OpenMP loop region and if it is, /// mark loop control variable, used in \p Init for loop initialization, as /// private by default. /// \param Init First part of the for loop. void ActOnOpenMPLoopInitialization(SourceLocation ForLoc, Stmt *Init); // OpenMP directives and clauses. /// Called on correct id-expression from the '#pragma omp /// threadprivate'. ExprResult ActOnOpenMPIdExpression(Scope *CurScope, CXXScopeSpec &ScopeSpec, const DeclarationNameInfo &Id, OpenMPDirectiveKind Kind); /// Called on well-formed '#pragma omp threadprivate'. DeclGroupPtrTy ActOnOpenMPThreadprivateDirective( SourceLocation Loc, ArrayRef<Expr *> VarList); /// Builds a new OpenMPThreadPrivateDecl and checks its correctness. OMPThreadPrivateDecl *CheckOMPThreadPrivateDecl(SourceLocation Loc, ArrayRef<Expr *> VarList); /// Called on well-formed '#pragma omp allocate'. DeclGroupPtrTy ActOnOpenMPAllocateDirective(SourceLocation Loc, ArrayRef<Expr *> VarList, ArrayRef<OMPClause *> Clauses, DeclContext *Owner = nullptr); /// Called on well-formed '#pragma omp requires'. 
DeclGroupPtrTy ActOnOpenMPRequiresDirective(SourceLocation Loc, ArrayRef<OMPClause *> ClauseList); /// Check restrictions on Requires directive OMPRequiresDecl *CheckOMPRequiresDecl(SourceLocation Loc, ArrayRef<OMPClause *> Clauses); /// Check if the specified type is allowed to be used in 'omp declare /// reduction' construct. QualType ActOnOpenMPDeclareReductionType(SourceLocation TyLoc, TypeResult ParsedType); /// Called on start of '#pragma omp declare reduction'. DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveStart( Scope *S, DeclContext *DC, DeclarationName Name, ArrayRef<std::pair<QualType, SourceLocation>> ReductionTypes, AccessSpecifier AS, Decl *PrevDeclInScope = nullptr); /// Initialize declare reduction construct initializer. void ActOnOpenMPDeclareReductionCombinerStart(Scope *S, Decl *D); /// Finish current declare reduction construct initializer. void ActOnOpenMPDeclareReductionCombinerEnd(Decl *D, Expr *Combiner); /// Initialize declare reduction construct initializer. /// \return omp_priv variable. VarDecl *ActOnOpenMPDeclareReductionInitializerStart(Scope *S, Decl *D); /// Finish current declare reduction construct initializer. void ActOnOpenMPDeclareReductionInitializerEnd(Decl *D, Expr *Initializer, VarDecl *OmpPrivParm); /// Called at the end of '#pragma omp declare reduction'. DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveEnd( Scope *S, DeclGroupPtrTy DeclReductions, bool IsValid); /// Check variable declaration in 'omp declare mapper' construct. TypeResult ActOnOpenMPDeclareMapperVarDecl(Scope *S, Declarator &D); /// Check if the specified type is allowed to be used in 'omp declare /// mapper' construct. QualType ActOnOpenMPDeclareMapperType(SourceLocation TyLoc, TypeResult ParsedType); /// Called on start of '#pragma omp declare mapper'. 
OMPDeclareMapperDecl *ActOnOpenMPDeclareMapperDirectiveStart(
    Scope *S, DeclContext *DC, DeclarationName Name, QualType MapperType,
    SourceLocation StartLoc, DeclarationName VN, AccessSpecifier AS,
    Decl *PrevDeclInScope = nullptr);

/// Build the mapper variable of '#pragma omp declare mapper'.
void ActOnOpenMPDeclareMapperDirectiveVarDecl(OMPDeclareMapperDecl *DMD,
                                              Scope *S, QualType MapperType,
                                              SourceLocation StartLoc,
                                              DeclarationName VN);

/// Called at the end of '#pragma omp declare mapper'.
DeclGroupPtrTy
ActOnOpenMPDeclareMapperDirectiveEnd(OMPDeclareMapperDecl *D, Scope *S,
                                     ArrayRef<OMPClause *> ClauseList);

/// Called on the start of target region i.e. '#pragma omp declare target'.
bool ActOnStartOpenMPDeclareTargetDirective(SourceLocation Loc);

/// Called at the end of target region i.e. '#pragma omp end declare target'.
void ActOnFinishOpenMPDeclareTargetDirective();

/// Searches for the provided declaration name for OpenMP declare target
/// directive.
NamedDecl *
lookupOpenMPDeclareTargetName(Scope *CurScope, CXXScopeSpec &ScopeSpec,
                              const DeclarationNameInfo &Id,
                              NamedDeclSetType &SameDirectiveDecls);

/// Called on correct id-expression from the '#pragma omp declare target'.
void ActOnOpenMPDeclareTargetName(NamedDecl *ND, SourceLocation Loc,
                                  OMPDeclareTargetDeclAttr::MapTypeTy MT,
                                  OMPDeclareTargetDeclAttr::DevTypeTy DT);

/// Check declaration inside target region.
void checkDeclIsAllowedInOpenMPTarget(Expr *E, Decl *D,
                                      SourceLocation IdLoc = SourceLocation());

/// Return true inside OpenMP declare target region.
bool isInOpenMPDeclareTargetContext() const {
  return DeclareTargetNestingLevel > 0;
}

/// Return true inside OpenMP target region.
bool isInOpenMPTargetExecutionDirective() const;

/// Return the number of captured regions created for an OpenMP directive.
static int getOpenMPCaptureLevels(OpenMPDirectiveKind Kind);

/// Initialization of captured region for OpenMP region.
void ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope); /// End of OpenMP region. /// /// \param S Statement associated with the current OpenMP region. /// \param Clauses List of clauses for the current OpenMP region. /// /// \returns Statement for finished OpenMP region. StmtResult ActOnOpenMPRegionEnd(StmtResult S, ArrayRef<OMPClause *> Clauses); StmtResult ActOnOpenMPExecutableDirective( OpenMPDirectiveKind Kind, const DeclarationNameInfo &DirName, OpenMPDirectiveKind CancelRegion, ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp parallel' after parsing /// of the associated statement. StmtResult ActOnOpenMPParallelDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); using VarsWithInheritedDSAType = llvm::SmallDenseMap<const ValueDecl *, const Expr *, 4>; /// Called on well-formed '\#pragma omp simd' after parsing /// of the associated statement. StmtResult ActOnOpenMPSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp for' after parsing /// of the associated statement. StmtResult ActOnOpenMPForDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp for simd' after parsing /// of the associated statement. StmtResult ActOnOpenMPForSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp sections' after parsing /// of the associated statement. 
StmtResult ActOnOpenMPSectionsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp section' after parsing of the /// associated statement. StmtResult ActOnOpenMPSectionDirective(Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp single' after parsing of the /// associated statement. StmtResult ActOnOpenMPSingleDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp master' after parsing of the /// associated statement. StmtResult ActOnOpenMPMasterDirective(Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp critical' after parsing of the /// associated statement. StmtResult ActOnOpenMPCriticalDirective(const DeclarationNameInfo &DirName, ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp parallel for' after parsing /// of the associated statement. StmtResult ActOnOpenMPParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp parallel for simd' after /// parsing of the associated statement. StmtResult ActOnOpenMPParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp parallel sections' after /// parsing of the associated statement. StmtResult ActOnOpenMPParallelSectionsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp task' after parsing of the /// associated statement. 
StmtResult ActOnOpenMPTaskDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp taskyield'. StmtResult ActOnOpenMPTaskyieldDirective(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp barrier'. StmtResult ActOnOpenMPBarrierDirective(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp taskwait'. StmtResult ActOnOpenMPTaskwaitDirective(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp taskgroup'. StmtResult ActOnOpenMPTaskgroupDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp flush'. StmtResult ActOnOpenMPFlushDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp ordered' after parsing of the /// associated statement. StmtResult ActOnOpenMPOrderedDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp atomic' after parsing of the /// associated statement. StmtResult ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target' after parsing of the /// associated statement. StmtResult ActOnOpenMPTargetDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target data' after parsing of /// the associated statement. StmtResult ActOnOpenMPTargetDataDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target enter data' after /// parsing of the associated statement. 
StmtResult ActOnOpenMPTargetEnterDataDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc, Stmt *AStmt); /// Called on well-formed '\#pragma omp target exit data' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetExitDataDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc, Stmt *AStmt); /// Called on well-formed '\#pragma omp target parallel' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetParallelDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target parallel for' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams' after parsing of the /// associated statement. StmtResult ActOnOpenMPTeamsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp cancellation point'. StmtResult ActOnOpenMPCancellationPointDirective(SourceLocation StartLoc, SourceLocation EndLoc, OpenMPDirectiveKind CancelRegion); /// Called on well-formed '\#pragma omp cancel'. StmtResult ActOnOpenMPCancelDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc, OpenMPDirectiveKind CancelRegion); /// Called on well-formed '\#pragma omp taskloop' after parsing of the /// associated statement. StmtResult ActOnOpenMPTaskLoopDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp taskloop simd' after parsing of /// the associated statement. 
StmtResult ActOnOpenMPTaskLoopSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp master taskloop' after parsing of the /// associated statement. StmtResult ActOnOpenMPMasterTaskLoopDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp master taskloop simd' after parsing of /// the associated statement. StmtResult ActOnOpenMPMasterTaskLoopSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp parallel master taskloop' after /// parsing of the associated statement. StmtResult ActOnOpenMPParallelMasterTaskLoopDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp distribute' after parsing /// of the associated statement. StmtResult ActOnOpenMPDistributeDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target update'. StmtResult ActOnOpenMPTargetUpdateDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc, Stmt *AStmt); /// Called on well-formed '\#pragma omp distribute parallel for' after /// parsing of the associated statement. StmtResult ActOnOpenMPDistributeParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp distribute parallel for simd' /// after parsing of the associated statement. 
StmtResult ActOnOpenMPDistributeParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp distribute simd' after /// parsing of the associated statement. StmtResult ActOnOpenMPDistributeSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target parallel for simd' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target simd' after parsing of /// the associated statement. StmtResult ActOnOpenMPTargetSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams distribute' after parsing of /// the associated statement. StmtResult ActOnOpenMPTeamsDistributeDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams distribute simd' after parsing /// of the associated statement. StmtResult ActOnOpenMPTeamsDistributeSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams distribute parallel for simd' /// after parsing of the associated statement. 
StmtResult ActOnOpenMPTeamsDistributeParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams distribute parallel for' /// after parsing of the associated statement. StmtResult ActOnOpenMPTeamsDistributeParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target teams' after parsing of the /// associated statement. StmtResult ActOnOpenMPTargetTeamsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target teams distribute' after parsing /// of the associated statement. StmtResult ActOnOpenMPTargetTeamsDistributeDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target teams distribute parallel for' /// after parsing of the associated statement. StmtResult ActOnOpenMPTargetTeamsDistributeParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target teams distribute parallel for /// simd' after parsing of the associated statement. StmtResult ActOnOpenMPTargetTeamsDistributeParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target teams distribute simd' after /// parsing of the associated statement. 
StmtResult ActOnOpenMPTargetTeamsDistributeSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Checks correctness of linear modifiers. bool CheckOpenMPLinearModifier(OpenMPLinearClauseKind LinKind, SourceLocation LinLoc); /// Checks that the specified declaration matches requirements for the linear /// decls. bool CheckOpenMPLinearDecl(const ValueDecl *D, SourceLocation ELoc, OpenMPLinearClauseKind LinKind, QualType Type); /// Called on well-formed '\#pragma omp declare simd' after parsing of /// the associated method/function. DeclGroupPtrTy ActOnOpenMPDeclareSimdDirective( DeclGroupPtrTy DG, OMPDeclareSimdDeclAttr::BranchStateTy BS, Expr *Simdlen, ArrayRef<Expr *> Uniforms, ArrayRef<Expr *> Aligneds, ArrayRef<Expr *> Alignments, ArrayRef<Expr *> Linears, ArrayRef<unsigned> LinModifiers, ArrayRef<Expr *> Steps, SourceRange SR); /// Checks '\#pragma omp declare variant' variant function and original /// functions after parsing of the associated method/function. /// \param DG Function declaration to which declare variant directive is /// applied to. /// \param VariantRef Expression that references the variant function, which /// must be used instead of the original one, specified in \p DG. /// \returns None, if the function/variant function are not compatible with /// the pragma, pair of original function/variant ref expression otherwise. Optional<std::pair<FunctionDecl *, Expr *>> checkOpenMPDeclareVariantFunction( DeclGroupPtrTy DG, Expr *VariantRef, SourceRange SR); /// Called on well-formed '\#pragma omp declare variant' after parsing of /// the associated method/function. /// \param FD Function declaration to which declare variant directive is /// applied to. /// \param VariantRef Expression that references the variant function, which /// must be used instead of the original one, specified in \p DG. 
/// \param Data Set of context-specific data for the specified context /// selector. void ActOnOpenMPDeclareVariantDirective( FunctionDecl *FD, Expr *VariantRef, SourceRange SR, const Sema::OpenMPDeclareVariantCtsSelectorData &Data); OMPClause *ActOnOpenMPSingleExprClause(OpenMPClauseKind Kind, Expr *Expr, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'allocator' clause. OMPClause *ActOnOpenMPAllocatorClause(Expr *Allocator, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'if' clause. OMPClause *ActOnOpenMPIfClause(OpenMPDirectiveKind NameModifier, Expr *Condition, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation NameModifierLoc, SourceLocation ColonLoc, SourceLocation EndLoc); /// Called on well-formed 'final' clause. OMPClause *ActOnOpenMPFinalClause(Expr *Condition, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'num_threads' clause. OMPClause *ActOnOpenMPNumThreadsClause(Expr *NumThreads, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'safelen' clause. OMPClause *ActOnOpenMPSafelenClause(Expr *Length, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'simdlen' clause. OMPClause *ActOnOpenMPSimdlenClause(Expr *Length, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'collapse' clause. OMPClause *ActOnOpenMPCollapseClause(Expr *NumForLoops, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'ordered' clause. OMPClause * ActOnOpenMPOrderedClause(SourceLocation StartLoc, SourceLocation EndLoc, SourceLocation LParenLoc = SourceLocation(), Expr *NumForLoops = nullptr); /// Called on well-formed 'grainsize' clause. 
OMPClause *ActOnOpenMPGrainsizeClause(Expr *Size, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'num_tasks' clause. OMPClause *ActOnOpenMPNumTasksClause(Expr *NumTasks, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'hint' clause. OMPClause *ActOnOpenMPHintClause(Expr *Hint, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); OMPClause *ActOnOpenMPSimpleClause(OpenMPClauseKind Kind, unsigned Argument, SourceLocation ArgumentLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'default' clause. OMPClause *ActOnOpenMPDefaultClause(OpenMPDefaultClauseKind Kind, SourceLocation KindLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'proc_bind' clause. OMPClause *ActOnOpenMPProcBindClause(OpenMPProcBindClauseKind Kind, SourceLocation KindLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); OMPClause *ActOnOpenMPSingleExprWithArgClause( OpenMPClauseKind Kind, ArrayRef<unsigned> Arguments, Expr *Expr, SourceLocation StartLoc, SourceLocation LParenLoc, ArrayRef<SourceLocation> ArgumentsLoc, SourceLocation DelimLoc, SourceLocation EndLoc); /// Called on well-formed 'schedule' clause. OMPClause *ActOnOpenMPScheduleClause( OpenMPScheduleClauseModifier M1, OpenMPScheduleClauseModifier M2, OpenMPScheduleClauseKind Kind, Expr *ChunkSize, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation M1Loc, SourceLocation M2Loc, SourceLocation KindLoc, SourceLocation CommaLoc, SourceLocation EndLoc); OMPClause *ActOnOpenMPClause(OpenMPClauseKind Kind, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'nowait' clause. OMPClause *ActOnOpenMPNowaitClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'untied' clause. 
OMPClause *ActOnOpenMPUntiedClause(SourceLocation StartLoc,
                                   SourceLocation EndLoc);

/// Called on well-formed 'mergeable' clause.
OMPClause *ActOnOpenMPMergeableClause(SourceLocation StartLoc,
                                      SourceLocation EndLoc);

/// Called on well-formed 'read' clause.
OMPClause *ActOnOpenMPReadClause(SourceLocation StartLoc,
                                 SourceLocation EndLoc);

/// Called on well-formed 'write' clause.
OMPClause *ActOnOpenMPWriteClause(SourceLocation StartLoc,
                                  SourceLocation EndLoc);

/// Called on well-formed 'update' clause.
OMPClause *ActOnOpenMPUpdateClause(SourceLocation StartLoc,
                                   SourceLocation EndLoc);

/// Called on well-formed 'capture' clause.
OMPClause *ActOnOpenMPCaptureClause(SourceLocation StartLoc,
                                    SourceLocation EndLoc);

/// Called on well-formed 'seq_cst' clause.
OMPClause *ActOnOpenMPSeqCstClause(SourceLocation StartLoc,
                                   SourceLocation EndLoc);

/// Called on well-formed 'threads' clause.
OMPClause *ActOnOpenMPThreadsClause(SourceLocation StartLoc,
                                    SourceLocation EndLoc);

/// Called on well-formed 'simd' clause.
OMPClause *ActOnOpenMPSIMDClause(SourceLocation StartLoc,
                                 SourceLocation EndLoc);

/// Called on well-formed 'nogroup' clause.
OMPClause *ActOnOpenMPNogroupClause(SourceLocation StartLoc,
                                    SourceLocation EndLoc);

/// Called on well-formed 'unified_address' clause.
OMPClause *ActOnOpenMPUnifiedAddressClause(SourceLocation StartLoc,
                                           SourceLocation EndLoc);

/// Called on well-formed 'unified_shared_memory' clause.
OMPClause *ActOnOpenMPUnifiedSharedMemoryClause(SourceLocation StartLoc,
                                                SourceLocation EndLoc);

/// Called on well-formed 'reverse_offload' clause.
OMPClause *ActOnOpenMPReverseOffloadClause(SourceLocation StartLoc,
                                           SourceLocation EndLoc);

/// Called on well-formed 'dynamic_allocators' clause.
OMPClause *ActOnOpenMPDynamicAllocatorsClause(SourceLocation StartLoc,
                                              SourceLocation EndLoc);

/// Called on well-formed 'atomic_default_mem_order' clause.
OMPClause *ActOnOpenMPAtomicDefaultMemOrderClause( OpenMPAtomicDefaultMemOrderClauseKind Kind, SourceLocation KindLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); OMPClause *ActOnOpenMPVarListClause( OpenMPClauseKind Kind, ArrayRef<Expr *> Vars, Expr *TailExpr, const OMPVarListLocTy &Locs, SourceLocation ColonLoc, CXXScopeSpec &ReductionOrMapperIdScopeSpec, DeclarationNameInfo &ReductionOrMapperId, OpenMPDependClauseKind DepKind, OpenMPLinearClauseKind LinKind, ArrayRef<OpenMPMapModifierKind> MapTypeModifiers, ArrayRef<SourceLocation> MapTypeModifiersLoc, OpenMPMapClauseKind MapType, bool IsMapTypeImplicit, SourceLocation DepLinMapLoc); /// Called on well-formed 'allocate' clause. OMPClause * ActOnOpenMPAllocateClause(Expr *Allocator, ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation ColonLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'private' clause. OMPClause *ActOnOpenMPPrivateClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'firstprivate' clause. OMPClause *ActOnOpenMPFirstprivateClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'lastprivate' clause. OMPClause *ActOnOpenMPLastprivateClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'shared' clause. OMPClause *ActOnOpenMPSharedClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'reduction' clause. 
OMPClause *ActOnOpenMPReductionClause( ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec, const DeclarationNameInfo &ReductionId, ArrayRef<Expr *> UnresolvedReductions = llvm::None); /// Called on well-formed 'task_reduction' clause. OMPClause *ActOnOpenMPTaskReductionClause( ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec, const DeclarationNameInfo &ReductionId, ArrayRef<Expr *> UnresolvedReductions = llvm::None); /// Called on well-formed 'in_reduction' clause. OMPClause *ActOnOpenMPInReductionClause( ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec, const DeclarationNameInfo &ReductionId, ArrayRef<Expr *> UnresolvedReductions = llvm::None); /// Called on well-formed 'linear' clause. OMPClause * ActOnOpenMPLinearClause(ArrayRef<Expr *> VarList, Expr *Step, SourceLocation StartLoc, SourceLocation LParenLoc, OpenMPLinearClauseKind LinKind, SourceLocation LinLoc, SourceLocation ColonLoc, SourceLocation EndLoc); /// Called on well-formed 'aligned' clause. OMPClause *ActOnOpenMPAlignedClause(ArrayRef<Expr *> VarList, Expr *Alignment, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc); /// Called on well-formed 'copyin' clause. OMPClause *ActOnOpenMPCopyinClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'copyprivate' clause. OMPClause *ActOnOpenMPCopyprivateClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'flush' pseudo clause. 
OMPClause *ActOnOpenMPFlushClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'depend' clause. OMPClause * ActOnOpenMPDependClause(OpenMPDependClauseKind DepKind, SourceLocation DepLoc, SourceLocation ColonLoc, ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'device' clause. OMPClause *ActOnOpenMPDeviceClause(Expr *Device, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'map' clause. OMPClause * ActOnOpenMPMapClause(ArrayRef<OpenMPMapModifierKind> MapTypeModifiers, ArrayRef<SourceLocation> MapTypeModifiersLoc, CXXScopeSpec &MapperIdScopeSpec, DeclarationNameInfo &MapperId, OpenMPMapClauseKind MapType, bool IsMapTypeImplicit, SourceLocation MapLoc, SourceLocation ColonLoc, ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs, ArrayRef<Expr *> UnresolvedMappers = llvm::None); /// Called on well-formed 'num_teams' clause. OMPClause *ActOnOpenMPNumTeamsClause(Expr *NumTeams, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'thread_limit' clause. OMPClause *ActOnOpenMPThreadLimitClause(Expr *ThreadLimit, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'priority' clause. OMPClause *ActOnOpenMPPriorityClause(Expr *Priority, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'dist_schedule' clause. OMPClause *ActOnOpenMPDistScheduleClause( OpenMPDistScheduleClauseKind Kind, Expr *ChunkSize, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation KindLoc, SourceLocation CommaLoc, SourceLocation EndLoc); /// Called on well-formed 'defaultmap' clause. 
OMPClause *ActOnOpenMPDefaultmapClause( OpenMPDefaultmapClauseModifier M, OpenMPDefaultmapClauseKind Kind, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation MLoc, SourceLocation KindLoc, SourceLocation EndLoc); /// Called on well-formed 'to' clause. OMPClause * ActOnOpenMPToClause(ArrayRef<Expr *> VarList, CXXScopeSpec &MapperIdScopeSpec, DeclarationNameInfo &MapperId, const OMPVarListLocTy &Locs, ArrayRef<Expr *> UnresolvedMappers = llvm::None); /// Called on well-formed 'from' clause. OMPClause *ActOnOpenMPFromClause( ArrayRef<Expr *> VarList, CXXScopeSpec &MapperIdScopeSpec, DeclarationNameInfo &MapperId, const OMPVarListLocTy &Locs, ArrayRef<Expr *> UnresolvedMappers = llvm::None); /// Called on well-formed 'use_device_ptr' clause. OMPClause *ActOnOpenMPUseDevicePtrClause(ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs); /// Called on well-formed 'is_device_ptr' clause. OMPClause *ActOnOpenMPIsDevicePtrClause(ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs); /// The kind of conversion being performed. enum CheckedConversionKind { /// An implicit conversion. CCK_ImplicitConversion, /// A C-style cast. CCK_CStyleCast, /// A functional-style cast. CCK_FunctionalCast, /// A cast other than a C-style cast. CCK_OtherCast, /// A conversion for an operand of a builtin overloaded operator. CCK_ForBuiltinOverloadedOp }; static bool isCast(CheckedConversionKind CCK) { return CCK == CCK_CStyleCast || CCK == CCK_FunctionalCast || CCK == CCK_OtherCast; } /// ImpCastExprToType - If Expr is not of type 'Type', insert an implicit /// cast. If there is already an implicit cast, merge into the existing one. /// If isLvalue, the result of the cast is an lvalue. 
ExprResult ImpCastExprToType(Expr *E, QualType Type, CastKind CK, ExprValueKind VK = VK_RValue, const CXXCastPath *BasePath = nullptr, CheckedConversionKind CCK = CCK_ImplicitConversion); /// ScalarTypeToBooleanCastKind - Returns the cast kind corresponding /// to the conversion from scalar type ScalarTy to the Boolean type. static CastKind ScalarTypeToBooleanCastKind(QualType ScalarTy); /// IgnoredValueConversions - Given that an expression's result is /// syntactically ignored, perform any conversions that are /// required. ExprResult IgnoredValueConversions(Expr *E); // UsualUnaryConversions - promotes integers (C99 6.3.1.1p2) and converts // functions and arrays to their respective pointers (C99 6.3.2.1). ExprResult UsualUnaryConversions(Expr *E); /// CallExprUnaryConversions - a special case of an unary conversion /// performed on a function designator of a call expression. ExprResult CallExprUnaryConversions(Expr *E); // DefaultFunctionArrayConversion - converts functions and arrays // to their respective pointers (C99 6.3.2.1). ExprResult DefaultFunctionArrayConversion(Expr *E, bool Diagnose = true); // DefaultFunctionArrayLvalueConversion - converts functions and // arrays to their respective pointers and performs the // lvalue-to-rvalue conversion. ExprResult DefaultFunctionArrayLvalueConversion(Expr *E, bool Diagnose = true); // DefaultLvalueConversion - performs lvalue-to-rvalue conversion on // the operand. This is DefaultFunctionArrayLvalueConversion, // except that it assumes the operand isn't of function or array // type. ExprResult DefaultLvalueConversion(Expr *E); // DefaultArgumentPromotion (C99 6.5.2.2p6). Used for function calls that // do not have a prototype. Integer promotions are performed on each // argument, and arguments that have type float are promoted to double. ExprResult DefaultArgumentPromotion(Expr *E); /// If \p E is a prvalue denoting an unmaterialized temporary, materialize /// it as an xvalue. 
/// In C++98, the result will still be a prvalue, because
/// we don't have xvalues there.
ExprResult TemporaryMaterializationConversion(Expr *E);

// Used for emitting the right warning by DefaultVariadicArgumentPromotion
enum VariadicCallType {
  VariadicFunction,
  VariadicBlock,
  VariadicMethod,
  VariadicConstructor,
  VariadicDoesNotApply
};

VariadicCallType getVariadicCallType(FunctionDecl *FDecl,
                                     const FunctionProtoType *Proto,
                                     Expr *Fn);

// Used for determining in which context a type is allowed to be passed to a
// vararg function.
enum VarArgKind {
  VAK_Valid,
  VAK_ValidInCXX11,
  VAK_Undefined,
  VAK_MSVCUndefined,
  VAK_Invalid
};

// Determines which VarArgKind fits an expression.
VarArgKind isValidVarArgType(const QualType &Ty);

/// Check to see if the given expression is a valid argument to a variadic
/// function, issuing a diagnostic if not.
void checkVariadicArgument(const Expr *E, VariadicCallType CT);

/// Check to see if a given expression could have '.c_str()' called on it.
bool hasCStrMethod(const Expr *E);

/// GatherArgumentsForCall - Collects argument expressions for various
/// forms of call prototypes.
bool GatherArgumentsForCall(SourceLocation CallLoc, FunctionDecl *FDecl,
                            const FunctionProtoType *Proto,
                            unsigned FirstParam, ArrayRef<Expr *> Args,
                            SmallVectorImpl<Expr *> &AllArgs,
                            VariadicCallType CallType = VariadicDoesNotApply,
                            bool AllowExplicit = false,
                            bool IsListInitialization = false);

// DefaultVariadicArgumentPromotion - Like DefaultArgumentPromotion, but
// will create a runtime trap if the resulting type is not a POD type.
ExprResult DefaultVariadicArgumentPromotion(Expr *E, VariadicCallType CT,
                                            FunctionDecl *FDecl);

// UsualArithmeticConversions - performs the UsualUnaryConversions on its
// operands and then handles various conversions that are common to binary
// operators (C99 6.3.1.8). If both operands aren't arithmetic, this
// routine returns the first non-arithmetic type found.
The client is // responsible for emitting appropriate error diagnostics. QualType UsualArithmeticConversions(ExprResult &LHS, ExprResult &RHS, bool IsCompAssign = false); /// AssignConvertType - All of the 'assignment' semantic checks return this /// enum to indicate whether the assignment was allowed. These checks are /// done for simple assignments, as well as initialization, return from /// function, argument passing, etc. The query is phrased in terms of a /// source and destination type. enum AssignConvertType { /// Compatible - the types are compatible according to the standard. Compatible, /// PointerToInt - The assignment converts a pointer to an int, which we /// accept as an extension. PointerToInt, /// IntToPointer - The assignment converts an int to a pointer, which we /// accept as an extension. IntToPointer, /// FunctionVoidPointer - The assignment is between a function pointer and /// void*, which the standard doesn't allow, but we accept as an extension. FunctionVoidPointer, /// IncompatiblePointer - The assignment is between two pointers types that /// are not compatible, but we accept them as an extension. IncompatiblePointer, /// IncompatiblePointerSign - The assignment is between two pointers types /// which point to integers which have a different sign, but are otherwise /// identical. This is a subset of the above, but broken out because it's by /// far the most common case of incompatible pointers. IncompatiblePointerSign, /// CompatiblePointerDiscardsQualifiers - The assignment discards /// c/v/r qualifiers, which we accept as an extension. CompatiblePointerDiscardsQualifiers, /// IncompatiblePointerDiscardsQualifiers - The assignment /// discards qualifiers that we don't permit to be discarded, /// like address spaces. IncompatiblePointerDiscardsQualifiers, /// IncompatibleNestedPointerAddressSpaceMismatch - The assignment /// changes address spaces in nested pointer types which is not allowed. 
/// For instance, converting __private int ** to __generic int ** is /// illegal even though __private could be converted to __generic. IncompatibleNestedPointerAddressSpaceMismatch, /// IncompatibleNestedPointerQualifiers - The assignment is between two /// nested pointer types, and the qualifiers other than the first two /// levels differ e.g. char ** -> const char **, but we accept them as an /// extension. IncompatibleNestedPointerQualifiers, /// IncompatibleVectors - The assignment is between two vector types that /// have the same size, which we accept as an extension. IncompatibleVectors, /// IntToBlockPointer - The assignment converts an int to a block /// pointer. We disallow this. IntToBlockPointer, /// IncompatibleBlockPointer - The assignment is between two block /// pointers types that are not compatible. IncompatibleBlockPointer, /// IncompatibleObjCQualifiedId - The assignment is between a qualified /// id type and something else (that is incompatible with it). For example, /// "id <XXX>" = "Foo *", where "Foo *" doesn't implement the XXX protocol. IncompatibleObjCQualifiedId, /// IncompatibleObjCWeakRef - Assigning a weak-unavailable object to an /// object with __weak qualifier. IncompatibleObjCWeakRef, /// Incompatible - We reject this conversion outright, it is invalid to /// represent it in the AST. Incompatible }; /// DiagnoseAssignmentResult - Emit a diagnostic, if required, for the /// assignment conversion type specified by ConvTy. This returns true if the /// conversion was invalid or false if the conversion was accepted. bool DiagnoseAssignmentResult(AssignConvertType ConvTy, SourceLocation Loc, QualType DstType, QualType SrcType, Expr *SrcExpr, AssignmentAction Action, bool *Complained = nullptr); /// IsValueInFlagEnum - Determine if a value is allowed as part of a flag /// enum. If AllowMask is true, then we also allow the complement of a valid /// value, to be used as a mask. 
bool IsValueInFlagEnum(const EnumDecl *ED, const llvm::APInt &Val, bool AllowMask) const; /// DiagnoseAssignmentEnum - Warn if assignment to enum is a constant /// integer not in the range of enum values. void DiagnoseAssignmentEnum(QualType DstType, QualType SrcType, Expr *SrcExpr); /// CheckAssignmentConstraints - Perform type checking for assignment, /// argument passing, variable initialization, and function return values. /// C99 6.5.16. AssignConvertType CheckAssignmentConstraints(SourceLocation Loc, QualType LHSType, QualType RHSType); /// Check assignment constraints and optionally prepare for a conversion of /// the RHS to the LHS type. The conversion is prepared for if ConvertRHS /// is true. AssignConvertType CheckAssignmentConstraints(QualType LHSType, ExprResult &RHS, CastKind &Kind, bool ConvertRHS = true); /// Check assignment constraints for an assignment of RHS to LHSType. /// /// \param LHSType The destination type for the assignment. /// \param RHS The source expression for the assignment. /// \param Diagnose If \c true, diagnostics may be produced when checking /// for assignability. If a diagnostic is produced, \p RHS will be /// set to ExprError(). Note that this function may still return /// without producing a diagnostic, even for an invalid assignment. /// \param DiagnoseCFAudited If \c true, the target is a function parameter /// in an audited Core Foundation API and does not need to be checked /// for ARC retain issues. /// \param ConvertRHS If \c true, \p RHS will be updated to model the /// conversions necessary to perform the assignment. If \c false, /// \p Diagnose must also be \c false. AssignConvertType CheckSingleAssignmentConstraints( QualType LHSType, ExprResult &RHS, bool Diagnose = true, bool DiagnoseCFAudited = false, bool ConvertRHS = true); // If the lhs type is a transparent union, check whether we // can initialize the transparent union with the given expression. 
AssignConvertType CheckTransparentUnionArgumentConstraints(QualType ArgType, ExprResult &RHS); bool IsStringLiteralToNonConstPointerConversion(Expr *From, QualType ToType); bool CheckExceptionSpecCompatibility(Expr *From, QualType ToType); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, AssignmentAction Action, bool AllowExplicit = false); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, AssignmentAction Action, bool AllowExplicit, ImplicitConversionSequence& ICS); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, const ImplicitConversionSequence& ICS, AssignmentAction Action, CheckedConversionKind CCK = CCK_ImplicitConversion); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, const StandardConversionSequence& SCS, AssignmentAction Action, CheckedConversionKind CCK); ExprResult PerformQualificationConversion( Expr *E, QualType Ty, ExprValueKind VK = VK_RValue, CheckedConversionKind CCK = CCK_ImplicitConversion); /// the following "Check" methods will return a valid/converted QualType /// or a null QualType (indicating an error diagnostic was issued). /// type checking binary operators (subroutines of CreateBuiltinBinOp). 
QualType InvalidOperands(SourceLocation Loc, ExprResult &LHS, ExprResult &RHS); QualType InvalidLogicalVectorOperands(SourceLocation Loc, ExprResult &LHS, ExprResult &RHS); QualType CheckPointerToMemberOperands( // C++ 5.5 ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK, SourceLocation OpLoc, bool isIndirect); QualType CheckMultiplyDivideOperands( // C99 6.5.5 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign, bool IsDivide); QualType CheckRemainderOperands( // C99 6.5.5 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign = false); QualType CheckAdditionOperands( // C99 6.5.6 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc, QualType* CompLHSTy = nullptr); QualType CheckSubtractionOperands( // C99 6.5.6 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, QualType* CompLHSTy = nullptr); QualType CheckShiftOperands( // C99 6.5.7 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc, bool IsCompAssign = false); void CheckPtrComparisonWithNullChar(ExprResult &E, ExprResult &NullE); QualType CheckCompareOperands( // C99 6.5.8/9 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc); QualType CheckBitwiseOperands( // C99 6.5.[10...12] ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc); QualType CheckLogicalOperands( // C99 6.5.[13,14] ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc); // CheckAssignmentOperands is used for both simple and compound assignment. // For simple assignment, pass both expressions and a null converted type. // For compound assignment, pass both expressions and the converted type. 
QualType CheckAssignmentOperands( // C99 6.5.16.[1,2] Expr *LHSExpr, ExprResult &RHS, SourceLocation Loc, QualType CompoundType); ExprResult checkPseudoObjectIncDec(Scope *S, SourceLocation OpLoc, UnaryOperatorKind Opcode, Expr *Op); ExprResult checkPseudoObjectAssignment(Scope *S, SourceLocation OpLoc, BinaryOperatorKind Opcode, Expr *LHS, Expr *RHS); ExprResult checkPseudoObjectRValue(Expr *E); Expr *recreateSyntacticForm(PseudoObjectExpr *E); QualType CheckConditionalOperands( // C99 6.5.15 ExprResult &Cond, ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK, ExprObjectKind &OK, SourceLocation QuestionLoc); QualType CXXCheckConditionalOperands( // C++ 5.16 ExprResult &cond, ExprResult &lhs, ExprResult &rhs, ExprValueKind &VK, ExprObjectKind &OK, SourceLocation questionLoc); QualType FindCompositePointerType(SourceLocation Loc, Expr *&E1, Expr *&E2, bool ConvertArgs = true); QualType FindCompositePointerType(SourceLocation Loc, ExprResult &E1, ExprResult &E2, bool ConvertArgs = true) { Expr *E1Tmp = E1.get(), *E2Tmp = E2.get(); QualType Composite = FindCompositePointerType(Loc, E1Tmp, E2Tmp, ConvertArgs); E1 = E1Tmp; E2 = E2Tmp; return Composite; } QualType FindCompositeObjCPointerType(ExprResult &LHS, ExprResult &RHS, SourceLocation QuestionLoc); bool DiagnoseConditionalForNull(Expr *LHSExpr, Expr *RHSExpr, SourceLocation QuestionLoc); void DiagnoseAlwaysNonNullPointer(Expr *E, Expr::NullPointerConstantKind NullType, bool IsEqual, SourceRange Range); /// type checking for vector binary operators. 
QualType CheckVectorOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign, bool AllowBothBool, bool AllowBoolConversion); QualType GetSignedVectorType(QualType V); QualType CheckVectorCompareOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc); QualType CheckVectorLogicalOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc); bool areLaxCompatibleVectorTypes(QualType srcType, QualType destType); bool isLaxVectorConversion(QualType srcType, QualType destType); /// type checking declaration initializers (C99 6.7.8) bool CheckForConstantInitializer(Expr *e, QualType t); // type checking C++ declaration initializers (C++ [dcl.init]). /// ReferenceCompareResult - Expresses the result of comparing two /// types (cv1 T1 and cv2 T2) to determine their compatibility for the /// purposes of initialization by reference (C++ [dcl.init.ref]p4). enum ReferenceCompareResult { /// Ref_Incompatible - The two types are incompatible, so direct /// reference binding is not possible. Ref_Incompatible = 0, /// Ref_Related - The two types are reference-related, which means /// that their unqualified forms (T1 and T2) are either the same /// or T1 is a base class of T2. Ref_Related, /// Ref_Compatible - The two types are reference-compatible. Ref_Compatible }; ReferenceCompareResult CompareReferenceRelationship(SourceLocation Loc, QualType T1, QualType T2, bool &DerivedToBase, bool &ObjCConversion, bool &ObjCLifetimeConversion); ExprResult checkUnknownAnyCast(SourceRange TypeRange, QualType CastType, Expr *CastExpr, CastKind &CastKind, ExprValueKind &VK, CXXCastPath &Path); /// Force an expression with unknown-type to an expression of the /// given type. ExprResult forceUnknownAnyToType(Expr *E, QualType ToType); /// Type-check an expression that's being passed to an /// __unknown_anytype parameter. 
ExprResult checkUnknownAnyArg(SourceLocation callLoc, Expr *result, QualType &paramType); // CheckVectorCast - check type constraints for vectors. // Since vectors are an extension, there are no C standard reference for this. // We allow casting between vectors and integer datatypes of the same size. // returns true if the cast is invalid bool CheckVectorCast(SourceRange R, QualType VectorTy, QualType Ty, CastKind &Kind); /// Prepare `SplattedExpr` for a vector splat operation, adding /// implicit casts if necessary. ExprResult prepareVectorSplat(QualType VectorTy, Expr *SplattedExpr); // CheckExtVectorCast - check type constraints for extended vectors. // Since vectors are an extension, there are no C standard reference for this. // We allow casting between vectors and integer datatypes of the same size, // or vectors and the element type of that vector. // returns the cast expr ExprResult CheckExtVectorCast(SourceRange R, QualType DestTy, Expr *CastExpr, CastKind &Kind); ExprResult BuildCXXFunctionalCastExpr(TypeSourceInfo *TInfo, QualType Type, SourceLocation LParenLoc, Expr *CastExpr, SourceLocation RParenLoc); enum ARCConversionResult { ACR_okay, ACR_unbridged, ACR_error }; /// Checks for invalid conversions and casts between /// retainable pointers and other pointer kinds for ARC and Weak. ARCConversionResult CheckObjCConversion(SourceRange castRange, QualType castType, Expr *&op, CheckedConversionKind CCK, bool Diagnose = true, bool DiagnoseCFAudited = false, BinaryOperatorKind Opc = BO_PtrMemD ); Expr *stripARCUnbridgedCast(Expr *e); void diagnoseARCUnbridgedCast(Expr *e); bool CheckObjCARCUnavailableWeakConversion(QualType castType, QualType ExprType); /// checkRetainCycles - Check whether an Objective-C message send /// might create an obvious retain cycle. 
void checkRetainCycles(ObjCMessageExpr *msg); void checkRetainCycles(Expr *receiver, Expr *argument); void checkRetainCycles(VarDecl *Var, Expr *Init); /// checkUnsafeAssigns - Check whether +1 expr is being assigned /// to weak/__unsafe_unretained type. bool checkUnsafeAssigns(SourceLocation Loc, QualType LHS, Expr *RHS); /// checkUnsafeExprAssigns - Check whether +1 expr is being assigned /// to weak/__unsafe_unretained expression. void checkUnsafeExprAssigns(SourceLocation Loc, Expr *LHS, Expr *RHS); /// CheckMessageArgumentTypes - Check types in an Obj-C message send. /// \param Method - May be null. /// \param [out] ReturnType - The return type of the send. /// \return true iff there were any incompatible types. bool CheckMessageArgumentTypes(const Expr *Receiver, QualType ReceiverType, MultiExprArg Args, Selector Sel, ArrayRef<SourceLocation> SelectorLocs, ObjCMethodDecl *Method, bool isClassMessage, bool isSuperMessage, SourceLocation lbrac, SourceLocation rbrac, SourceRange RecRange, QualType &ReturnType, ExprValueKind &VK); /// Determine the result of a message send expression based on /// the type of the receiver, the method expected to receive the message, /// and the form of the message send. QualType getMessageSendResultType(const Expr *Receiver, QualType ReceiverType, ObjCMethodDecl *Method, bool isClassMessage, bool isSuperMessage); /// If the given expression involves a message send to a method /// with a related result type, emit a note describing what happened. void EmitRelatedResultTypeNote(const Expr *E); /// Given that we had incompatible pointer types in a return /// statement, check whether we're in a method with a related result /// type, and if so, emit a note describing what happened. 
void EmitRelatedResultTypeNoteForReturn(QualType destType); class ConditionResult { Decl *ConditionVar; FullExprArg Condition; bool Invalid; bool HasKnownValue; bool KnownValue; friend class Sema; ConditionResult(Sema &S, Decl *ConditionVar, FullExprArg Condition, bool IsConstexpr) : ConditionVar(ConditionVar), Condition(Condition), Invalid(false), HasKnownValue(IsConstexpr && Condition.get() && !Condition.get()->isValueDependent()), KnownValue(HasKnownValue && !!Condition.get()->EvaluateKnownConstInt(S.Context)) {} explicit ConditionResult(bool Invalid) : ConditionVar(nullptr), Condition(nullptr), Invalid(Invalid), HasKnownValue(false), KnownValue(false) {} public: ConditionResult() : ConditionResult(false) {} bool isInvalid() const { return Invalid; } std::pair<VarDecl *, Expr *> get() const { return std::make_pair(cast_or_null<VarDecl>(ConditionVar), Condition.get()); } llvm::Optional<bool> getKnownValue() const { if (!HasKnownValue) return None; return KnownValue; } }; static ConditionResult ConditionError() { return ConditionResult(true); } enum class ConditionKind { Boolean, ///< A boolean condition, from 'if', 'while', 'for', or 'do'. ConstexprIf, ///< A constant boolean condition from 'if constexpr'. Switch ///< An integral condition for a 'switch' statement. }; ConditionResult ActOnCondition(Scope *S, SourceLocation Loc, Expr *SubExpr, ConditionKind CK); ConditionResult ActOnConditionVariable(Decl *ConditionVar, SourceLocation StmtLoc, ConditionKind CK); DeclResult ActOnCXXConditionDeclaration(Scope *S, Declarator &D); ExprResult CheckConditionVariable(VarDecl *ConditionVar, SourceLocation StmtLoc, ConditionKind CK); ExprResult CheckSwitchCondition(SourceLocation SwitchLoc, Expr *Cond); /// CheckBooleanCondition - Diagnose problems involving the use of /// the given expression as a boolean condition (e.g. in an if /// statement). Also performs the standard function and array /// decays, possibly changing the input variable. 
/// /// \param Loc - A location associated with the condition, e.g. the /// 'if' keyword. /// \return true iff there were any errors ExprResult CheckBooleanCondition(SourceLocation Loc, Expr *E, bool IsConstexpr = false); /// ActOnExplicitBoolSpecifier - Build an ExplicitSpecifier from an expression /// found in an explicit(bool) specifier. ExplicitSpecifier ActOnExplicitBoolSpecifier(Expr *E); /// tryResolveExplicitSpecifier - Attempt to resolve the explict specifier. /// Returns true if the explicit specifier is now resolved. bool tryResolveExplicitSpecifier(ExplicitSpecifier &ExplicitSpec); /// DiagnoseAssignmentAsCondition - Given that an expression is /// being used as a boolean condition, warn if it's an assignment. void DiagnoseAssignmentAsCondition(Expr *E); /// Redundant parentheses over an equality comparison can indicate /// that the user intended an assignment used as condition. void DiagnoseEqualityWithExtraParens(ParenExpr *ParenE); /// CheckCXXBooleanCondition - Returns true if conversion to bool is invalid. ExprResult CheckCXXBooleanCondition(Expr *CondExpr, bool IsConstexpr = false); /// ConvertIntegerToTypeWarnOnOverflow - Convert the specified APInt to have /// the specified width and sign. If an overflow occurs, detect it and emit /// the specified diagnostic. void ConvertIntegerToTypeWarnOnOverflow(llvm::APSInt &OldVal, unsigned NewWidth, bool NewSign, SourceLocation Loc, unsigned DiagID); /// Checks that the Objective-C declaration is declared in the global scope. /// Emits an error and marks the declaration as invalid if it's not declared /// in the global scope. bool CheckObjCDeclScope(Decl *D); /// Abstract base class used for diagnosing integer constant /// expression violations. 
class VerifyICEDiagnoser { public: bool Suppress; VerifyICEDiagnoser(bool Suppress = false) : Suppress(Suppress) { } virtual void diagnoseNotICE(Sema &S, SourceLocation Loc, SourceRange SR) =0; virtual void diagnoseFold(Sema &S, SourceLocation Loc, SourceRange SR); virtual ~VerifyICEDiagnoser() { } }; /// VerifyIntegerConstantExpression - Verifies that an expression is an ICE, /// and reports the appropriate diagnostics. Returns false on success. /// Can optionally return the value of the expression. ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result, VerifyICEDiagnoser &Diagnoser, bool AllowFold = true); ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result, unsigned DiagID, bool AllowFold = true); ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result = nullptr); /// VerifyBitField - verifies that a bit field expression is an ICE and has /// the correct width, and that the field type is valid. /// Returns false on success. /// Can optionally return whether the bit-field is of width 0 ExprResult VerifyBitField(SourceLocation FieldLoc, IdentifierInfo *FieldName, QualType FieldTy, bool IsMsStruct, Expr *BitWidth, bool *ZeroWidth = nullptr); private: unsigned ForceCUDAHostDeviceDepth = 0; public: /// Increments our count of the number of times we've seen a pragma forcing /// functions to be __host__ __device__. So long as this count is greater /// than zero, all functions encountered will be __host__ __device__. void PushForceCUDAHostDevice(); /// Decrements our count of the number of times we've seen a pragma forcing /// functions to be __host__ __device__. Returns false if the count is 0 /// before incrementing, so you can emit an error. bool PopForceCUDAHostDevice(); /// Diagnostics that are emitted only if we discover that the given function /// must be codegen'ed. Because handling these correctly adds overhead to /// compilation, this is currently only enabled for CUDA compilations. 
llvm::DenseMap<CanonicalDeclPtr<FunctionDecl>, std::vector<PartialDiagnosticAt>> DeviceDeferredDiags; /// A pair of a canonical FunctionDecl and a SourceLocation. When used as the /// key in a hashtable, both the FD and location are hashed. struct FunctionDeclAndLoc { CanonicalDeclPtr<FunctionDecl> FD; SourceLocation Loc; }; /// FunctionDecls and SourceLocations for which CheckCUDACall has emitted a /// (maybe deferred) "bad call" diagnostic. We use this to avoid emitting the /// same deferred diag twice. llvm::DenseSet<FunctionDeclAndLoc> LocsWithCUDACallDiags; /// An inverse call graph, mapping known-emitted functions to one of their /// known-emitted callers (plus the location of the call). /// /// Functions that we can tell a priori must be emitted aren't added to this /// map. llvm::DenseMap</* Callee = */ CanonicalDeclPtr<FunctionDecl>, /* Caller = */ FunctionDeclAndLoc> DeviceKnownEmittedFns; /// A partial call graph maintained during CUDA/OpenMP device code compilation /// to support deferred diagnostics. /// /// Functions are only added here if, at the time they're considered, they are /// not known-emitted. As soon as we discover that a function is /// known-emitted, we remove it and everything it transitively calls from this /// set and add those functions to DeviceKnownEmittedFns. llvm::DenseMap</* Caller = */ CanonicalDeclPtr<FunctionDecl>, /* Callees = */ llvm::MapVector<CanonicalDeclPtr<FunctionDecl>, SourceLocation>> DeviceCallGraph; /// Diagnostic builder for CUDA/OpenMP devices errors which may or may not be /// deferred. /// /// In CUDA, there exist constructs (e.g. variable-length arrays, try/catch) /// which are not allowed to appear inside __device__ functions and are /// allowed to appear in __host__ __device__ functions only if the host+device /// function is never codegen'ed. /// /// To handle this, we use the notion of "deferred diagnostics", where we /// attach a diagnostic to a FunctionDecl that's emitted iff it's codegen'ed. 
/// /// This class lets you emit either a regular diagnostic, a deferred /// diagnostic, or no diagnostic at all, according to an argument you pass to /// its constructor, thus simplifying the process of creating these "maybe /// deferred" diagnostics. class DeviceDiagBuilder { public: enum Kind { /// Emit no diagnostics. K_Nop, /// Emit the diagnostic immediately (i.e., behave like Sema::Diag()). K_Immediate, /// Emit the diagnostic immediately, and, if it's a warning or error, also /// emit a call stack showing how this function can be reached by an a /// priori known-emitted function. K_ImmediateWithCallStack, /// Create a deferred diagnostic, which is emitted only if the function /// it's attached to is codegen'ed. Also emit a call stack as with /// K_ImmediateWithCallStack. K_Deferred }; DeviceDiagBuilder(Kind K, SourceLocation Loc, unsigned DiagID, FunctionDecl *Fn, Sema &S); DeviceDiagBuilder(DeviceDiagBuilder &&D); DeviceDiagBuilder(const DeviceDiagBuilder &) = default; ~DeviceDiagBuilder(); /// Convertible to bool: True if we immediately emitted an error, false if /// we didn't emit an error or we created a deferred error. /// /// Example usage: /// /// if (DeviceDiagBuilder(...) << foo << bar) /// return ExprError(); /// /// But see CUDADiagIfDeviceCode() and CUDADiagIfHostCode() -- you probably /// want to use these instead of creating a DeviceDiagBuilder yourself. operator bool() const { return ImmediateDiag.hasValue(); } template <typename T> friend const DeviceDiagBuilder &operator<<(const DeviceDiagBuilder &Diag, const T &Value) { if (Diag.ImmediateDiag.hasValue()) *Diag.ImmediateDiag << Value; else if (Diag.PartialDiagId.hasValue()) Diag.S.DeviceDeferredDiags[Diag.Fn][*Diag.PartialDiagId].second << Value; return Diag; } private: Sema &S; SourceLocation Loc; unsigned DiagID; FunctionDecl *Fn; bool ShowCallStack; // Invariant: At most one of these Optionals has a value. // FIXME: Switch these to a Variant once that exists. 
llvm::Optional<SemaDiagnosticBuilder> ImmediateDiag;
llvm::Optional<unsigned> PartialDiagId;
};

/// Indicate that this function (and thus everything it transitively calls)
/// will be codegen'ed, and emit any deferred diagnostics on this function and
/// its (transitive) callees.
void markKnownEmitted(
    Sema &S, FunctionDecl *OrigCaller, FunctionDecl *OrigCallee,
    SourceLocation OrigLoc,
    const llvm::function_ref<bool(Sema &, FunctionDecl *)> IsKnownEmitted);

/// Creates a DeviceDiagBuilder that emits the diagnostic if the current
/// context is "used as device code".
///
/// - If CurContext is a __host__ function, does not emit any diagnostics.
/// - If CurContext is a __device__ or __global__ function, emits the
///   diagnostics immediately.
/// - If CurContext is a __host__ __device__ function and we are compiling for
///   the device, creates a diagnostic which is emitted if and when we realize
///   that the function will be codegen'ed.
///
/// Example usage:
///
///  // Variable-length arrays are not allowed in CUDA device code.
///  if (CUDADiagIfDeviceCode(Loc, diag::err_cuda_vla) << CurrentCUDATarget())
///    return ExprError();
///  // Otherwise, continue parsing as normal.
DeviceDiagBuilder CUDADiagIfDeviceCode(SourceLocation Loc, unsigned DiagID);

/// Creates a DeviceDiagBuilder that emits the diagnostic if the current
/// context is "used as host code".
///
/// Same as CUDADiagIfDeviceCode, with "host" and "device" switched.
DeviceDiagBuilder CUDADiagIfHostCode(SourceLocation Loc, unsigned DiagID);

/// Creates a DeviceDiagBuilder that emits the diagnostic if the current
/// context is "used as device code".
///
/// - If CurContext is a `declare target` function or it is known that the
///   function is emitted for the device, emits the diagnostics immediately.
/// - If CurContext is a non-`declare target` function and we are compiling
///   for the device, creates a diagnostic which is emitted if and when we
///   realize that the function will be codegen'ed.
/// /// Example usage: /// /// // Variable-length arrays are not allowed in NVPTX device code. /// if (diagIfOpenMPDeviceCode(Loc, diag::err_vla_unsupported)) /// return ExprError(); /// // Otherwise, continue parsing as normal. DeviceDiagBuilder diagIfOpenMPDeviceCode(SourceLocation Loc, unsigned DiagID); /// Creates a DeviceDiagBuilder that emits the diagnostic if the current /// context is "used as host code". /// /// - If CurContext is a `declare target` function or it is known that the /// function is emitted for the host, emits the diagnostics immediately. /// - If CurContext is a non-host function, just ignore it. /// /// Example usage: /// /// // Variable-length arrays are not allowed in NVPTX device code. /// if (diagIfOpenMPHostode(Loc, diag::err_vla_unsupported)) /// return ExprError(); /// // Otherwise, continue parsing as normal. DeviceDiagBuilder diagIfOpenMPHostCode(SourceLocation Loc, unsigned DiagID); DeviceDiagBuilder targetDiag(SourceLocation Loc, unsigned DiagID); enum CUDAFunctionTarget { CFT_Device, CFT_Global, CFT_Host, CFT_HostDevice, CFT_InvalidTarget }; /// Determines whether the given function is a CUDA device/host/kernel/etc. /// function. /// /// Use this rather than examining the function's attributes yourself -- you /// will get it wrong. Returns CFT_Host if D is null. CUDAFunctionTarget IdentifyCUDATarget(const FunctionDecl *D, bool IgnoreImplicitHDAttr = false); CUDAFunctionTarget IdentifyCUDATarget(const ParsedAttributesView &Attrs); /// Gets the CUDA target for the current context. CUDAFunctionTarget CurrentCUDATarget() { return IdentifyCUDATarget(dyn_cast<FunctionDecl>(CurContext)); } // CUDA function call preference. Must be ordered numerically from // worst to best. enum CUDAFunctionPreference { CFP_Never, // Invalid caller/callee combination. CFP_WrongSide, // Calls from host-device to host or device // function that do not match current compilation // mode. CFP_HostDevice, // Any calls to host/device functions. 
CFP_SameSide, // Calls from host-device to host or device // function matching current compilation mode. CFP_Native, // host-to-host or device-to-device calls. }; /// Identifies relative preference of a given Caller/Callee /// combination, based on their host/device attributes. /// \param Caller function which needs address of \p Callee. /// nullptr in case of global context. /// \param Callee target function /// /// \returns preference value for particular Caller/Callee combination. CUDAFunctionPreference IdentifyCUDAPreference(const FunctionDecl *Caller, const FunctionDecl *Callee); /// Determines whether Caller may invoke Callee, based on their CUDA /// host/device attributes. Returns false if the call is not allowed. /// /// Note: Will return true for CFP_WrongSide calls. These may appear in /// semantically correct CUDA programs, but only if they're never codegen'ed. bool IsAllowedCUDACall(const FunctionDecl *Caller, const FunctionDecl *Callee) { return IdentifyCUDAPreference(Caller, Callee) != CFP_Never; } /// May add implicit CUDAHostAttr and CUDADeviceAttr attributes to FD, /// depending on FD and the current compilation settings. void maybeAddCUDAHostDeviceAttrs(FunctionDecl *FD, const LookupResult &Previous); public: /// Check whether we're allowed to call Callee from the current context. /// /// - If the call is never allowed in a semantically-correct program /// (CFP_Never), emits an error and returns false. /// /// - If the call is allowed in semantically-correct programs, but only if /// it's never codegen'ed (CFP_WrongSide), creates a deferred diagnostic to /// be emitted if and when the caller is codegen'ed, and returns true. /// /// Will only create deferred diagnostics for a given SourceLocation once, /// so you can safely call this multiple times without generating duplicate /// deferred errors. /// /// - Otherwise, returns true without emitting any diagnostics. 
bool CheckCUDACall(SourceLocation Loc, FunctionDecl *Callee); /// Set __device__ or __host__ __device__ attributes on the given lambda /// operator() method. /// /// CUDA lambdas declared inside __device__ or __global__ functions inherit /// the __device__ attribute. Similarly, lambdas inside __host__ __device__ /// functions become __host__ __device__ themselves. void CUDASetLambdaAttrs(CXXMethodDecl *Method); /// Finds a function in \p Matches with highest calling priority /// from \p Caller context and erases all functions with lower /// calling priority. void EraseUnwantedCUDAMatches( const FunctionDecl *Caller, SmallVectorImpl<std::pair<DeclAccessPair, FunctionDecl *>> &Matches); /// Given a implicit special member, infer its CUDA target from the /// calls it needs to make to underlying base/field special members. /// \param ClassDecl the class for which the member is being created. /// \param CSM the kind of special member. /// \param MemberDecl the special member itself. /// \param ConstRHS true if this is a copy operation with a const object on /// its RHS. /// \param Diagnose true if this call should emit diagnostics. /// \return true if there was an error inferring. /// The result of this call is implicit CUDA target attribute(s) attached to /// the member declaration. bool inferCUDATargetForImplicitSpecialMember(CXXRecordDecl *ClassDecl, CXXSpecialMember CSM, CXXMethodDecl *MemberDecl, bool ConstRHS, bool Diagnose); /// \return true if \p CD can be considered empty according to CUDA /// (E.2.3.1 in CUDA 7.5 Programming guide). bool isEmptyCudaConstructor(SourceLocation Loc, CXXConstructorDecl *CD); bool isEmptyCudaDestructor(SourceLocation Loc, CXXDestructorDecl *CD); // \brief Checks that initializers of \p Var satisfy CUDA restrictions. In // case of error emits appropriate diagnostic and invalidates \p Var. // // \details CUDA allows only empty constructors as initializers for global // variables (see E.2.3.1, CUDA 7.5). 
The same restriction also applies to all // __shared__ variables whether they are local or not (they all are implicitly // static in CUDA). One exception is that CUDA allows constant initializers // for __constant__ and __device__ variables. void checkAllowedCUDAInitializer(VarDecl *VD); /// Check whether NewFD is a valid overload for CUDA. Emits /// diagnostics and invalidates NewFD if not. void checkCUDATargetOverload(FunctionDecl *NewFD, const LookupResult &Previous); /// Copies target attributes from the template TD to the function FD. void inheritCUDATargetAttrs(FunctionDecl *FD, const FunctionTemplateDecl &TD); /// Returns the name of the launch configuration function. This is the name /// of the function that will be called to configure kernel call, with the /// parameters specified via <<<>>>. std::string getCudaConfigureFuncName() const; /// \name Code completion //@{ /// Describes the context in which code completion occurs. enum ParserCompletionContext { /// Code completion occurs at top-level or namespace context. PCC_Namespace, /// Code completion occurs within a class, struct, or union. PCC_Class, /// Code completion occurs within an Objective-C interface, protocol, /// or category. PCC_ObjCInterface, /// Code completion occurs within an Objective-C implementation or /// category implementation PCC_ObjCImplementation, /// Code completion occurs within the list of instance variables /// in an Objective-C interface, protocol, category, or implementation. PCC_ObjCInstanceVariableList, /// Code completion occurs following one or more template /// headers. PCC_Template, /// Code completion occurs following one or more template /// headers within a class. PCC_MemberTemplate, /// Code completion occurs within an expression. PCC_Expression, /// Code completion occurs within a statement, which may /// also be an expression or a declaration. 
PCC_Statement, /// Code completion occurs at the beginning of the /// initialization statement (or expression) in a for loop. PCC_ForInit, /// Code completion occurs within the condition of an if, /// while, switch, or for statement. PCC_Condition, /// Code completion occurs within the body of a function on a /// recovery path, where we do not have a specific handle on our position /// in the grammar. PCC_RecoveryInFunction, /// Code completion occurs where only a type is permitted. PCC_Type, /// Code completion occurs in a parenthesized expression, which /// might also be a type cast. PCC_ParenthesizedExpression, /// Code completion occurs within a sequence of declaration /// specifiers within a function, method, or block. PCC_LocalDeclarationSpecifiers }; void CodeCompleteModuleImport(SourceLocation ImportLoc, ModuleIdPath Path); void CodeCompleteOrdinaryName(Scope *S, ParserCompletionContext CompletionContext); void CodeCompleteDeclSpec(Scope *S, DeclSpec &DS, bool AllowNonIdentifiers, bool AllowNestedNameSpecifiers); struct CodeCompleteExpressionData; void CodeCompleteExpression(Scope *S, const CodeCompleteExpressionData &Data); void CodeCompleteExpression(Scope *S, QualType PreferredType, bool IsParenthesized = false); void CodeCompleteMemberReferenceExpr(Scope *S, Expr *Base, Expr *OtherOpBase, SourceLocation OpLoc, bool IsArrow, bool IsBaseExprStatement, QualType PreferredType); void CodeCompletePostfixExpression(Scope *S, ExprResult LHS, QualType PreferredType); void CodeCompleteTag(Scope *S, unsigned TagSpec); void CodeCompleteTypeQualifiers(DeclSpec &DS); void CodeCompleteFunctionQualifiers(DeclSpec &DS, Declarator &D, const VirtSpecifiers *VS = nullptr); void CodeCompleteBracketDeclarator(Scope *S); void CodeCompleteCase(Scope *S); /// Reports signatures for a call to CodeCompleteConsumer and returns the /// preferred type for the current argument. Returned type can be null. 
QualType ProduceCallSignatureHelp(Scope *S, Expr *Fn, ArrayRef<Expr *> Args, SourceLocation OpenParLoc); QualType ProduceConstructorSignatureHelp(Scope *S, QualType Type, SourceLocation Loc, ArrayRef<Expr *> Args, SourceLocation OpenParLoc); QualType ProduceCtorInitMemberSignatureHelp(Scope *S, Decl *ConstructorDecl, CXXScopeSpec SS, ParsedType TemplateTypeTy, ArrayRef<Expr *> ArgExprs, IdentifierInfo *II, SourceLocation OpenParLoc); void CodeCompleteInitializer(Scope *S, Decl *D); void CodeCompleteAfterIf(Scope *S); void CodeCompleteQualifiedId(Scope *S, CXXScopeSpec &SS, bool EnteringContext, QualType BaseType, QualType PreferredType); void CodeCompleteUsing(Scope *S); void CodeCompleteUsingDirective(Scope *S); void CodeCompleteNamespaceDecl(Scope *S); void CodeCompleteNamespaceAliasDecl(Scope *S); void CodeCompleteOperatorName(Scope *S); void CodeCompleteConstructorInitializer( Decl *Constructor, ArrayRef<CXXCtorInitializer *> Initializers); void CodeCompleteLambdaIntroducer(Scope *S, LambdaIntroducer &Intro, bool AfterAmpersand); void CodeCompleteObjCAtDirective(Scope *S); void CodeCompleteObjCAtVisibility(Scope *S); void CodeCompleteObjCAtStatement(Scope *S); void CodeCompleteObjCAtExpression(Scope *S); void CodeCompleteObjCPropertyFlags(Scope *S, ObjCDeclSpec &ODS); void CodeCompleteObjCPropertyGetter(Scope *S); void CodeCompleteObjCPropertySetter(Scope *S); void CodeCompleteObjCPassingType(Scope *S, ObjCDeclSpec &DS, bool IsParameter); void CodeCompleteObjCMessageReceiver(Scope *S); void CodeCompleteObjCSuperMessage(Scope *S, SourceLocation SuperLoc, ArrayRef<IdentifierInfo *> SelIdents, bool AtArgumentExpression); void CodeCompleteObjCClassMessage(Scope *S, ParsedType Receiver, ArrayRef<IdentifierInfo *> SelIdents, bool AtArgumentExpression, bool IsSuper = false); void CodeCompleteObjCInstanceMessage(Scope *S, Expr *Receiver, ArrayRef<IdentifierInfo *> SelIdents, bool AtArgumentExpression, ObjCInterfaceDecl *Super = nullptr); void 
CodeCompleteObjCForCollection(Scope *S, DeclGroupPtrTy IterationVar); void CodeCompleteObjCSelector(Scope *S, ArrayRef<IdentifierInfo *> SelIdents); void CodeCompleteObjCProtocolReferences( ArrayRef<IdentifierLocPair> Protocols); void CodeCompleteObjCProtocolDecl(Scope *S); void CodeCompleteObjCInterfaceDecl(Scope *S); void CodeCompleteObjCSuperclass(Scope *S, IdentifierInfo *ClassName, SourceLocation ClassNameLoc); void CodeCompleteObjCImplementationDecl(Scope *S); void CodeCompleteObjCInterfaceCategory(Scope *S, IdentifierInfo *ClassName, SourceLocation ClassNameLoc); void CodeCompleteObjCImplementationCategory(Scope *S, IdentifierInfo *ClassName, SourceLocation ClassNameLoc); void CodeCompleteObjCPropertyDefinition(Scope *S); void CodeCompleteObjCPropertySynthesizeIvar(Scope *S, IdentifierInfo *PropertyName); void CodeCompleteObjCMethodDecl(Scope *S, Optional<bool> IsInstanceMethod, ParsedType ReturnType); void CodeCompleteObjCMethodDeclSelector(Scope *S, bool IsInstanceMethod, bool AtParameterName, ParsedType ReturnType, ArrayRef<IdentifierInfo *> SelIdents); void CodeCompleteObjCClassPropertyRefExpr(Scope *S, IdentifierInfo &ClassName, SourceLocation ClassNameLoc, bool IsBaseExprStatement); void CodeCompletePreprocessorDirective(bool InConditional); void CodeCompleteInPreprocessorConditionalExclusion(Scope *S); void CodeCompletePreprocessorMacroName(bool IsDefinition); void CodeCompletePreprocessorExpression(); void CodeCompletePreprocessorMacroArgument(Scope *S, IdentifierInfo *Macro, MacroInfo *MacroInfo, unsigned Argument); void CodeCompleteIncludedFile(llvm::StringRef Dir, bool IsAngled); void CodeCompleteNaturalLanguage(); void CodeCompleteAvailabilityPlatformName(); void GatherGlobalCodeCompletions(CodeCompletionAllocator &Allocator, CodeCompletionTUInfo &CCTUInfo, SmallVectorImpl<CodeCompletionResult> &Results); //@} //===--------------------------------------------------------------------===// // Extra semantic analysis beyond the C type system public: 
SourceLocation getLocationOfStringLiteralByte(const StringLiteral *SL, unsigned ByteNo) const; private: void CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr, const ArraySubscriptExpr *ASE=nullptr, bool AllowOnePastEnd=true, bool IndexNegated=false); void CheckArrayAccess(const Expr *E); // Used to grab the relevant information from a FormatAttr and a // FunctionDeclaration. struct FormatStringInfo { unsigned FormatIdx; unsigned FirstDataArg; bool HasVAListArg; }; static bool getFormatStringInfo(const FormatAttr *Format, bool IsCXXMember, FormatStringInfo *FSI); bool CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall, const FunctionProtoType *Proto); bool CheckObjCMethodCall(ObjCMethodDecl *Method, SourceLocation loc, ArrayRef<const Expr *> Args); bool CheckPointerCall(NamedDecl *NDecl, CallExpr *TheCall, const FunctionProtoType *Proto); bool CheckOtherCall(CallExpr *TheCall, const FunctionProtoType *Proto); void CheckConstructorCall(FunctionDecl *FDecl, ArrayRef<const Expr *> Args, const FunctionProtoType *Proto, SourceLocation Loc); void checkCall(NamedDecl *FDecl, const FunctionProtoType *Proto, const Expr *ThisArg, ArrayRef<const Expr *> Args, bool IsMemberFunction, SourceLocation Loc, SourceRange Range, VariadicCallType CallType); bool CheckObjCString(Expr *Arg); ExprResult CheckOSLogFormatStringArg(Expr *Arg); ExprResult CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID, CallExpr *TheCall); void checkFortifiedBuiltinMemoryFunction(FunctionDecl *FD, CallExpr *TheCall); bool CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall, unsigned MaxWidth); bool CheckNeonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckARMBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckAArch64BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckBPFBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckHexagonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); 
bool CheckHexagonBuiltinCpu(unsigned BuiltinID, CallExpr *TheCall); bool CheckHexagonBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall); bool CheckMipsBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckSystemZBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckX86BuiltinRoundingOrSAE(unsigned BuiltinID, CallExpr *TheCall); bool CheckX86BuiltinGatherScatterScale(unsigned BuiltinID, CallExpr *TheCall); bool CheckX86BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckPPCBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool SemaBuiltinVAStart(unsigned BuiltinID, CallExpr *TheCall); bool SemaBuiltinVAStartARMMicrosoft(CallExpr *Call); bool SemaBuiltinUnorderedCompare(CallExpr *TheCall); bool SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs); bool SemaBuiltinVSX(CallExpr *TheCall); bool SemaBuiltinOSLogFormat(CallExpr *TheCall); public: // Used by C++ template instantiation. ExprResult SemaBuiltinShuffleVector(CallExpr *TheCall); ExprResult SemaConvertVectorExpr(Expr *E, TypeSourceInfo *TInfo, SourceLocation BuiltinLoc, SourceLocation RParenLoc); private: bool SemaBuiltinPrefetch(CallExpr *TheCall); bool SemaBuiltinAllocaWithAlign(CallExpr *TheCall); bool SemaBuiltinAssume(CallExpr *TheCall); bool SemaBuiltinAssumeAligned(CallExpr *TheCall); bool SemaBuiltinLongjmp(CallExpr *TheCall); bool SemaBuiltinSetjmp(CallExpr *TheCall); ExprResult SemaBuiltinAtomicOverloaded(ExprResult TheCallResult); ExprResult SemaBuiltinNontemporalOverloaded(ExprResult TheCallResult); ExprResult SemaAtomicOpsOverloaded(ExprResult TheCallResult, AtomicExpr::AtomicOp Op); ExprResult SemaBuiltinOperatorNewDeleteOverloaded(ExprResult TheCallResult, bool IsDelete); bool SemaBuiltinConstantArg(CallExpr *TheCall, int ArgNum, llvm::APSInt &Result); bool SemaBuiltinConstantArgRange(CallExpr *TheCall, int ArgNum, int Low, int High, bool RangeIsError = true); bool SemaBuiltinConstantArgMultiple(CallExpr *TheCall, int 
ArgNum, unsigned Multiple); bool SemaBuiltinARMSpecialReg(unsigned BuiltinID, CallExpr *TheCall, int ArgNum, unsigned ExpectedFieldNum, bool AllowName); bool SemaBuiltinARMMemoryTaggingCall(unsigned BuiltinID, CallExpr *TheCall); public: enum FormatStringType { FST_Scanf, FST_Printf, FST_NSString, FST_Strftime, FST_Strfmon, FST_Kprintf, FST_FreeBSDKPrintf, FST_OSTrace, FST_OSLog, FST_Unknown }; static FormatStringType GetFormatStringType(const FormatAttr *Format); bool FormatStringHasSArg(const StringLiteral *FExpr); static bool GetFormatNSStringIdx(const FormatAttr *Format, unsigned &Idx); private: bool CheckFormatArguments(const FormatAttr *Format, ArrayRef<const Expr *> Args, bool IsCXXMember, VariadicCallType CallType, SourceLocation Loc, SourceRange Range, llvm::SmallBitVector &CheckedVarArgs); bool CheckFormatArguments(ArrayRef<const Expr *> Args, bool HasVAListArg, unsigned format_idx, unsigned firstDataArg, FormatStringType Type, VariadicCallType CallType, SourceLocation Loc, SourceRange range, llvm::SmallBitVector &CheckedVarArgs); void CheckAbsoluteValueFunction(const CallExpr *Call, const FunctionDecl *FDecl); void CheckMaxUnsignedZero(const CallExpr *Call, const FunctionDecl *FDecl); void CheckMemaccessArguments(const CallExpr *Call, unsigned BId, IdentifierInfo *FnName); void CheckStrlcpycatArguments(const CallExpr *Call, IdentifierInfo *FnName); void CheckStrncatArguments(const CallExpr *Call, IdentifierInfo *FnName); void CheckReturnValExpr(Expr *RetValExp, QualType lhsType, SourceLocation ReturnLoc, bool isObjCMethod = false, const AttrVec *Attrs = nullptr, const FunctionDecl *FD = nullptr); public: void CheckFloatComparison(SourceLocation Loc, Expr *LHS, Expr *RHS); private: void CheckImplicitConversions(Expr *E, SourceLocation CC = SourceLocation()); void CheckBoolLikeConversion(Expr *E, SourceLocation CC); void CheckForIntOverflow(Expr *E); void CheckUnsequencedOperations(Expr *E); /// Perform semantic checks on a completed expression. 
This will either /// be a full-expression or a default argument expression. void CheckCompletedExpr(Expr *E, SourceLocation CheckLoc = SourceLocation(), bool IsConstexpr = false); void CheckBitFieldInitialization(SourceLocation InitLoc, FieldDecl *Field, Expr *Init); /// Check if there is a field shadowing. void CheckShadowInheritedFields(const SourceLocation &Loc, DeclarationName FieldName, const CXXRecordDecl *RD, bool DeclIsField = true); /// Check if the given expression contains 'break' or 'continue' /// statement that produces control flow different from GCC. void CheckBreakContinueBinding(Expr *E); /// Check whether receiver is mutable ObjC container which /// attempts to add itself into the container void CheckObjCCircularContainer(ObjCMessageExpr *Message); void AnalyzeDeleteExprMismatch(const CXXDeleteExpr *DE); void AnalyzeDeleteExprMismatch(FieldDecl *Field, SourceLocation DeleteLoc, bool DeleteWasArrayForm); public: /// Register a magic integral constant to be used as a type tag. void RegisterTypeTagForDatatype(const IdentifierInfo *ArgumentKind, uint64_t MagicValue, QualType Type, bool LayoutCompatible, bool MustBeNull); struct TypeTagData { TypeTagData() {} TypeTagData(QualType Type, bool LayoutCompatible, bool MustBeNull) : Type(Type), LayoutCompatible(LayoutCompatible), MustBeNull(MustBeNull) {} QualType Type; /// If true, \c Type should be compared with other expression's types for /// layout-compatibility. unsigned LayoutCompatible : 1; unsigned MustBeNull : 1; }; /// A pair of ArgumentKind identifier and magic value. This uniquely /// identifies the magic value. typedef std::pair<const IdentifierInfo *, uint64_t> TypeTagMagicValue; private: /// A map from magic value to type information. std::unique_ptr<llvm::DenseMap<TypeTagMagicValue, TypeTagData>> TypeTagForDatatypeMagicValues; /// Peform checks on a call of a function with argument_with_type_tag /// or pointer_with_type_tag attributes. 
void CheckArgumentWithTypeTag(const ArgumentWithTypeTagAttr *Attr, const ArrayRef<const Expr *> ExprArgs, SourceLocation CallSiteLoc); /// Check if we are taking the address of a packed field /// as this may be a problem if the pointer value is dereferenced. void CheckAddressOfPackedMember(Expr *rhs); /// The parser's current scope. /// /// The parser maintains this state here. Scope *CurScope; mutable IdentifierInfo *Ident_super; mutable IdentifierInfo *Ident___float128; /// Nullability type specifiers. IdentifierInfo *Ident__Nonnull = nullptr; IdentifierInfo *Ident__Nullable = nullptr; IdentifierInfo *Ident__Null_unspecified = nullptr; IdentifierInfo *Ident_NSError = nullptr; /// The handler for the FileChanged preprocessor events. /// /// Used for diagnostics that implement custom semantic analysis for #include /// directives, like -Wpragma-pack. sema::SemaPPCallbacks *SemaPPCallbackHandler; protected: friend class Parser; friend class InitializationSequence; friend class ASTReader; friend class ASTDeclReader; friend class ASTWriter; public: /// Retrieve the keyword associated IdentifierInfo *getNullabilityKeyword(NullabilityKind nullability); /// The struct behind the CFErrorRef pointer. RecordDecl *CFError = nullptr; bool isCFError(RecordDecl *D); /// Retrieve the identifier "NSError". IdentifierInfo *getNSErrorIdent(); /// Retrieve the parser's current scope. /// /// This routine must only be used when it is certain that semantic analysis /// and the parser are in precisely the same context, which is not the case /// when, e.g., we are performing any kind of template instantiation. /// Therefore, the only safe places to use this scope are in the parser /// itself and in routines directly invoked from the parser and *never* from /// template substitution or instantiation. 
Scope *getCurScope() const { return CurScope; } void incrementMSManglingNumber() const { return CurScope->incrementMSManglingNumber(); } IdentifierInfo *getSuperIdentifier() const; IdentifierInfo *getFloat128Identifier() const; Decl *getObjCDeclContext() const; DeclContext *getCurLexicalContext() const { return OriginalLexicalContext ? OriginalLexicalContext : CurContext; } const DeclContext *getCurObjCLexicalContext() const { const DeclContext *DC = getCurLexicalContext(); // A category implicitly has the attribute of the interface. if (const ObjCCategoryDecl *CatD = dyn_cast<ObjCCategoryDecl>(DC)) DC = CatD->getClassInterface(); return DC; } /// To be used for checking whether the arguments being passed to /// function exceeds the number of parameters expected for it. static bool TooManyArguments(size_t NumParams, size_t NumArgs, bool PartialOverloading = false) { // We check whether we're just after a comma in code-completion. if (NumArgs > 0 && PartialOverloading) return NumArgs + 1 > NumParams; // If so, we view as an extra argument. return NumArgs > NumParams; } // Emitting members of dllexported classes is delayed until the class // (including field initializers) is fully parsed. 
SmallVector<CXXRecordDecl*, 4> DelayedDllExportClasses; SmallVector<CXXMethodDecl*, 4> DelayedDllExportMemberFunctions; private: class SavePendingParsedClassStateRAII { public: SavePendingParsedClassStateRAII(Sema &S) : S(S) { swapSavedState(); } ~SavePendingParsedClassStateRAII() { assert(S.DelayedOverridingExceptionSpecChecks.empty() && "there shouldn't be any pending delayed exception spec checks"); assert(S.DelayedEquivalentExceptionSpecChecks.empty() && "there shouldn't be any pending delayed exception spec checks"); assert(S.DelayedDllExportClasses.empty() && "there shouldn't be any pending delayed DLL export classes"); swapSavedState(); } private: Sema &S; decltype(DelayedOverridingExceptionSpecChecks) SavedOverridingExceptionSpecChecks; decltype(DelayedEquivalentExceptionSpecChecks) SavedEquivalentExceptionSpecChecks; decltype(DelayedDllExportClasses) SavedDllExportClasses; void swapSavedState() { SavedOverridingExceptionSpecChecks.swap( S.DelayedOverridingExceptionSpecChecks); SavedEquivalentExceptionSpecChecks.swap( S.DelayedEquivalentExceptionSpecChecks); SavedDllExportClasses.swap(S.DelayedDllExportClasses); } }; /// Helper class that collects misaligned member designations and /// their location info for delayed diagnostics. struct MisalignedMember { Expr *E; RecordDecl *RD; ValueDecl *MD; CharUnits Alignment; MisalignedMember() : E(), RD(), MD(), Alignment() {} MisalignedMember(Expr *E, RecordDecl *RD, ValueDecl *MD, CharUnits Alignment) : E(E), RD(RD), MD(MD), Alignment(Alignment) {} explicit MisalignedMember(Expr *E) : MisalignedMember(E, nullptr, nullptr, CharUnits()) {} bool operator==(const MisalignedMember &m) { return this->E == m.E; } }; /// Small set of gathered accesses to potentially misaligned members /// due to the packed attribute. SmallVector<MisalignedMember, 4> MisalignedMembers; /// Adds an expression to the set of gathered misaligned members. 
void AddPotentialMisalignedMembers(Expr *E, RecordDecl *RD, ValueDecl *MD, CharUnits Alignment); public: /// Diagnoses the current set of gathered accesses. This typically /// happens at full expression level. The set is cleared after emitting the /// diagnostics. void DiagnoseMisalignedMembers(); /// This function checks if the expression is in the sef of potentially /// misaligned members and it is converted to some pointer type T with lower /// or equal alignment requirements. If so it removes it. This is used when /// we do not want to diagnose such misaligned access (e.g. in conversions to /// void*). void DiscardMisalignedMemberAddress(const Type *T, Expr *E); /// This function calls Action when it determines that E designates a /// misaligned member due to the packed attribute. This is used to emit /// local diagnostics like in reference binding. void RefersToMemberWithReducedAlignment( Expr *E, llvm::function_ref<void(Expr *, RecordDecl *, FieldDecl *, CharUnits)> Action); /// Describes the reason a calling convention specification was ignored, used /// for diagnostics. enum class CallingConventionIgnoredReason { ForThisTarget = 0, VariadicFunction, ConstructorDestructor, BuiltinFunction }; }; /// RAII object that enters a new expression evaluation context. 
class EnterExpressionEvaluationContext {
  Sema &Actions;
  // Tracks whether a context was actually pushed, so the destructor only
  // pops what the constructor pushed.
  bool Entered = true;

public:
  /// Push \p NewContext, unless \p ShouldEnter is false (in which case this
  /// object is a no-op).
  EnterExpressionEvaluationContext(
      Sema &Actions, Sema::ExpressionEvaluationContext NewContext,
      Decl *LambdaContextDecl = nullptr,
      Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext =
          Sema::ExpressionEvaluationContextRecord::EK_Other,
      bool ShouldEnter = true)
      : Actions(Actions), Entered(ShouldEnter) {
    if (Entered)
      Actions.PushExpressionEvaluationContext(NewContext, LambdaContextDecl,
                                              ExprContext);
  }

  /// Push \p NewContext, reusing the enclosing lambda's context declaration.
  EnterExpressionEvaluationContext(
      Sema &Actions, Sema::ExpressionEvaluationContext NewContext,
      Sema::ReuseLambdaContextDecl_t,
      Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext =
          Sema::ExpressionEvaluationContextRecord::EK_Other)
      : Actions(Actions) {
    Actions.PushExpressionEvaluationContext(
        NewContext, Sema::ReuseLambdaContextDecl, ExprContext);
  }

  enum InitListTag { InitList };

  /// Variant for braced-init-lists: only enters a context when inside an
  /// unevaluated C++11 operand (see comment below).
  EnterExpressionEvaluationContext(Sema &Actions, InitListTag,
                                   bool ShouldEnter = true)
      : Actions(Actions), Entered(false) {
    // In C++11 onwards, narrowing checks are performed on the contents of
    // braced-init-lists, even when they occur within unevaluated operands.
    // Therefore we still need to instantiate constexpr functions used in such
    // a context.
    if (ShouldEnter && Actions.isUnevaluatedContext() &&
        Actions.getLangOpts().CPlusPlus11) {
      Actions.PushExpressionEvaluationContext(
          Sema::ExpressionEvaluationContext::UnevaluatedList);
      Entered = true;
    }
  }

  ~EnterExpressionEvaluationContext() {
    if (Entered)
      Actions.PopExpressionEvaluationContext();
  }
};

DeductionFailureInfo
MakeDeductionFailureInfo(ASTContext &Context, Sema::TemplateDeductionResult TDK,
                         sema::TemplateDeductionInfo &Info);

/// Contains a late templated function.
/// Will be parsed at the end of the translation unit, used by Sema & Parser.
struct LateParsedTemplate {
  CachedTokens Toks;

  /// The template function declaration to be late parsed.
  Decl *D;
};

} // end namespace clang

namespace llvm {

// Hash a FunctionDeclAndLoc by looking at both its FunctionDecl and its
// SourceLocation.
template <> struct DenseMapInfo<clang::Sema::FunctionDeclAndLoc> {
  using FunctionDeclAndLoc = clang::Sema::FunctionDeclAndLoc;
  using FDBaseInfo =
      DenseMapInfo<clang::CanonicalDeclPtr<clang::FunctionDecl>>;

  // Empty/tombstone keys reuse the FunctionDecl sentinel keys paired with a
  // default (invalid) SourceLocation.
  static FunctionDeclAndLoc getEmptyKey() {
    return {FDBaseInfo::getEmptyKey(), clang::SourceLocation()};
  }

  static FunctionDeclAndLoc getTombstoneKey() {
    return {FDBaseInfo::getTombstoneKey(), clang::SourceLocation()};
  }

  static unsigned getHashValue(const FunctionDeclAndLoc &FDL) {
    return hash_combine(FDBaseInfo::getHashValue(FDL.FD),
                        FDL.Loc.getRawEncoding());
  }

  static bool isEqual(const FunctionDeclAndLoc &LHS,
                      const FunctionDeclAndLoc &RHS) {
    return LHS.FD == RHS.FD && LHS.Loc == RHS.Loc;
  }
};

} // namespace llvm

#endif
// GB_unop__minv_uint64_uint64.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A) function:  GB_unop_apply__minv_uint64_uint64
// op(A') function: GB_unop_tran__minv_uint64_uint64

// C type:   uint64_t
// A type:   uint64_t
// cast:     uint64_t cij = aij
// unaryop:  cij = GB_IMINV_UNSIGNED (aij, 64)
//           (integer multiplicative inverse; defined in GB.h)

#define GB_ATYPE \
    uint64_t

#define GB_CTYPE \
    uint64_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint64_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = GB_IMINV_UNSIGNED (x, 64) ;

// casting (no-op here: A and C have the same type)
#define GB_CAST(z, aij) \
    uint64_t z = aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA)  \
{                          \
    /* aij = Ax [pA] */    \
    uint64_t aij = Ax [pA] ;  \
    /* Cx [pC] = op (cast (aij)) */ \
    uint64_t z = aij ;     \
    Cx [pC] = GB_IMINV_UNSIGNED (z, 64) ;   \
}

// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
    0

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_MINV || GxB_NO_UINT64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop_apply__minv_uint64_uint64
(
    uint64_t *Cx,               // Cx and Ax may be aliased
    const uint64_t *Ax,
    const int8_t *GB_RESTRICT Ab,   // A->b if A is bitmap; NULL otherwise
    int64_t anz,                // number of entries (length of Ax/Cx)
    int nthreads                // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // A is not bitmap: every position 0..anz-1 holds a live entry
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
            GB_memcpy (Cx, Ax, anz * sizeof (uint64_t), nthreads) ;
        #else
            #pragma omp parallel for num_threads(nthreads) schedule(static)
            for (p = 0 ; p < anz ; p++)
            {
                uint64_t aij = Ax [p] ;
                uint64_t z = aij ;
                Cx [p] = GB_IMINV_UNSIGNED (z, 64) ;
            }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        // (positions with Ab [p] == 0 are skipped; their Cx is left as-is)
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            uint64_t aij = Ax [p] ;
            uint64_t z = aij ;
            Cx [p] = GB_IMINV_UNSIGNED (z, 64) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop_tran__minv_uint64_uint64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // shared transpose template; uses the GB_* macros defined above
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
gr3_mc.c
/*!\file gr3_mc.c
 *
 * Optimized OpenMP implementation of the marching cubes algorithm.
 *
 * This code is based on Paul Bourke's Marching Cubes implementation
 * (http://paulbourke.net/geometry/polygonise/)
 *
 * Creates an indexed mesh to reduce the number of vertices to calculate.
 * This is done by caching generated vertices depending on their location.
 * Multiple threads create independent meshes.
 * Caches values between adjacent cubes.
 *
 * Fabian Beule
 * 2014-02-10
 */

#include <stdlib.h>
#include <string.h>
#include <math.h>
#include "gr3.h"
#include "gr3_mc_data.h"
#ifdef _OPENMP
#include <omp.h>
#endif

#define ABS(x) ((x) < 0 ? -(x) : (x))
/* linear index into the voxel data for grid coordinates (x, y, z) */
#define INDEX(x, y, z) ((x)*mcdata.stride[0] + (y)*mcdata.stride[1] + (z)*mcdata.stride[2])
/* linear index into a per-layer (y, z) cache plane */
#define IDX2D(y, z) ((y)*mcdata.dim[2] + (z))

/* speedup does not grow much with a high number of threads */
#define THREADLIMIT 16

/* for smaller function headers */
typedef struct
{
  const GR3_MC_DTYPE *data; /* voxel values */
  GR3_MC_DTYPE isolevel;    /* isosurface threshold */
  int dim[3];               /* number of voxels per axis */
  int stride[3];            /* element step per axis when traversing data */
  double step[3];           /* world-space distance between voxels */
  double offset[3];         /* world-space coordinate origin */
} mcdata_t;

/* calculate the gradient via difference quotient (central difference where
 * possible, one-sided at the volume boundary) */
static gr3_coord_t getgrad(mcdata_t mcdata, int x, int y, int z)
{
  int v[3];
  int neigh[3][2];
  int i;
  gr3_coord_t n;
  v[0] = x;
  v[1] = y;
  v[2] = z;
  /* clamp the neighbor coordinates to the volume bounds */
  for (i = 0; i < 3; i++)
    {
      if (v[i] > 0)
        neigh[i][0] = v[i] - 1;
      else
        neigh[i][0] = v[i];
      if (v[i] < mcdata.dim[i] - 1)
        neigh[i][1] = v[i] + 1;
      else
        neigh[i][1] = v[i];
    }
  n.x = (float)(mcdata.data[INDEX(neigh[0][1], y, z)] - mcdata.data[INDEX(neigh[0][0], y, z)]) /
        (neigh[0][1] - neigh[0][0]) / mcdata.step[0];
  n.y = (float)(mcdata.data[INDEX(x, neigh[1][1], z)] - mcdata.data[INDEX(x, neigh[1][0], z)]) /
        (neigh[1][1] - neigh[1][0]) / mcdata.step[1];
  n.z = (float)(mcdata.data[INDEX(x, y, neigh[2][1])] - mcdata.data[INDEX(x, y, neigh[2][0])]) /
        (neigh[2][1] - neigh[2][0]) / mcdata.step[2];
  return n;
}

/* interpolate points and calculate normals:
 * p = world-space position where the isolevel crosses the edge (px..qx, ...),
 * n = normalized, negated interpolated gradient at that position */
static void interpolate(mcdata_t mcdata, int px, int py, int pz, GR3_MC_DTYPE v1, int qx, int qy, int qz,
                        GR3_MC_DTYPE v2, gr3_coord_t *p, gr3_coord_t *n)
{
  double mu;
  gr3_coord_t n1, n2;
  double norm;
  /* guard against division by (almost) zero when both values are close */
  if (ABS(mcdata.isolevel - v1) < 0.00001)
    mu = 0.0;
  else if (ABS(mcdata.isolevel - v2) < 0.00001)
    mu = 1.0;
  else if (ABS(v1 - v2) < 0.00001)
    mu = 0.5;
  else
    mu = 1.0 * (mcdata.isolevel - v1) / (v2 - v1);
  p->x = (px + mu * (qx - px)) * mcdata.step[0] + mcdata.offset[0];
  p->y = (py + mu * (qy - py)) * mcdata.step[1] + mcdata.offset[1];
  p->z = (pz + mu * (qz - pz)) * mcdata.step[2] + mcdata.offset[2];
  n1 = getgrad(mcdata, px, py, pz);
  n2 = getgrad(mcdata, qx, qy, qz);
  /* negate so the normal points from high to low values */
  n->x = -(n1.x + mu * (n2.x - n1.x));
  n->y = -(n1.y + mu * (n2.y - n1.y));
  n->z = -(n1.z + mu * (n2.z - n1.z));
  norm = sqrt(n->x * n->x + n->y * n->y + n->z * n->z);
  if (norm > 0.0)
    {
      n->x /= norm;
      n->y /= norm;
      n->z /= norm;
    }
}

/*!
 * marching cubes algorithm for one x-layer.
 * created vertices are cached between calls using vindex.
 * vindex associates the intersected edge with the vertex index.
 * the edge is identified by its location (low, high), direction (x, y, z)
 * and coordinates (py, pz) of its starting point.
 * direction and location are the first index:
 * (x, y_low, z_low, y_high, z_high) (see mc_edgeprop)
 * second index is py * mcdata.dim[2] + pz (see IDX2D).
 * py and pz are the coordinates of the lower one of both edge vertices
 */
static void layer(mcdata_t mcdata, int x, int **vindex, unsigned int *num_vertices, gr3_coord_t **vertices,
                  gr3_coord_t **normals, unsigned int *vertcapacity, unsigned int *num_faces, unsigned int **indices,
                  unsigned int *facecapacity)
{
  int i, j;
  int y, z;
  int cubeindex;
  GR3_MC_DTYPE cubeval[8];
  /* also cache between adjacent cubes */
  for (y = 0; y < mcdata.dim[1] - 1; y++)
    {
      /* init z-cache */
      for (i = 0; i < 4; i++)
        {
          int zi = mc_zvertices[0][i];
          cubeval[mc_zvertices[1][i]] =
              mcdata.data[INDEX(x + mc_cubeverts[zi][0], y + mc_cubeverts[zi][1], 0 + mc_cubeverts[zi][2])];
        }
      for (z = 0; z < mcdata.dim[2] - 1; z++)
        {
          /* cubeindex: bit i set iff cube corner i is below the isolevel */
          cubeindex = 0;
          /* shift old values (z-cache) */
          for (i = 0; i < 4; i++)
            {
              int zi = mc_zvertices[0][i];
              cubeval[zi] = cubeval[mc_zvertices[1][i]];
              if (cubeval[zi] < mcdata.isolevel)
                {
                  cubeindex |= 1 << zi;
                }
            }
          /* read new cube values */
          for (i = 0; i < 4; i++)
            {
              int zi = mc_zvertices[1][i];
              cubeval[zi] =
                  mcdata.data[INDEX(x + mc_cubeverts[zi][0], y + mc_cubeverts[zi][1], z + mc_cubeverts[zi][2])];
              if (cubeval[zi] < mcdata.isolevel)
                {
                  cubeindex |= 1 << zi;
                }
            }
          /* 0 and 255 mean the cube is entirely inside/outside: no triangles */
          if (cubeindex != 0 && cubeindex != 255)
            {
              /* create triangles */
              for (i = 0; i < mc_tricount[cubeindex]; i++)
                {
                  /* grow the index array geometrically (factor 1.5) */
                  if (*facecapacity <= *num_faces)
                    {
                      (*facecapacity) = (unsigned int)(*num_faces * 1.5) + 50;
                      *indices = realloc(*indices, (*facecapacity) * 3 * sizeof(int));
                    }
                  /* create triangle vertices */
                  for (j = 0; j < 3; j++)
                    {
                      int trival = mc_tritable[cubeindex][i * 3 + j];
                      const int *edge = mc_cubeedges[trival];
                      int dir = mc_edgeprop[trival];
                      int px = x + mc_cubeverts[edge[0]][0];
                      int py = y + mc_cubeverts[edge[0]][1];
                      int pz = z + mc_cubeverts[edge[0]][2];
                      /* lookup if vertex already exists */
                      int node = vindex[dir][IDX2D(py, pz)];
                      if (node < 0)
                        {
                          /* it does not, create it */
                          GR3_MC_DTYPE v1 = cubeval[edge[0]];
                          GR3_MC_DTYPE v2 = cubeval[edge[1]];
                          if (*vertcapacity <= *num_vertices)
                            {
                              (*vertcapacity) = (unsigned int)(*num_vertices * 1.5) + 50;
                              *vertices = realloc(*vertices, (*vertcapacity) * sizeof(gr3_coord_t));
                              *normals = realloc(*normals, (*vertcapacity) * sizeof(gr3_coord_t));
                            }
                          node = *num_vertices;
                          interpolate(mcdata, px, py, pz, v1, x + mc_cubeverts[edge[1]][0],
                                      y + mc_cubeverts[edge[1]][1], z + mc_cubeverts[edge[1]][2], v2, *vertices + node,
                                      *normals + node);
                          vindex[dir][IDX2D(py, pz)] = node;
                          (*num_vertices)++;
                        }
                      /* add vertex index to the element array */
                      (*indices)[*num_faces * 3 + j] = node;
                    }
                  (*num_faces)++;
                }
            }
        }
    }
}

/*!
 * handle consecutive calls to layer:
 * run layer for x in [from, to), carrying the vertex cache between layers.
 * Outputs are allocated here (caller owns/frees them).
 */
static void layerblock(mcdata_t mcdata, int from, int to, unsigned int *num_vertices, gr3_coord_t **vertices,
                       gr3_coord_t **normals, unsigned int *num_faces, unsigned int **faces)
{
  int x;
  int y;
  int z;
  unsigned int vertcapacity;
  unsigned int facecapacity;
  /* cache for the vertex indices of the x-layer
   * [x, y_bot, z_bot, y_top, z_top] */
  int *vindex[5], *ntmp;
  *num_vertices = 0;
  vertcapacity = 0;
  *vertices = NULL;
  *normals = NULL;
  *num_faces = 0;
  facecapacity = 0;
  *faces = NULL;
  /* one allocation for all five planes; vindex[1..4] point into it */
  vindex[0] = malloc(5 * mcdata.dim[1] * mcdata.dim[2] * sizeof(int));
  vindex[1] = vindex[0] + mcdata.dim[1] * mcdata.dim[2];
  vindex[2] = vindex[0] + 2 * mcdata.dim[1] * mcdata.dim[2];
  vindex[3] = vindex[0] + 3 * mcdata.dim[1] * mcdata.dim[2];
  vindex[4] = vindex[0] + 4 * mcdata.dim[1] * mcdata.dim[2];
  /* -1 marks "no vertex created for this edge yet" */
  for (y = 0; y < mcdata.dim[1]; y++)
    {
      for (z = 0; z < mcdata.dim[2]; z++)
        {
          vindex[0][IDX2D(y, z)] = -1;
          vindex[3][IDX2D(y, z)] = -1;
          vindex[4][IDX2D(y, z)] = -1;
        }
    }
  /*
   * iterate layer-by-layer through the data
   * create an indexed mesh
   * indices are cached in vindex[direction of the edge][y, z]
   * the top cache becomes the bottom in the next iteration
   */
  for (x = from; x < to; x++)
    {
      /* swap top caches (3, 4) into bottom position (1, 2) */
      ntmp = vindex[1];
      vindex[1] = vindex[3];
      vindex[3] = ntmp;
      ntmp = vindex[2];
      vindex[2] = vindex[4];
      vindex[4] = ntmp;
      /* reset the x-direction cache and the new top caches */
      for (y = 0; y < mcdata.dim[1]; y++)
        {
          for (z = 0; z < mcdata.dim[2]; z++)
            {
              vindex[0][IDX2D(y, z)] = -1;
              vindex[3][IDX2D(y, z)] = -1;
              vindex[4][IDX2D(y, z)] = -1;
            }
        }
      layer(mcdata, x, vindex, num_vertices, vertices, normals, &vertcapacity, num_faces, faces, &facecapacity);
    }
  free(vindex[0]);
}

/*!
 * Create an isosurface (as indexed mesh) from voxel data
 * with the marching cubes algorithm.
 * This function manages the parallelization:
 * Divide the data into blocks along the x-axis. Allocate memory,
 * call layerblock and merge the individual meshes into a single one.
 *
 * \param [in] data the volume (voxel) data
 * \param [in] isolevel value where the isosurface will be extracted
 * \param [in] dim_x number of elements in x-direction
 * \param [in] dim_y number of elements in y-direction
 * \param [in] dim_z number of elements in z-direction
 * \param [in] stride_x number of elements to step when traversing
 *             the data in x-direction (0 means tightly packed)
 * \param [in] stride_y number of elements to step when traversing
 *             the data in y-direction (0 means tightly packed)
 * \param [in] stride_z number of elements to step when traversing
 *             the data in z-direction (0 means tightly packed)
 * \param [in] step_x distance between the voxels in x-direction
 * \param [in] step_y distance between the voxels in y-direction
 * \param [in] step_z distance between the voxels in z-direction
 * \param [in] offset_x coordinate origin
 * \param [in] offset_y coordinate origin
 * \param [in] offset_z coordinate origin
 * \param [out] num_vertices number of vertices created
 * \param [out] vertices array of vertex coordinates (caller frees)
 * \param [out] normals array of vertex normal vectors (caller frees)
 * \param [out] num_indices number of indices created
 *              (3 times the number of triangles)
 * \param [out] indices array of vertex indices that make the triangles
 *              (caller frees)
 */
GR3API void gr3_triangulateindexed(const GR3_MC_DTYPE *data, GR3_MC_DTYPE isolevel, unsigned int dim_x,
                                   unsigned int dim_y, unsigned int dim_z, unsigned int stride_x, unsigned int stride_y,
                                   unsigned int stride_z, double step_x, double step_y, double step_z, double offset_x,
                                   double offset_y, double offset_z, unsigned int *num_vertices,
                                   gr3_coord_t **vertices, gr3_coord_t **normals, unsigned int *num_indices,
                                   unsigned int **indices)
{
  int num_threads;
  unsigned int num_faces;
  /* per-thread mesh pieces, indexed by thread id */
  unsigned int *num_t_vertices, *num_t_faces, **t_faces;
  gr3_coord_t **t_vertices, **t_normals;
  /* prefix sums: start offset of each thread's block in the merged arrays */
  unsigned int *vertblock, *faceblock;
  mcdata_t mcdata;
#if defined(_OPENMP) && defined(THREADLIMIT)
  int max_threads;
  max_threads = omp_get_max_threads();
  if (max_threads > THREADLIMIT) omp_set_num_threads(THREADLIMIT);
#endif
  /* stride of 0 selects the default contiguous layout */
  if (stride_x == 0) stride_x = dim_z * dim_y;
  if (stride_y == 0) stride_y = dim_z;
  if (stride_z == 0) stride_z = 1;
  mcdata.data = data;
  mcdata.isolevel = isolevel;
  mcdata.dim[0] = dim_x;
  mcdata.dim[1] = dim_y;
  mcdata.dim[2] = dim_z;
  mcdata.stride[0] = stride_x;
  mcdata.stride[1] = stride_y;
  mcdata.stride[2] = stride_z;
  mcdata.step[0] = step_x;
  mcdata.step[1] = step_y;
  mcdata.step[2] = step_z;
  mcdata.offset[0] = offset_x;
  mcdata.offset[1] = offset_y;
  mcdata.offset[2] = offset_z;
  *num_vertices = 0;
  *vertices = NULL;
  *normals = NULL;
  *num_indices = 0;
  *indices = NULL;
#ifdef _OPENMP
#pragma omp parallel default(none) \
    shared(num_threads, num_t_vertices, t_vertices, t_normals, num_t_faces, t_faces, mcdata, vertblock, faceblock, \
           num_vertices, num_faces, vertices, normals, indices)
#endif
  {
    int thread_id;
    unsigned int from, to;
    unsigned int i;
#ifdef _OPENMP
#pragma omp single
#endif
    {
      /* allocate temporary memory for each thread */
      /* (single: one thread allocates; implicit barrier publishes pointers) */
#ifdef _OPENMP
      num_threads = omp_get_num_threads();
#else
      num_threads = 1;
#endif
      num_t_vertices = malloc(num_threads * sizeof(unsigned int));
      t_vertices = malloc(num_threads * sizeof(gr3_coord_t *));
      t_normals = malloc(num_threads * sizeof(gr3_coord_t *));
      num_t_faces = malloc(num_threads * sizeof(unsigned int));
      t_faces = malloc(num_threads * sizeof(unsigned int *));
    }
    /* create a mesh per thread */
#ifdef _OPENMP
    thread_id = omp_get_thread_num();
#else
    thread_id = 0;
#endif
    /* split the dim_x - 1 cube layers evenly among the threads */
    from = thread_id * (mcdata.dim[0] - 1) / num_threads;
    to = (thread_id + 1) * (mcdata.dim[0] - 1) / num_threads;
    num_t_vertices[thread_id] = 0;
    t_vertices[thread_id] = NULL;
    t_normals[thread_id] = NULL;
    num_t_faces[thread_id] = 0;
    t_faces[thread_id] = NULL;
    layerblock(mcdata, from, to, num_t_vertices + thread_id, t_vertices + thread_id, t_normals + thread_id,
               num_t_faces + thread_id, t_faces + thread_id);
#ifdef _OPENMP
#pragma omp barrier
#pragma omp single
#endif
    {
      /* calculate beginning indices of thread blocks */
      vertblock = malloc((num_threads + 1) * sizeof(unsigned int));
      vertblock[0] = 0;
      faceblock = malloc((num_threads + 1) * sizeof(unsigned int));
      faceblock[0] = 0;
      for (i = 0; i < (unsigned int)num_threads; i++)
        {
          vertblock[i + 1] = vertblock[i] + num_t_vertices[i];
          faceblock[i + 1] = faceblock[i] + num_t_faces[i];
        }
      *num_vertices = vertblock[num_threads];
      num_faces = faceblock[num_threads];
      *vertices = realloc(*vertices, *num_vertices * sizeof(gr3_coord_t));
      *normals = realloc(*normals, *num_vertices * sizeof(gr3_coord_t));
      *indices = realloc(*indices, num_faces * 3 * sizeof(unsigned int));
    }
    /* copy thread meshes into the arrays */
    memmove(*vertices + vertblock[thread_id], t_vertices[thread_id], num_t_vertices[thread_id] * sizeof(gr3_coord_t));
    memmove(*normals + vertblock[thread_id], t_normals[thread_id], num_t_vertices[thread_id] * sizeof(gr3_coord_t));
    /* translate thread indices to global indices */
    for (i = 0; i < num_t_faces[thread_id]; i++)
      {
        (*indices)[(faceblock[thread_id] + i) * 3 + 0] = t_faces[thread_id][i * 3 + 0] + vertblock[thread_id];
        (*indices)[(faceblock[thread_id] + i) * 3 + 1] = t_faces[thread_id][i * 3 + 1] + vertblock[thread_id];
        (*indices)[(faceblock[thread_id] + i) * 3 + 2] = t_faces[thread_id][i * 3 + 2] + vertblock[thread_id];
      }
    free(t_vertices[thread_id]);
    free(t_normals[thread_id]);
    free(t_faces[thread_id]);
  }
  free(faceblock);
  free(vertblock);
  free(t_faces);
  free(num_t_faces);
  free(t_normals);
  free(t_vertices);
  free(num_t_vertices);
  *num_indices = num_faces * 3;
#if defined(_OPENMP) && defined(THREADLIMIT)
  omp_set_num_threads(max_threads);
#endif
}

/*!
 * Create an isosurface (as mesh) from voxel data
 * with the marching cubes algorithm.
 * This function calls gr3_triangulateindexed and copies the values.
 *
 * \param [in] data the volume (voxel) data
 * \param [in] isolevel value where the isosurface will be extracted
 * \param [in] dim_x number of elements in x-direction
 * \param [in] dim_y number of elements in y-direction
 * \param [in] dim_z number of elements in z-direction
 * \param [in] stride_x number of elements to step when traversing
 *             the data in x-direction
 * \param [in] stride_y number of elements to step when traversing
 *             the data in y-direction
 * \param [in] stride_z number of elements to step when traversing
 *             the data in z-direction
 * \param [in] step_x distance between the voxels in x-direction
 * \param [in] step_y distance between the voxels in y-direction
 * \param [in] step_z distance between the voxels in z-direction
 * \param [in] offset_x coordinate origin
 * \param [in] offset_y coordinate origin
 * \param [in] offset_z coordinate origin
 * \param [out] triangles_p array of triangle data (caller frees)
 *
 * \returns the number of triangles created
 */
GR3API unsigned int gr3_triangulate(const GR3_MC_DTYPE *data, GR3_MC_DTYPE isolevel, unsigned int dim_x,
                                    unsigned int dim_y, unsigned int dim_z, unsigned int stride_x,
                                    unsigned int stride_y, unsigned int stride_z, double step_x, double step_y,
                                    double step_z, double offset_x, double offset_y, double offset_z,
                                    gr3_triangle_t **triangles_p)
{
  unsigned int num_vertices;
  gr3_coord_t *vertices, *normals;
  unsigned int num_indices;
  unsigned int *indices;
  unsigned int i, j;
#if defined(_OPENMP) && defined(THREADLIMIT)
  int max_threads;
  max_threads = omp_get_max_threads();
  if (max_threads > THREADLIMIT) omp_set_num_threads(THREADLIMIT);
#endif
  gr3_triangulateindexed(data, isolevel, dim_x, dim_y, dim_z, stride_x, stride_y, stride_z, step_x, step_y, step_z,
                         offset_x, offset_y, offset_z, &num_vertices, &vertices, &normals, &num_indices, &indices);
  /* expand the indexed mesh into a flat triangle soup */
  *triangles_p = malloc(num_indices / 3 * sizeof(gr3_triangle_t));
#ifdef _OPENMP
#pragma omp parallel for default(none) private(j) shared(num_indices, triangles_p, indices, vertices, normals)
#endif
  for (i = 0; i < num_indices / 3; i++)
    {
      for (j = 0; j < 3; j++)
        {
          (*triangles_p)[i].vertex[j] = vertices[indices[i * 3 + j]];
          (*triangles_p)[i].normal[j] = normals[indices[i * 3 + j]];
        }
    }
  free(vertices);
  free(normals);
  free(indices);
#if defined(_OPENMP) && defined(THREADLIMIT)
  omp_set_num_threads(max_threads);
#endif
  return num_indices / 3;
}
global.h
#ifndef _GLOBAL_H_ #define _GLOBAL_H_ #pragma omp declare target const double aaa = 11.; #pragma omp end declare target #endif