source stringlengths 3 92 | c stringlengths 26 2.25M |
|---|---|
BPMaximumMatching.h | #ifndef BP_MAXIMUM_MATCHING_H
#define BP_MAXIMUM_MATCHING_H
#include "CombBLAS/CombBLAS.h"
#include <mpi.h>
#include <sys/time.h>
#include <iostream>
#include <functional>
#include <algorithm>
#include <numeric>   // std::accumulate, std::partial_sum
#include <vector>
#include <string>
#include <sstream>
#include "MatchingDefs.h"
namespace combblas {
/**
 * Create a boolean matrix A (not necessarily a permutation matrix)
 * Input: ri: a dense vector (actual values in FullyDistVec should be IT)
 *        ncol: number of columns in the output matrix A
 * Output: a boolean matrix A with m=size(ri) and n=ncol (input)
 *         and A[k,ri[k]]=1 for every k with 0 <= ri[k] < ncol;
 *         out-of-range entries of ri are skipped (specialized for matching)
 * This can be done by Matlab like constructor, no?
 */
template <class IT, class DER>
SpParMat<IT, bool, DER> PermMat (const FullyDistVec<IT,IT> & ri, const IT ncol)
{
IT procsPerRow = ri.commGrid->GetGridCols(); // the number of processor in a row of processor grid
IT procsPerCol = ri.commGrid->GetGridRows(); // the number of processor in a column of processor grid
IT global_nrow = ri.TotalLength();
IT global_ncol = ncol;
IT m_perprocrow = global_nrow / procsPerRow;
IT n_perproccol = global_ncol / procsPerCol;
// The indices for FullyDistVec are offset'd to 1/p pieces
// The matrix indices are offset'd to 1/sqrt(p) pieces
// Add the corresponding offset before sending the data
std::vector< std::vector<IT> > rowid(procsPerRow); // rowid in the local matrix of each vector entry
std::vector< std::vector<IT> > colid(procsPerRow); // colid in the local matrix of each vector entry
IT locvec = ri.arr.size(); // nnz in local vector
IT roffset = ri.RowLenUntil(); // the number of vector elements in this processor row before the current processor
// FIX: the old "(unsigned)locvec" cast truncated the bound when IT is 64-bit
for(typename std::vector<IT>::size_type i=0; i< static_cast<typename std::vector<IT>::size_type>(locvec); ++i)
{
if(ri.arr[i]>=0 && ri.arr[i]<ncol) // this specialized for matching. TODO: make it general purpose by passing a function
{
IT rowrec = (n_perproccol!=0) ? std::min(ri.arr[i] / n_perproccol, procsPerRow-1) : (procsPerRow-1);
// ri's numerical values give the colids and its local indices give rowids
rowid[rowrec].push_back( i + roffset);
colid[rowrec].push_back(ri.arr[i] - (rowrec * n_perproccol));
}
}
int * sendcnt = new int[procsPerRow];
int * recvcnt = new int[procsPerRow];
for(IT i=0; i<procsPerRow; ++i)
{
sendcnt[i] = rowid[i].size();
}
MPI_Alltoall(sendcnt, 1, MPI_INT, recvcnt, 1, MPI_INT, ri.commGrid->GetRowWorld()); // share the counts
int * sdispls = new int[procsPerRow]();
int * rdispls = new int[procsPerRow]();
// exclusive prefix sums give the send/receive displacements
// (std::-qualified for consistency: this header never does "using namespace std")
std::partial_sum(sendcnt, sendcnt+procsPerRow-1, sdispls+1);
std::partial_sum(recvcnt, recvcnt+procsPerRow-1, rdispls+1);
IT p_nnz = std::accumulate(recvcnt,recvcnt+procsPerRow, static_cast<IT>(0));
IT * p_rows = new IT[p_nnz];
IT * p_cols = new IT[p_nnz];
IT * senddata = new IT[locvec];
for(int i=0; i<procsPerRow; ++i)
{
std::copy(rowid[i].begin(), rowid[i].end(), senddata+sdispls[i]);
std::vector<IT>().swap(rowid[i]); // clear memory of rowid
}
MPI_Alltoallv(senddata, sendcnt, sdispls, MPIType<IT>(), p_rows, recvcnt, rdispls, MPIType<IT>(), ri.commGrid->GetRowWorld());
for(int i=0; i<procsPerRow; ++i)
{
std::copy(colid[i].begin(), colid[i].end(), senddata+sdispls[i]);
std::vector<IT>().swap(colid[i]); // clear memory of colid
}
MPI_Alltoallv(senddata, sendcnt, sdispls, MPIType<IT>(), p_cols, recvcnt, rdispls, MPIType<IT>(), ri.commGrid->GetRowWorld());
delete [] senddata;
DeleteAll(sendcnt, recvcnt, sdispls, rdispls); // FIX: these four arrays were leaked
std::tuple<IT,IT,bool> * p_tuples = new std::tuple<IT,IT,bool>[p_nnz];
for(IT i=0; i< p_nnz; ++i)
{
p_tuples[i] = std::make_tuple(p_rows[i], p_cols[i], 1);
}
DeleteAll(p_rows, p_cols);
// Now create the local matrix
IT local_nrow = ri.MyRowLength();
int my_proccol = ri.commGrid->GetRankInProcRow();
IT local_ncol = (my_proccol<(procsPerCol-1))? (n_perproccol) : (global_ncol - (n_perproccol*(procsPerCol-1)));
// infer the concrete type SpMat<IT,IT>
typedef typename create_trait<DER, IT, bool>::T_inferred DER_IT;
DER_IT * PSeq = new DER_IT();
PSeq->Create( p_nnz, local_nrow, local_ncol, p_tuples); // deletion of tuples[] is handled by SpMat::Create
SpParMat<IT,bool,DER_IT> P (PSeq, ri.commGrid);
//Par_DCSC_Bool P (PSeq, ri.commGrid);
return P;
}
/***************************************************************************
// Augment a matching by a set of vertex-disjoint augmenting paths.
// The paths are explored level-by-level similar to the level-synchronous BFS
// This approach is more effecient when we have many short augmenting paths
***************************************************************************/
// Flip matched/unmatched edges along a set of vertex-disjoint augmenting
// paths, one BFS level per iteration. `leaves` marks (per column) the
// endpoint of a discovered augmenting path (-1 = none); `parentsRow` holds
// each row's BFS parent, which is a column index.
template <typename IT>
void AugmentLevel(FullyDistVec<IT, IT>& mateRow2Col, FullyDistVec<IT, IT>& mateCol2Row, FullyDistVec<IT, IT>& parentsRow, FullyDistVec<IT, IT>& leaves)
{
IT nrow = mateRow2Col.TotalLength();
IT ncol = mateCol2Row.TotalLength();
// start the walk at the path endpoints: columns whose leaf entry is set
FullyDistSpVec<IT, IT> col(leaves, [](IT leaf){return leaf!=-1;});
FullyDistSpVec<IT, IT> row(mateRow2Col.getcommgrid(), nrow);
FullyDistSpVec<IT, IT> nextcol(col.getcommgrid(), ncol);
// process all paths simultaneously, one level at a time
while(col.getnnz()!=0)
{
row = col.Invert(nrow);
// replace each row's value with its BFS parent (a column id)
row = EWiseApply<IT>(row, parentsRow,
[](IT root, IT parent){return parent;},
[](IT root, IT parent){return true;},
false, (IT)-1);
col = row.Invert(ncol); // children array
// the walk continues only through columns that currently have a mate
nextcol = EWiseApply<IT>(col, mateCol2Row,
[](IT child, IT mate){return mate;},
[](IT child, IT mate){return mate!=-1;},
false, (IT)-1);
// commit the flipped (row, col) pairs into the matching
mateRow2Col.Set(row);
mateCol2Row.Set(col);
col = nextcol;
}
}
/***************************************************************************
// Augment a matching by a set of vertex-disjoint augmenting paths.
// An MPI processor is responsible for a complete path.
// This approach is more effecient when we have few long augmenting paths
// We used one-sided MPI. Any PGAS language should be fine as well.
// This function is not thread safe, hence multithreading is not used here
***************************************************************************/
// Augment along vertex-disjoint paths where each MPI process walks a whole
// path by itself using one-sided MPI (RMA) to read/update remote entries.
template <typename IT>
void AugmentPath(FullyDistVec<IT, IT>& mateRow2Col, FullyDistVec<IT, IT>& mateCol2Row,FullyDistVec<IT, IT>& parentsRow, FullyDistVec<IT, IT>& leaves)
{
// expose the three distributed vectors as RMA windows
MPI_Win win_mateRow2Col, win_mateCol2Row, win_parentsRow;
MPI_Win_create((IT*)mateRow2Col.GetLocArr(), mateRow2Col.LocArrSize() * sizeof(IT), sizeof(IT), MPI_INFO_NULL, mateRow2Col.commGrid->GetWorld(), &win_mateRow2Col);
MPI_Win_create((IT*)mateCol2Row.GetLocArr(), mateCol2Row.LocArrSize() * sizeof(IT), sizeof(IT), MPI_INFO_NULL, mateCol2Row.commGrid->GetWorld(), &win_mateCol2Row);
MPI_Win_create((IT*)parentsRow.GetLocArr(), parentsRow.LocArrSize() * sizeof(IT), sizeof(IT), MPI_INFO_NULL, parentsRow.commGrid->GetWorld(), &win_parentsRow);
IT* leaves_ptr = (IT*) leaves.GetLocArr();
//MPI_Win_fence(0, win_mateRow2Col);
//MPI_Win_fence(0, win_mateCol2Row);
//MPI_Win_fence(0, win_parentsRow);
IT row, col=100, nextrow; // col's initializer is arbitrary; it is overwritten by MPI_Get below
int owner_row, owner_col;
IT locind_row, locind_col;
int myrank;
MPI_Comm_rank(MPI_COMM_WORLD, &myrank);
// each locally stored leaf is the tail row of one augmenting path
for(IT i=0; i<leaves.LocArrSize(); i++)
{
int depth=0; // NOTE(review): depth is never updated or read — looks like leftover debug state
row = *(leaves_ptr+i);
// walk the path toward its root, flipping matched edges as we go
while(row != - 1)
{
// col = parentsRow[row] (remote read)
owner_row = mateRow2Col.Owner(row, locind_row);
MPI_Win_lock(MPI_LOCK_SHARED, owner_row, 0, win_parentsRow);
MPI_Get(&col, 1, MPIType<IT>(), owner_row, locind_row, 1, MPIType<IT>(), win_parentsRow);
MPI_Win_unlock(owner_row, win_parentsRow);
// nextrow = mateCol2Row[col]; mateCol2Row[col] = row (atomic swap)
owner_col = mateCol2Row.Owner(col, locind_col);
MPI_Win_lock(MPI_LOCK_SHARED, owner_col, 0, win_mateCol2Row);
MPI_Fetch_and_op(&row, &nextrow, MPIType<IT>(), owner_col, locind_col, MPI_REPLACE, win_mateCol2Row);
MPI_Win_unlock(owner_col, win_mateCol2Row);
// mateRow2Col[row] = col (remote write)
// NOTE(review): shared locks are used even for the updates; this relies
// on the paths being vertex-disjoint so no two processes ever target
// the same element — verify against the search phase's guarantees
MPI_Win_lock(MPI_LOCK_SHARED, owner_row, 0, win_mateRow2Col);
MPI_Put(&col, 1, MPIType<IT>(), owner_row, locind_row, 1, MPIType<IT>(), win_mateRow2Col);
MPI_Win_unlock(owner_row, win_mateRow2Col); // we need this otherwise col might get overwritten before communication!
row = nextrow;
}
}
//MPI_Win_fence(0, win_mateRow2Col);
//MPI_Win_fence(0, win_mateCol2Row);
//MPI_Win_fence(0, win_parentsRow);
MPI_Win_free(&win_mateRow2Col);
MPI_Win_free(&win_mateCol2Row);
MPI_Win_free(&win_parentsRow);
}
// Maximum cardinality matching on the bipartite graph of A.
// Input: an initial (possibly empty) matching in mateRow2Col / mateCol2Row,
// where -1 marks an unmatched vertex.
// Output: mateRow2Col and mateCol2Row, updated in place to a maximum matching.
// prune: filter matched fringe rows whose root already reached a leaf this phase
// randMM: seed BFS roots with random priorities instead of indices
// maximizeWeight: use the weight-maximizing semiring in the SpMV expansion
template <typename IT, typename NT,typename DER>
void maximumMatching(SpParMat < IT, NT, DER > & A, FullyDistVec<IT, IT>& mateRow2Col,
FullyDistVec<IT, IT>& mateCol2Row, bool prune=true, bool randMM = false, bool maximizeWeight = false)
{
typedef VertexTypeMM <IT> VertexType;
int nthreads=1;
#ifdef THREADED
#pragma omp parallel
{
nthreads = omp_get_num_threads();
}
#endif
// pre-allocated sparse accumulator shared by every SpMV call
PreAllocatedSPA<VertexType> SPA(A.seq(), nthreads*4);
int nprocs, myrank;
MPI_Comm_size(MPI_COMM_WORLD,&nprocs);
MPI_Comm_rank(MPI_COMM_WORLD,&myrank);
IT nrow = A.getnrow();
IT ncol = A.getncol();
FullyDistSpVec<IT, VertexType> fringeRow(A.getcommgrid(), nrow);
FullyDistSpVec<IT, IT> umFringeRow(A.getcommgrid(), nrow);
FullyDistVec<IT, IT> leaves ( A.getcommgrid(), ncol, (IT) -1);
std::vector<std::vector<double> > timing;
std::vector<int> layers;
std::vector<int64_t> phaseMatched;
double t1, time_search, time_augment, time_phase;
bool matched = true;
int phase = 0;
int totalLayer = 0;
IT numUnmatchedCol;
MPI_Win winLeaves;
MPI_Win_create((IT*)leaves.GetLocArr(), leaves.LocArrSize() * sizeof(IT), sizeof(IT), MPI_INFO_NULL, A.getcommgrid()->GetWorld(), &winLeaves);
// one phase = one BFS forest from all unmatched columns + one augmentation pass
while(matched)
{
time_phase = MPI_Wtime();
std::vector<double> phase_timing(8,0);
leaves.Apply ( [](IT val){return (IT) -1;});
FullyDistVec<IT, IT> parentsRow ( A.getcommgrid(), nrow, (IT) -1);
FullyDistSpVec<IT, VertexType> fringeCol(A.getcommgrid(), ncol);
// seed the fringe with the currently unmatched columns
fringeCol = EWiseApply<VertexType>(fringeCol, mateCol2Row,
[](VertexType vtx, IT mate){return vtx;},
[](VertexType vtx, IT mate){return mate==-1;},
true, VertexType());
if(randMM) //select rand
{
fringeCol.ApplyInd([](VertexType vtx, IT idx){return VertexType(idx,idx,GlobalMT.rand());});
}
else
{
fringeCol.ApplyInd([](VertexType vtx, IT idx){return VertexType(idx,idx);});
}
++phase;
numUnmatchedCol = fringeCol.getnnz();
int layer = 0;
time_search = MPI_Wtime();
while(fringeCol.getnnz() > 0)
{
layer++;
t1 = MPI_Wtime();
//TODO: think about this semiring
if(maximizeWeight)
SpMV<WeightMaxMMSR<NT, VertexType>>(A, fringeCol, fringeRow, false, SPA);
else
SpMV<Select2ndMinSR<NT, VertexType>>(A, fringeCol, fringeRow, false, SPA);
phase_timing[0] += MPI_Wtime()-t1;
// remove vertices already having parents
t1 = MPI_Wtime();
fringeRow = EWiseApply<VertexType>(fringeRow, parentsRow,
[](VertexType vtx, IT parent){return vtx;},
[](VertexType vtx, IT parent){return parent==-1;},
false, VertexType());
// Set parent pointer
parentsRow.EWiseApply(fringeRow,
[](IT dval, VertexType svtx){return svtx.parent;},
[](IT dval, VertexType svtx){return true;},
false, VertexType());
// unmatched rows in the fringe complete an augmenting path; keep their roots
umFringeRow = EWiseApply<IT>(fringeRow, mateRow2Col,
[](VertexType vtx, IT mate){return vtx.root;},
[](VertexType vtx, IT mate){return mate==-1;},
false, VertexType());
phase_timing[1] += MPI_Wtime()-t1;
IT nnz_umFringeRow = umFringeRow.getnnz(); // careful about this timing
t1 = MPI_Wtime();
if(nnz_umFringeRow >0)
{
/*
if(nnz_umFringeRow < 25*nprocs)
{
leaves.GSet(umFringeRow,
[](IT valRoot, IT idxLeaf){return valRoot;},
[](IT valRoot, IT idxLeaf){return idxLeaf;},
winLeaves);
// There might be a bug here. It does not return the same output for different number of processes
// e.g., check with g7jac200sc.mtx matrix
}
else*/
{
FullyDistSpVec<IT, IT> temp1(A.getcommgrid(), ncol);
temp1 = umFringeRow.Invert(ncol);
leaves.Set(temp1);
}
}
phase_timing[2] += MPI_Wtime()-t1;
// matched row vertices in the the fringe
fringeRow = EWiseApply<VertexType>(fringeRow, mateRow2Col,
[](VertexType vtx, IT mate){return VertexType(mate, vtx.root);},
[](VertexType vtx, IT mate){return mate!=-1;},
false, VertexType());
t1 = MPI_Wtime();
if(nnz_umFringeRow>0 && prune)
{
fringeRow.FilterByVal (umFringeRow,[](VertexType vtx){return vtx.root;}, false);
}
double tprune = MPI_Wtime()-t1;
phase_timing[3] += tprune;
// Go to matched column from matched row in the fringe. parent is automatically set to itself.
t1 = MPI_Wtime();
fringeCol = fringeRow.Invert(ncol,
[](VertexType& vtx, const IT & index){return vtx.parent;},
[](VertexType& vtx, const IT & index){return vtx;},
[](VertexType& vtx1, VertexType& vtx2){return vtx1;});
phase_timing[4] += MPI_Wtime()-t1;
}
time_search = MPI_Wtime() - time_search;
phase_timing[5] += time_search;
IT numMatchedCol = leaves.Count([](IT leaf){return leaf!=-1;});
phaseMatched.push_back(numMatchedCol);
time_augment = MPI_Wtime();
if (numMatchedCol== 0) matched = false;
else
{
// few paths: walk each path with one-sided MPI; many paths: level-synchronous
if(numMatchedCol < (2* nprocs * nprocs))
AugmentPath(mateRow2Col, mateCol2Row,parentsRow, leaves);
else
AugmentLevel(mateRow2Col, mateCol2Row,parentsRow, leaves);
}
time_augment = MPI_Wtime() - time_augment;
phase_timing[6] += time_augment;
time_phase = MPI_Wtime() - time_phase;
phase_timing[7] += time_phase;
timing.push_back(phase_timing);
totalLayer += layer;
layers.push_back(layer);
}
MPI_Win_free(&winLeaves);
//isMaximalmatching(A, mateRow2Col, mateCol2Row, unmatchedRow, unmatchedCol);
//isMatching(mateCol2Row, mateRow2Col); //todo there is a better way to check this
// print statistics
double combTime = 0.0; // FIX: was uninitialized; read below even when TIMING is undefined
#ifdef TIMING
if(myrank == 0)
{
std::cout << "****** maximum matching runtime ********\n";
std::cout << std::endl;
std::cout << "========================================================================\n";
std::cout << " BFS Search \n";
std::cout << "===================== ==================================================\n";
std::cout << "Phase Layer Match SpMV EWOpp CmUqL Prun CmMC BFS Aug Total\n";
std::cout << "===================== ===================================================\n";
std::vector<double> totalTimes(timing[0].size(),0);
int nphases = (int) timing.size();
for(size_t i=0; i<timing.size(); i++)
{
// FIX: %lld requires long long; IT/int64_t are not guaranteed to be long long
printf(" %3d %3d %8lld ", (int)(i+1), layers[i], (long long) phaseMatched[i]);
for(size_t j=0; j<timing[i].size(); j++)
{
totalTimes[j] += timing[i][j];
//timing[i][j] /= timing[i].back();
printf("%.2lf ", timing[i][j]);
}
printf("\n");
}
std::cout << "-----------------------------------------------------------------------\n";
std::cout << "Phase Layer UnMat SpMV EWOpp CmUqL Prun CmMC BFS Aug Total \n";
std::cout << "-----------------------------------------------------------------------\n";
combTime = totalTimes.back();
printf(" %3d %3d %8lld ", nphases, totalLayer/nphases, (long long) numUnmatchedCol);
for(size_t j=0; j<totalTimes.size()-1; j++)
{
printf("%.2lf ", totalTimes[j]);
}
printf("%.2lf\n", combTime);
}
#endif
IT nrows=A.getnrow();
IT matchedRow = mateRow2Col.Count([](IT mate){return mate!=-1;});
#ifdef DETAIL_STATS
if(myrank==0)
{
std::cout << "***Final Maximum Matching***\n";
std::cout << "***Total-Rows Matched-Rows Total Time***\n";
printf("%lld %lld %lf \n",(long long) nrows, (long long) matchedRow, combTime);
printf("matched rows: %lld , which is: %lf percent \n",(long long) matchedRow, 100*(double)matchedRow/(nrows));
std::cout << "-------------------------------------------------------\n\n";
}
#endif
}
}
#endif
|
host_as_target.c | // Check that specifying device as omp_get_initial_device():
// - Doesn't cause the runtime to fail.
// - Offloads code to the host.
// - Doesn't transfer data. In this case, just check that neither host data nor
// default device data are affected by the specified transfers.
// - Works whether it's specified directly or as the default device.
// RUN: %libomptarget-compile-run-and-check-generic
#include <stdio.h>
#include <omp.h>
// Print the value of X as seen by the host and then by device Dev.
// If no transfer happened, the device-side copy keeps its own value.
static void check(char *X, int Dev) {
printf(" host X = %c\n", *X);
#pragma omp target device(Dev)
printf("device X = %c\n", *X);
}
#define CHECK_DATA() check(&X, DevDefault)
// NOTE: the "// CHECK" comments in this test are FileCheck directives that
// encode the expected program output; do not edit, move, or reformat them.
int main(void) {
int DevDefault = omp_get_default_device();
int DevInit = omp_get_initial_device();
//--------------------------------------------------
// Initialize data on the host and default device.
//--------------------------------------------------
// CHECK: host X = h
// CHECK-NEXT: device X = d
char X = 'd';
#pragma omp target enter data map(to:X)
X = 'h';
CHECK_DATA();
//--------------------------------------------------
// Check behavior when specifying host directly.
//--------------------------------------------------
// The map(always,...) clauses below would overwrite one of the copies if the
// runtime wrongly performed transfers for the initial device.
// CHECK-NEXT: omp_is_initial_device() = 1
// CHECK-NEXT: host X = h
// CHECK-NEXT: device X = d
#pragma omp target device(DevInit) map(always,tofrom:X)
printf("omp_is_initial_device() = %d\n", omp_is_initial_device());
CHECK_DATA();
// CHECK-NEXT: omp_is_initial_device() = 1
// CHECK-NEXT: host X = h
// CHECK-NEXT: device X = d
#pragma omp target teams device(DevInit) num_teams(1) map(always,tofrom:X)
printf("omp_is_initial_device() = %d\n", omp_is_initial_device());
CHECK_DATA();
// Check that __kmpc_push_target_tripcount_mapper doesn't fail. I'm not sure
// how to check that it actually pushes to the initial device.
#pragma omp target teams device(DevInit) num_teams(1)
#pragma omp distribute
for (int i = 0; i < 2; ++i)
;
// CHECK-NEXT: host X = h
// CHECK-NEXT: device X = d
#pragma omp target data device(DevInit) map(always,tofrom:X)
;
CHECK_DATA();
// CHECK-NEXT: host X = h
// CHECK-NEXT: device X = d
#pragma omp target enter data device(DevInit) map(always,to:X)
;
CHECK_DATA();
// CHECK-NEXT: host X = h
// CHECK-NEXT: device X = d
#pragma omp target exit data device(DevInit) map(always,from:X)
;
CHECK_DATA();
// CHECK-NEXT: host X = h
// CHECK-NEXT: device X = d
#pragma omp target update device(DevInit) to(X)
;
CHECK_DATA();
// CHECK-NEXT: host X = h
// CHECK-NEXT: device X = d
#pragma omp target update device(DevInit) from(X)
;
CHECK_DATA();
//--------------------------------------------------
// Check behavior when device defaults to host.
//--------------------------------------------------
// Same checks as above, but with the initial device selected implicitly.
omp_set_default_device(DevInit);
// CHECK-NEXT: omp_is_initial_device() = 1
// CHECK-NEXT: host X = h
// CHECK-NEXT: device X = d
#pragma omp target map(always,tofrom:X)
printf("omp_is_initial_device() = %d\n", omp_is_initial_device());
CHECK_DATA();
// CHECK-NEXT: omp_is_initial_device() = 1
// CHECK-NEXT: host X = h
// CHECK-NEXT: device X = d
#pragma omp target teams num_teams(1) map(always,tofrom:X)
printf("omp_is_initial_device() = %d\n", omp_is_initial_device());
CHECK_DATA();
// Check that __kmpc_push_target_tripcount_mapper doesn't fail. I'm not sure
// how to check that it actually pushes to the initial device.
#pragma omp target teams num_teams(1)
#pragma omp distribute
for (int i = 0; i < 2; ++i)
;
// CHECK-NEXT: host X = h
// CHECK-NEXT: device X = d
#pragma omp target data map(always,tofrom:X)
;
CHECK_DATA();
// CHECK-NEXT: host X = h
// CHECK-NEXT: device X = d
#pragma omp target enter data map(always,to:X)
;
CHECK_DATA();
// CHECK-NEXT: host X = h
// CHECK-NEXT: device X = d
#pragma omp target exit data map(always,from:X)
;
CHECK_DATA();
// CHECK-NEXT: host X = h
// CHECK-NEXT: device X = d
#pragma omp target update to(X)
;
CHECK_DATA();
// CHECK-NEXT: host X = h
// CHECK-NEXT: device X = d
#pragma omp target update from(X)
;
CHECK_DATA();
return 0;
}
|
GB_binop__first_bool.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__first_bool)
// A.*B function (eWiseMult): GB (_AemultB_08__first_bool)
// A.*B function (eWiseMult): GB (_AemultB_02__first_bool)
// A.*B function (eWiseMult): GB (_AemultB_04__first_bool)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__first_bool)
// A*D function (colscale): GB (_AxD__first_bool)
// D*A function (rowscale): GB (_DxB__first_bool)
// C+=B function (dense accum): GB (_Cdense_accumB__first_bool)
// C+=b function (dense accum): GB (_Cdense_accumb__first_bool)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__first_bool)
// C=scalar+B GB ((none))
// C=scalar+B' GB ((none))
// C=A+scalar GB ((none))
// C=A'+scalar GB ((none))
// C type: bool
// A type: bool
// B,b type: bool
// BinaryOp: cij = aij
#define GB_ATYPE \
bool
#define GB_BTYPE \
bool
#define GB_CTYPE \
bool
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
bool aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
;
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
bool t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = x ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_FIRST || GxB_NO_BOOL || GxB_NO_FIRST_BOOL)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B, all three matrices dense; for FIRST, cij = aij (see GB_BINOP above).
GrB_Info GB (_Cdense_ewise3_noaccum__first_bool)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// body generated from the shared template, specialized by the GB_* macros
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B, accumulate a sparse B into a dense C. The template is compiled out
// (#if 0) for this operator, so the function is a no-op returning GrB_SUCCESS.
GrB_Info GB (_Cdense_accumB__first_bool)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
#include "GB_dense_subassign_23_template.c"
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense C. The template is compiled out
// (#if 0) for this operator, so the function is a no-op returning GrB_SUCCESS.
GrB_Info GB (_Cdense_accumb__first_bool)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
// get the scalar b for C += b, of type bool
bool bwork = (*((bool *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D, column scale by diagonal D. With z = first(x,y) = x, the second
// operand is never read (GB_GETB above expands to nothing).
GrB_Info GB (_AxD__first_bool)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B, row scale by diagonal D, specialized for z = first(x,y) = x
// (see GB_BINOP above).
GrB_Info GB (_DxB__first_bool)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd C=A+B (with optional mask / complemented mask), specialized for
// z = first(x,y) = x on bool. Workspaces are declared here and released by
// GB_FREE_WORK after the template body.
GrB_Info GB (_AaddB__first_bool)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult C=A.*B (sparse/hyper C, any mask variant), specialized for
// z = first(x,y) = x on bool.
GrB_Info GB (_AemultB_08__first_bool)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult C<#> = A.*B when A is sparse/hyper and B is bitmap/full.
// GB_BINOP_FLIP is 0 for FIRST (see above), so only the unflipped
// branch below is compiled.
GrB_Info GB (_AemultB_02__first_bool)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult C<M> = A.*B where M is sparse/hyper and A, B are bitmap/full,
// specialized for z = first(x,y) = x on bool.
GrB_Info GB (_AemultB_04__first_bool)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult C=A.*B (any mask variant) where C is bitmap, specialized for
// z = first(x,y) = x on bool.
GrB_Info GB (_AemultB_bitmap__first_bool)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *Cx = (bool *) Cx_output ;
bool x = (*((bool *) x_input)) ;
bool *Bx = (bool *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
; ;
Cx [p] = x ;
}
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
bool *Cx = (bool *) Cx_output ;
bool *Ax = (bool *) Ax_input ;
bool y = (*((bool *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
bool aij = GBX (Ax, p, false) ;
Cx [p] = aij ;
}
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
#if 0
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
; ; \
Cx [pC] = x ; \
}
GrB_Info GB ((none))
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
bool
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool x = (*((const bool *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
bool
}
#endif
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
#if 0
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
bool aij = GBX (Ax, pA, false) ; \
Cx [pC] = aij ; \
}
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool y = (*((const bool *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
#endif
|
rowcolsum.c | #include <stdio.h>
#include <omp.h>
/*
 * Read an N x N matrix from stdin, compute per-row and per-column sums in
 * parallel, and print them.
 *
 * Fixes over the original:
 *  - rowSum[i] and colSum[j] updates are now protected with
 *    "#pragma omp atomic": with collapse(2), iterations sharing the same
 *    row i (or the same column j) can run on different threads, so the
 *    unsynchronized "+=" was a data race.
 *  - dropped the "ordered" clause: there was no matching
 *    "#pragma omp ordered" region, so it only constrained the schedule.
 */
int main() {
    int N, i, j, chunk, nthreads;
    // printf("Enter the no of rows and columns: \n");
    scanf("%d", &N);
    int a[N][N], rowSum[N], colSum[N];  /* VLAs sized by the input */
    // printf("Enter the matrix: \n");
    for (i = 0; i < N; i++) {
        for (j = 0; j < N; j++) {
            scanf("%d", &a[i][j]);
        }
    }
    for (i = 0; i < N; i++) {
        rowSum[i] = colSum[i] = 0;
    }
    // printf("Enter chunk size and no of threads: ");
    scanf("%d %d", &chunk, &nthreads);
#pragma omp parallel shared(a, rowSum, colSum, N) private(i, j) num_threads(nthreads)
#pragma omp for collapse(2) schedule(static, chunk)
    for (i = 0; i < N; i++) {
        for (j = 0; j < N; j++) {
#pragma omp atomic
            rowSum[i] += a[i][j];
#pragma omp atomic
            colSum[j] += a[i][j];
        }
    }
    for (i = 0; i < N; i++) {
        printf("row[%d] sum = %d \t col[%d] sum = %d \n", i, rowSum[i], i, colSum[i]);
    }
    return 0;
}
|
star2d2r.c | #define BENCH_DIM 2
#define BENCH_FPP 17
#define BENCH_RAD 2
#include "common.h"
// Runs a radius-2, 9-point "star" stencil over a square 2-D grid for
// `timestep` sweeps, ping-ponging between two time planes of A.
//   A1       flat buffer holding 2 * dimsize * dimsize SB_TYPE elements
//            (two time planes selected via t%2 / (t+1)%2)
//   compsize interior (computed) size per dimension
//   timestep number of time steps to run
//   scop     true  -> plain loop nest wrapped in #pragma scop/endscop markers
//            (the exact loop structure is consumed by polyhedral tools);
//            false -> same computation with OpenMP parallelism over rows
// Returns elapsed wall-clock seconds.
double kernel_stencil(SB_TYPE *A1, int compsize, int timestep, bool scop)
{
  double start_time = sb_time(), end_time = 0.0;
  // Pad the grid by the stencil radius on each side; only the interior
  // [BENCH_RAD, dimsize - BENCH_RAD) is written, halo cells are read-only.
  int dimsize = compsize + BENCH_RAD * 2;
  SB_TYPE (*A)[dimsize][dimsize] = (SB_TYPE (*)[dimsize][dimsize])A1;
  if (scop) {
#pragma scop
    for (int t = 0; t < timestep; t++)
      for (int i = BENCH_RAD; i < dimsize - BENCH_RAD; i++)
        for (int j = BENCH_RAD; j < dimsize - BENCH_RAD; j++)
          A[(t+1)%2][i][j] =
            0.09371f * A[t%2][i-2][j] + 0.09374f * A[t%2][i-1][j] + 0.09376f * A[t%2][i][j-2] +
            0.09372f * A[t%2][i][j-1] + 0.25001f * A[t%2][i][j] + 0.09377f * A[t%2][i][j+1] +
            0.09373f * A[t%2][i][j+2] + 0.09375f * A[t%2][i+1][j] + 0.09378f * A[t%2][i+2][j];
#pragma endscop
  }
  else {
    for (int t = 0; t < timestep; t++)
      // Rows are independent within one time step, so the i-loop is safe to
      // parallelize; the t-loop carries the plane dependency and stays serial.
#pragma omp parallel for
      for (int i = BENCH_RAD; i < dimsize - BENCH_RAD; i++)
        for (int j = BENCH_RAD; j < dimsize - BENCH_RAD; j++)
          A[(t+1)%2][i][j] =
            0.09371f * A[t%2][i-2][j] + 0.09374f * A[t%2][i-1][j] + 0.25001f * A[t%2][i][j] + 0.09377f * A[t%2][i][j+1] +
            0.09373f * A[t%2][i][j+2] + 0.09375f * A[t%2][i+1][j] + 0.09378f * A[t%2][i+2][j];
  }
  // NOTE(review): end_time is never assigned in this function, so the ternary
  // always falls through to sb_time(); presumably kept for symmetry with the
  // other benchmark kernels -- confirm against common.h.
  return (((end_time != 0.0) ? end_time : sb_time()) - start_time);
}
|
convolutiondepthwise_5x5_pack8_fp16s.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
// Depthwise 5x5 convolution, stride 1, for 8-element-packed fp16 data
// (pack8, fp16 storage + fp16 arithmetic), hand-written AArch64 NEON asm.
//
//   bottom_blob  input feature map, layout pack8 fp16; must already be
//                padded so that outw/outh taps stay in bounds
//   top_blob     pre-allocated output feature map (pack8 fp16)
//   kernel       per-group 5x5 weights, 25 packed-8 fp16 vectors per group
//                (200 halfwords = 400 bytes; the asm rewinds 384 bytes and
//                the C tail consumes 40 halfwords x 5 rows)
//   _bias        optional per-channel bias, 8 fp16 per group; empty -> zeros
//   opt          threading options (num_threads)
//
// Depthwise: one input channel maps to one output channel, so groups are
// fully independent and the g-loop is parallelized with OpenMP.
// The kernel computes two output rows per iteration (reading 6 input rows),
// with inner tiles of 4 / 2 / 1 output pixels, then falls back to a
// single-output-row path (reading 5 input rows) for the remaining row.
static void convdw5x5s1_pack8_fp16sa_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
    int w = bottom_blob.w;
    int outw = top_blob.w;
    int outh = top_blob.h;
    const int group = bottom_blob.c;
    // null when the layer has no bias; checked before each load below
    const __fp16* bias = _bias;
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int g = 0; g < group; g++)
    {
        Mat out = top_blob.channel(g);
        // zero fallback used when no bias was provided
        __fp16 bias0_data[8] = {0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f};
        const __fp16* k0 = kernel.row<const __fp16>(g);
        // two output rows produced per outer iteration
        __fp16* outptr0 = out.row<__fp16>(0);
        __fp16* outptr1 = out.row<__fp16>(1);
        const Mat img0 = bottom_blob.channel(g);
        // six consecutive input rows feed two 5-tap output rows
        const __fp16* r0 = img0.row<const __fp16>(0);
        const __fp16* r1 = img0.row<const __fp16>(1);
        const __fp16* r2 = img0.row<const __fp16>(2);
        const __fp16* r3 = img0.row<const __fp16>(3);
        const __fp16* r4 = img0.row<const __fp16>(4);
        const __fp16* r5 = img0.row<const __fp16>(5);
        int i = 0;
        // ---- two output rows at a time ----
        for (; i + 1 < outh; i += 2)
        {
            int j = 0;
            // 4 output pixels per row per asm block (8 accumulators v24..v31)
            for (; j + 3 < outw; j += 4)
            {
                const __fp16* bias0_data_ptr = bias ? bias + g * 8 : bias0_data;
                asm volatile(
                    "prfm pldl1keep, [%18, #512] \n"
                    "ld1 {v31.8h}, [%18] \n" // sum13
                    "prfm pldl1keep, [%2, #512] \n"
                    "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%2], #64 \n" // r0_0123
                    "prfm pldl1keep, [%8, #512] \n"
                    "ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%8], #64 \n" // w0_0123
                    "mov v24.16b, v31.16b \n" // sum00
                    "mov v25.16b, v31.16b \n" // sum01
                    "mov v26.16b, v31.16b \n" // sum02
                    "mov v27.16b, v31.16b \n" // sum03
                    "fmla v24.8h, v16.8h, v0.8h \n"
                    "fmla v25.8h, v17.8h, v0.8h \n"
                    "fmla v26.8h, v18.8h, v0.8h \n"
                    "fmla v27.8h, v19.8h, v0.8h \n"
                    "prfm pldl1keep, [%2, #512] \n"
                    "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%2] \n" // r0_4567
                    "fmla v24.8h, v17.8h, v1.8h \n"
                    "fmla v25.8h, v18.8h, v1.8h \n"
                    "fmla v26.8h, v19.8h, v1.8h \n"
                    "fmla v27.8h, v20.8h, v1.8h \n"
                    "mov v28.16b, v31.16b \n" // sum10
                    "fmla v24.8h, v18.8h, v2.8h \n"
                    "fmla v25.8h, v19.8h, v2.8h \n"
                    "fmla v26.8h, v20.8h, v2.8h \n"
                    "fmla v27.8h, v21.8h, v2.8h \n"
                    "prfm pldl1keep, [%8, #512] \n"
                    "ld1 {v4.8h, v5.8h, v6.8h, v7.8h}, [%8], #64 \n" // w04 w1_012
                    "fmla v24.8h, v19.8h, v3.8h \n"
                    "fmla v25.8h, v20.8h, v3.8h \n"
                    "fmla v26.8h, v21.8h, v3.8h \n"
                    "fmla v27.8h, v22.8h, v3.8h \n"
                    "prfm pldl1keep, [%3, #512] \n"
                    "ld1 {v8.8h, v9.8h, v10.8h, v11.8h}, [%3], #64 \n" // r1_0123
                    "fmla v24.8h, v20.8h, v4.8h \n"
                    "fmla v25.8h, v21.8h, v4.8h \n"
                    "fmla v26.8h, v22.8h, v4.8h \n"
                    "fmla v27.8h, v23.8h, v4.8h \n"
                    "mov v29.16b, v31.16b \n" // sum11
                    "mov v30.16b, v31.16b \n" // sum12
                    "fmla v28.8h, v8.8h, v0.8h \n"
                    "fmla v29.8h, v9.8h, v0.8h \n"
                    "fmla v30.8h, v10.8h, v0.8h \n"
                    "fmla v31.8h, v11.8h, v0.8h \n"
                    "prfm pldl1keep, [%3, #512] \n"
                    "ld1 {v12.8h, v13.8h, v14.8h, v15.8h}, [%3] \n" // r1_4567
                    "fmla v28.8h, v9.8h, v1.8h \n"
                    "fmla v29.8h, v10.8h, v1.8h \n"
                    "fmla v30.8h, v11.8h, v1.8h \n"
                    "fmla v31.8h, v12.8h, v1.8h \n"
                    "fmla v28.8h, v10.8h, v2.8h \n"
                    "fmla v29.8h, v11.8h, v2.8h \n"
                    "fmla v30.8h, v12.8h, v2.8h \n"
                    "fmla v31.8h, v13.8h, v2.8h \n"
                    "fmla v28.8h, v11.8h, v3.8h \n"
                    "fmla v29.8h, v12.8h, v3.8h \n"
                    "fmla v30.8h, v13.8h, v3.8h \n"
                    "fmla v31.8h, v14.8h, v3.8h \n"
                    "fmla v28.8h, v12.8h, v4.8h \n"
                    "fmla v29.8h, v13.8h, v4.8h \n"
                    "fmla v30.8h, v14.8h, v4.8h \n"
                    "fmla v31.8h, v15.8h, v4.8h \n"
                    "fmla v24.8h, v8.8h, v5.8h \n"
                    "fmla v25.8h, v9.8h, v5.8h \n"
                    "fmla v26.8h, v10.8h, v5.8h \n"
                    "fmla v27.8h, v11.8h, v5.8h \n"
                    "fmla v24.8h, v9.8h, v6.8h \n"
                    "fmla v25.8h, v10.8h, v6.8h \n"
                    "fmla v26.8h, v11.8h, v6.8h \n"
                    "fmla v27.8h, v12.8h, v6.8h \n"
                    "prfm pldl1keep, [%8, #512] \n"
                    "ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%8], #64 \n" // w1_34 w2_01
                    "fmla v24.8h, v10.8h, v7.8h \n"
                    "fmla v25.8h, v11.8h, v7.8h \n"
                    "fmla v26.8h, v12.8h, v7.8h \n"
                    "fmla v27.8h, v13.8h, v7.8h \n"
                    "fmla v24.8h, v11.8h, v0.8h \n"
                    "fmla v25.8h, v12.8h, v0.8h \n"
                    "fmla v26.8h, v13.8h, v0.8h \n"
                    "fmla v27.8h, v14.8h, v0.8h \n"
                    "prfm pldl1keep, [%4, #512] \n"
                    "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n" // r2_0123
                    "fmla v24.8h, v12.8h, v1.8h \n"
                    "fmla v25.8h, v13.8h, v1.8h \n"
                    "fmla v26.8h, v14.8h, v1.8h \n"
                    "fmla v27.8h, v15.8h, v1.8h \n"
                    "fmla v28.8h, v16.8h, v5.8h \n"
                    "fmla v29.8h, v17.8h, v5.8h \n"
                    "fmla v30.8h, v18.8h, v5.8h \n"
                    "fmla v31.8h, v19.8h, v5.8h \n"
                    "prfm pldl1keep, [%4, #512] \n"
                    "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4] \n" // r2_4567
                    "fmla v28.8h, v17.8h, v6.8h \n"
                    "fmla v29.8h, v18.8h, v6.8h \n"
                    "fmla v30.8h, v19.8h, v6.8h \n"
                    "fmla v31.8h, v20.8h, v6.8h \n"
                    "fmla v28.8h, v18.8h, v7.8h \n"
                    "fmla v29.8h, v19.8h, v7.8h \n"
                    "fmla v30.8h, v20.8h, v7.8h \n"
                    "fmla v31.8h, v21.8h, v7.8h \n"
                    "fmla v28.8h, v19.8h, v0.8h \n"
                    "fmla v29.8h, v20.8h, v0.8h \n"
                    "fmla v30.8h, v21.8h, v0.8h \n"
                    "fmla v31.8h, v22.8h, v0.8h \n"
                    "fmla v28.8h, v20.8h, v1.8h \n"
                    "fmla v29.8h, v21.8h, v1.8h \n"
                    "fmla v30.8h, v22.8h, v1.8h \n"
                    "fmla v31.8h, v23.8h, v1.8h \n"
                    "fmla v24.8h, v16.8h, v2.8h \n"
                    "fmla v25.8h, v17.8h, v2.8h \n"
                    "fmla v26.8h, v18.8h, v2.8h \n"
                    "fmla v27.8h, v19.8h, v2.8h \n"
                    "prfm pldl1keep, [%8, #512] \n"
                    "ld1 {v4.8h, v5.8h, v6.8h, v7.8h}, [%8], #64 \n" // w2_234 w30
                    "fmla v24.8h, v17.8h, v3.8h \n"
                    "fmla v25.8h, v18.8h, v3.8h \n"
                    "fmla v26.8h, v19.8h, v3.8h \n"
                    "fmla v27.8h, v20.8h, v3.8h \n"
                    "fmla v24.8h, v18.8h, v4.8h \n"
                    "fmla v25.8h, v19.8h, v4.8h \n"
                    "fmla v26.8h, v20.8h, v4.8h \n"
                    "fmla v27.8h, v21.8h, v4.8h \n"
                    "fmla v24.8h, v19.8h, v5.8h \n"
                    "fmla v25.8h, v20.8h, v5.8h \n"
                    "fmla v26.8h, v21.8h, v5.8h \n"
                    "fmla v27.8h, v22.8h, v5.8h \n"
                    "prfm pldl1keep, [%5, #512] \n"
                    "ld1 {v8.8h, v9.8h, v10.8h, v11.8h}, [%5], #64 \n" // r3_0123
                    "fmla v24.8h, v20.8h, v6.8h \n"
                    "fmla v25.8h, v21.8h, v6.8h \n"
                    "fmla v26.8h, v22.8h, v6.8h \n"
                    "fmla v27.8h, v23.8h, v6.8h \n"
                    "fmla v28.8h, v8.8h, v2.8h \n"
                    "fmla v29.8h, v9.8h, v2.8h \n"
                    "fmla v30.8h, v10.8h, v2.8h \n"
                    "fmla v31.8h, v11.8h, v2.8h \n"
                    "prfm pldl1keep, [%5, #512] \n"
                    "ld1 {v12.8h, v13.8h, v14.8h, v15.8h}, [%5] \n" // r3_4567
                    "fmla v28.8h, v9.8h, v3.8h \n"
                    "fmla v29.8h, v10.8h, v3.8h \n"
                    "fmla v30.8h, v11.8h, v3.8h \n"
                    "fmla v31.8h, v12.8h, v3.8h \n"
                    "fmla v28.8h, v10.8h, v4.8h \n"
                    "fmla v29.8h, v11.8h, v4.8h \n"
                    "fmla v30.8h, v12.8h, v4.8h \n"
                    "fmla v31.8h, v13.8h, v4.8h \n"
                    "fmla v28.8h, v11.8h, v5.8h \n"
                    "fmla v29.8h, v12.8h, v5.8h \n"
                    "fmla v30.8h, v13.8h, v5.8h \n"
                    "fmla v31.8h, v14.8h, v5.8h \n"
                    "fmla v28.8h, v12.8h, v6.8h \n"
                    "fmla v29.8h, v13.8h, v6.8h \n"
                    "fmla v30.8h, v14.8h, v6.8h \n"
                    "fmla v31.8h, v15.8h, v6.8h \n"
                    "prfm pldl1keep, [%8, #512] \n"
                    "ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%8], #64 \n" // w3_1234
                    "fmla v24.8h, v8.8h, v7.8h \n"
                    "fmla v25.8h, v9.8h, v7.8h \n"
                    "fmla v26.8h, v10.8h, v7.8h \n"
                    "fmla v27.8h, v11.8h, v7.8h \n"
                    "fmla v24.8h, v9.8h, v0.8h \n"
                    "fmla v25.8h, v10.8h, v0.8h \n"
                    "fmla v26.8h, v11.8h, v0.8h \n"
                    "fmla v27.8h, v12.8h, v0.8h \n"
                    "fmla v24.8h, v10.8h, v1.8h \n"
                    "fmla v25.8h, v11.8h, v1.8h \n"
                    "fmla v26.8h, v12.8h, v1.8h \n"
                    "fmla v27.8h, v13.8h, v1.8h \n"
                    "fmla v24.8h, v11.8h, v2.8h \n"
                    "fmla v25.8h, v12.8h, v2.8h \n"
                    "fmla v26.8h, v13.8h, v2.8h \n"
                    "fmla v27.8h, v14.8h, v2.8h \n"
                    "prfm pldl1keep, [%6, #512] \n"
                    "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n" // r4_0123
                    "fmla v24.8h, v12.8h, v3.8h \n"
                    "fmla v25.8h, v13.8h, v3.8h \n"
                    "fmla v26.8h, v14.8h, v3.8h \n"
                    "fmla v27.8h, v15.8h, v3.8h \n"
                    "fmla v28.8h, v16.8h, v7.8h \n"
                    "fmla v29.8h, v17.8h, v7.8h \n"
                    "fmla v30.8h, v18.8h, v7.8h \n"
                    "fmla v31.8h, v19.8h, v7.8h \n"
                    "prfm pldl1keep, [%6, #512] \n"
                    "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6] \n" // r4_4567
                    "fmla v28.8h, v17.8h, v0.8h \n"
                    "fmla v29.8h, v18.8h, v0.8h \n"
                    "fmla v30.8h, v19.8h, v0.8h \n"
                    "fmla v31.8h, v20.8h, v0.8h \n"
                    "fmla v28.8h, v18.8h, v1.8h \n"
                    "fmla v29.8h, v19.8h, v1.8h \n"
                    "fmla v30.8h, v20.8h, v1.8h \n"
                    "fmla v31.8h, v21.8h, v1.8h \n"
                    "fmla v28.8h, v19.8h, v2.8h \n"
                    "fmla v29.8h, v20.8h, v2.8h \n"
                    "fmla v30.8h, v21.8h, v2.8h \n"
                    "fmla v31.8h, v22.8h, v2.8h \n"
                    "prfm pldl1keep, [%8, #512] \n"
                    "ld1 {v4.8h, v5.8h, v6.8h, v7.8h}, [%8], #64 \n" // w4_0123
                    "fmla v28.8h, v20.8h, v3.8h \n"
                    "fmla v29.8h, v21.8h, v3.8h \n"
                    "fmla v30.8h, v22.8h, v3.8h \n"
                    "fmla v31.8h, v23.8h, v3.8h \n"
                    "fmla v24.8h, v16.8h, v4.8h \n"
                    "fmla v25.8h, v17.8h, v4.8h \n"
                    "fmla v26.8h, v18.8h, v4.8h \n"
                    "fmla v27.8h, v19.8h, v4.8h \n"
                    "fmla v24.8h, v17.8h, v5.8h \n"
                    "fmla v25.8h, v18.8h, v5.8h \n"
                    "fmla v26.8h, v19.8h, v5.8h \n"
                    "fmla v27.8h, v20.8h, v5.8h \n"
                    "fmla v24.8h, v18.8h, v6.8h \n"
                    "fmla v25.8h, v19.8h, v6.8h \n"
                    "fmla v26.8h, v20.8h, v6.8h \n"
                    "fmla v27.8h, v21.8h, v6.8h \n"
                    "prfm pldl1keep, [%8, #128] \n"
                    "ld1 {v0.8h}, [%8] \n" // w44
                    "fmla v24.8h, v19.8h, v7.8h \n"
                    "fmla v25.8h, v20.8h, v7.8h \n"
                    "fmla v26.8h, v21.8h, v7.8h \n"
                    "fmla v27.8h, v22.8h, v7.8h \n"
                    "prfm pldl1keep, [%7, #512] \n"
                    "ld1 {v8.8h, v9.8h, v10.8h, v11.8h}, [%7], #64 \n" // r5_0123
                    "fmla v24.8h, v20.8h, v0.8h \n"
                    "fmla v25.8h, v21.8h, v0.8h \n"
                    "fmla v26.8h, v22.8h, v0.8h \n"
                    "fmla v27.8h, v23.8h, v0.8h \n"
                    "fmla v28.8h, v8.8h, v4.8h \n"
                    "fmla v29.8h, v9.8h, v4.8h \n"
                    "fmla v30.8h, v10.8h, v4.8h \n"
                    "fmla v31.8h, v11.8h, v4.8h \n"
                    "prfm pldl1keep, [%7, #512] \n"
                    "ld1 {v12.8h, v13.8h, v14.8h, v15.8h}, [%7] \n" // r5_4567
                    "fmla v28.8h, v9.8h, v5.8h \n"
                    "fmla v29.8h, v10.8h, v5.8h \n"
                    "fmla v30.8h, v11.8h, v5.8h \n"
                    "fmla v31.8h, v12.8h, v5.8h \n"
                    "fmla v28.8h, v10.8h, v6.8h \n"
                    "fmla v29.8h, v11.8h, v6.8h \n"
                    "fmla v30.8h, v12.8h, v6.8h \n"
                    "fmla v31.8h, v13.8h, v6.8h \n"
                    "fmla v28.8h, v11.8h, v7.8h \n"
                    "fmla v29.8h, v12.8h, v7.8h \n"
                    "fmla v30.8h, v13.8h, v7.8h \n"
                    "fmla v31.8h, v14.8h, v7.8h \n"
                    "fmla v28.8h, v12.8h, v0.8h \n"
                    "fmla v29.8h, v13.8h, v0.8h \n"
                    "fmla v30.8h, v14.8h, v0.8h \n"
                    "fmla v31.8h, v15.8h, v0.8h \n"
                    "sub %8, %8, #384 \n" // k0 -= 24 * 8
                    "st1 {v24.8h, v25.8h, v26.8h, v27.8h}, [%0], #64 \n"
                    "st1 {v28.8h, v29.8h, v30.8h, v31.8h}, [%1], #64 \n"
                    : "=r"(outptr0), // %0
                    "=r"(outptr1), // %1
                    "=r"(r0),      // %2
                    "=r"(r1),      // %3
                    "=r"(r2),      // %4
                    "=r"(r3),      // %5
                    "=r"(r4),      // %6
                    "=r"(r5),      // %7
                    "=r"(k0)       // %8
                    : "0"(outptr0),
                    "1"(outptr1),
                    "2"(r0),
                    "3"(r1),
                    "4"(r2),
                    "5"(r3),
                    "6"(r4),
                    "7"(r5),
                    "8"(k0),
                    "r"(bias0_data_ptr) // %18
                    : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31");
            }
            // bias broadcast for the narrower tiles below (2- and 1-pixel)
            float16x8_t _bias0 = bias ? vld1q_f16(bias + g * 8) : vdupq_n_f16((__fp16)0.f);
            // 2 output pixels per row per asm block
            for (; j + 1 < outw; j += 2)
            {
                asm volatile(
                    "prfm pldl1keep, [%2, #256] \n"
                    "ld1 {v16.8h, v17.8h}, [%2], #32 \n" // r0_01
                    "prfm pldl1keep, [%8, #512] \n"
                    "ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%8], #64 \n" // w0_0123
                    "mov v28.16b, %18.16b \n" // sum00
                    "mov v29.16b, %18.16b \n" // sum01
                    "fmla v28.8h, v16.8h, v0.8h \n"
                    "fmla v29.8h, v17.8h, v0.8h \n"
                    "prfm pldl1keep, [%2, #512] \n"
                    "ld1 {v18.8h, v19.8h, v20.8h, v21.8h}, [%2] \n" // r0_2345
                    "mov v30.16b, %18.16b \n" // sum10
                    "mov v31.16b, %18.16b \n" // sum11
                    "fmla v28.8h, v17.8h, v1.8h \n"
                    "fmla v29.8h, v18.8h, v1.8h \n"
                    "prfm pldl1keep, [%8, #512] \n"
                    "ld1 {v4.8h, v5.8h, v6.8h, v7.8h}, [%8], #64 \n" // w04 w1_012
                    "fmla v28.8h, v18.8h, v2.8h \n"
                    "fmla v29.8h, v19.8h, v2.8h \n"
                    "fmla v28.8h, v19.8h, v3.8h \n"
                    "fmla v29.8h, v20.8h, v3.8h \n"
                    "prfm pldl1keep, [%3, #256] \n"
                    "ld1 {v22.8h, v23.8h}, [%3], #32 \n" // r1_01
                    "fmla v28.8h, v20.8h, v4.8h \n"
                    "fmla v29.8h, v21.8h, v4.8h \n"
                    "prfm pldl1keep, [%3, #512] \n"
                    "ld1 {v24.8h, v25.8h, v26.8h, v27.8h}, [%3] \n" // r1_2345
                    "fmla v30.8h, v22.8h, v0.8h \n"
                    "fmla v31.8h, v23.8h, v0.8h \n"
                    "fmla v30.8h, v23.8h, v1.8h \n"
                    "fmla v31.8h, v24.8h, v1.8h \n"
                    "fmla v30.8h, v24.8h, v2.8h \n"
                    "fmla v31.8h, v25.8h, v2.8h \n"
                    "fmla v30.8h, v25.8h, v3.8h \n"
                    "fmla v31.8h, v26.8h, v3.8h \n"
                    "fmla v30.8h, v26.8h, v4.8h \n"
                    "fmla v31.8h, v27.8h, v4.8h \n"
                    "prfm pldl1keep, [%8, #512] \n"
                    "ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%8], #64 \n" // w1_34 w2_01
                    "fmla v28.8h, v22.8h, v5.8h \n"
                    "fmla v29.8h, v23.8h, v5.8h \n"
                    "fmla v28.8h, v23.8h, v6.8h \n"
                    "fmla v29.8h, v24.8h, v6.8h \n"
                    "fmla v28.8h, v24.8h, v7.8h \n"
                    "fmla v29.8h, v25.8h, v7.8h \n"
                    "fmla v28.8h, v25.8h, v0.8h \n"
                    "fmla v29.8h, v26.8h, v0.8h \n"
                    "prfm pldl1keep, [%4, #256] \n"
                    "ld1 {v16.8h, v17.8h}, [%4], #32 \n" // r2_01
                    "fmla v28.8h, v26.8h, v1.8h \n"
                    "fmla v29.8h, v27.8h, v1.8h \n"
                    "prfm pldl1keep, [%4, #512] \n"
                    "ld1 {v18.8h, v19.8h, v20.8h, v21.8h}, [%4] \n" // r2_2345
                    "fmla v30.8h, v16.8h, v5.8h \n"
                    "fmla v31.8h, v17.8h, v5.8h \n"
                    "fmla v30.8h, v17.8h, v6.8h \n"
                    "fmla v31.8h, v18.8h, v6.8h \n"
                    "fmla v30.8h, v18.8h, v7.8h \n"
                    "fmla v31.8h, v19.8h, v7.8h \n"
                    "fmla v30.8h, v19.8h, v0.8h \n"
                    "fmla v31.8h, v20.8h, v0.8h \n"
                    "fmla v30.8h, v20.8h, v1.8h \n"
                    "fmla v31.8h, v21.8h, v1.8h \n"
                    "prfm pldl1keep, [%8, #512] \n"
                    "ld1 {v4.8h, v5.8h, v6.8h, v7.8h}, [%8], #64 \n" // w2_234 w30
                    "fmla v28.8h, v16.8h, v2.8h \n"
                    "fmla v29.8h, v17.8h, v2.8h \n"
                    "fmla v28.8h, v17.8h, v3.8h \n"
                    "fmla v29.8h, v18.8h, v3.8h \n"
                    "fmla v28.8h, v18.8h, v4.8h \n"
                    "fmla v29.8h, v19.8h, v4.8h \n"
                    "fmla v28.8h, v19.8h, v5.8h \n"
                    "fmla v29.8h, v20.8h, v5.8h \n"
                    "prfm pldl1keep, [%5, #256] \n"
                    "ld1 {v22.8h, v23.8h}, [%5], #32 \n" // r3_01
                    "fmla v28.8h, v20.8h, v6.8h \n"
                    "fmla v29.8h, v21.8h, v6.8h \n"
                    "prfm pldl1keep, [%5, #512] \n"
                    "ld1 {v24.8h, v25.8h, v26.8h, v27.8h}, [%5] \n" // r3_2345
                    "fmla v30.8h, v22.8h, v2.8h \n"
                    "fmla v31.8h, v23.8h, v2.8h \n"
                    "fmla v30.8h, v23.8h, v3.8h \n"
                    "fmla v31.8h, v24.8h, v3.8h \n"
                    "fmla v30.8h, v24.8h, v4.8h \n"
                    "fmla v31.8h, v25.8h, v4.8h \n"
                    "fmla v30.8h, v25.8h, v5.8h \n"
                    "fmla v31.8h, v26.8h, v5.8h \n"
                    "prfm pldl1keep, [%8, #512] \n"
                    "ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%8], #64 \n" // w3_1234
                    "fmla v30.8h, v26.8h, v6.8h \n"
                    "fmla v31.8h, v27.8h, v6.8h \n"
                    "fmla v28.8h, v22.8h, v7.8h \n"
                    "fmla v29.8h, v23.8h, v7.8h \n"
                    "fmla v28.8h, v23.8h, v0.8h \n"
                    "fmla v29.8h, v24.8h, v0.8h \n"
                    "fmla v28.8h, v24.8h, v1.8h \n"
                    "fmla v29.8h, v25.8h, v1.8h \n"
                    "fmla v28.8h, v25.8h, v2.8h \n"
                    "fmla v29.8h, v26.8h, v2.8h \n"
                    "prfm pldl1keep, [%6, #256] \n"
                    "ld1 {v16.8h, v17.8h}, [%6], #32 \n" // r4_01
                    "fmla v28.8h, v26.8h, v3.8h \n"
                    "fmla v29.8h, v27.8h, v3.8h \n"
                    "prfm pldl1keep, [%6, #512] \n"
                    "ld1 {v18.8h, v19.8h, v20.8h, v21.8h}, [%6] \n" // r4_2345
                    "fmla v30.8h, v16.8h, v7.8h \n"
                    "fmla v31.8h, v17.8h, v7.8h \n"
                    "fmla v30.8h, v17.8h, v0.8h \n"
                    "fmla v31.8h, v18.8h, v0.8h \n"
                    "fmla v30.8h, v18.8h, v1.8h \n"
                    "fmla v31.8h, v19.8h, v1.8h \n"
                    "prfm pldl1keep, [%8, #512] \n"
                    "ld1 {v4.8h, v5.8h, v6.8h, v7.8h}, [%8], #64 \n" // w4_0123
                    "fmla v30.8h, v19.8h, v2.8h \n"
                    "fmla v31.8h, v20.8h, v2.8h \n"
                    "fmla v30.8h, v20.8h, v3.8h \n"
                    "fmla v31.8h, v21.8h, v3.8h \n"
                    "fmla v28.8h, v16.8h, v4.8h \n"
                    "fmla v29.8h, v17.8h, v4.8h \n"
                    "fmla v28.8h, v17.8h, v5.8h \n"
                    "fmla v29.8h, v18.8h, v5.8h \n"
                    "prfm pldl1keep, [%8, #128] \n"
                    "ld1 {v0.8h}, [%8] \n" // w44
                    "fmla v28.8h, v18.8h, v6.8h \n"
                    "fmla v29.8h, v19.8h, v6.8h \n"
                    "fmla v28.8h, v19.8h, v7.8h \n"
                    "fmla v29.8h, v20.8h, v7.8h \n"
                    "prfm pldl1keep, [%7, #256] \n"
                    "ld1 {v22.8h, v23.8h}, [%7], #32 \n" // r5_01
                    "fmla v28.8h, v20.8h, v0.8h \n"
                    "fmla v29.8h, v21.8h, v0.8h \n"
                    "prfm pldl1keep, [%7, #512] \n"
                    "ld1 {v24.8h, v25.8h, v26.8h, v27.8h}, [%7] \n" // r5_2345
                    "fmla v30.8h, v22.8h, v4.8h \n"
                    "fmla v31.8h, v23.8h, v4.8h \n"
                    "fmla v30.8h, v23.8h, v5.8h \n"
                    "fmla v31.8h, v24.8h, v5.8h \n"
                    "fmla v30.8h, v24.8h, v6.8h \n"
                    "fmla v31.8h, v25.8h, v6.8h \n"
                    "fmla v30.8h, v25.8h, v7.8h \n"
                    "fmla v31.8h, v26.8h, v7.8h \n"
                    "fmla v30.8h, v26.8h, v0.8h \n"
                    "fmla v31.8h, v27.8h, v0.8h \n"
                    "sub %8, %8, #384 \n" // k0 -= 24 * 8
                    "st1 {v28.8h, v29.8h}, [%0], #32 \n"
                    "st1 {v30.8h, v31.8h}, [%1], #32 \n"
                    : "=r"(outptr0), // %0
                    "=r"(outptr1), // %1
                    "=r"(r0),      // %2
                    "=r"(r1),      // %3
                    "=r"(r2),      // %4
                    "=r"(r3),      // %5
                    "=r"(r4),      // %6
                    "=r"(r5),      // %7
                    "=r"(k0)       // %8
                    : "0"(outptr0),
                    "1"(outptr1),
                    "2"(r0),
                    "3"(r1),
                    "4"(r2),
                    "5"(r3),
                    "6"(r4),
                    "7"(r5),
                    "8"(k0),
                    "w"(_bias0) // %18
                    : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31");
            }
            // 1 output pixel per row (scalar-tile tail)
            for (; j < outw; j++)
            {
                asm volatile(
                    "prfm pldl1keep, [%2, #128] \n"
                    "ld1 {v16.8h}, [%2], #16 \n" // r0_0
                    "prfm pldl1keep, [%2, #512] \n"
                    "ld1 {v17.8h, v18.8h, v19.8h, v20.8h}, [%2] \n" // r0_1234
                    "prfm pldl1keep, [%8, #512] \n"
                    "ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%8], #64 \n" // w0_0123
                    "mov v30.16b, %18.16b \n" // sum00
                    "mov v31.16b, %18.16b \n" // sum10
                    "fmla v30.8h, v16.8h, v0.8h \n"
                    "fmla v30.8h, v17.8h, v1.8h \n"
                    "prfm pldl1keep, [%8, #512] \n"
                    "ld1 {v4.8h, v5.8h, v6.8h, v7.8h}, [%8], #64 \n" // w04 w1_012
                    "fmla v30.8h, v18.8h, v2.8h \n"
                    "prfm pldl1keep, [%3, #128] \n"
                    "ld1 {v21.8h}, [%3], #16 \n" // r1_0
                    "fmla v30.8h, v19.8h, v3.8h \n"
                    "fmla v30.8h, v20.8h, v4.8h \n"
                    "prfm pldl1keep, [%3, #512] \n"
                    "ld1 {v22.8h, v23.8h, v24.8h, v25.8h}, [%3] \n" // r1_1234
                    "fmla v31.8h, v21.8h, v0.8h \n"
                    "fmla v31.8h, v22.8h, v1.8h \n"
                    "fmla v31.8h, v23.8h, v2.8h \n"
                    "fmla v31.8h, v24.8h, v3.8h \n"
                    "fmla v31.8h, v25.8h, v4.8h \n"
                    "prfm pldl1keep, [%8, #512] \n"
                    "ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%8], #64 \n" // w1_34 w2_01
                    "fmla v30.8h, v21.8h, v5.8h \n"
                    "fmla v30.8h, v22.8h, v6.8h \n"
                    "fmla v30.8h, v23.8h, v7.8h \n"
                    "prfm pldl1keep, [%4, #128] \n"
                    "ld1 {v16.8h}, [%4], #16 \n" // r2_0
                    "fmla v30.8h, v24.8h, v0.8h \n"
                    "fmla v30.8h, v25.8h, v1.8h \n"
                    "prfm pldl1keep, [%4, #512] \n"
                    "ld1 {v17.8h, v18.8h, v19.8h, v20.8h}, [%4] \n" // r2_1234
                    "fmla v31.8h, v16.8h, v5.8h \n"
                    "fmla v31.8h, v17.8h, v6.8h \n"
                    "fmla v31.8h, v18.8h, v7.8h \n"
                    "fmla v31.8h, v19.8h, v0.8h \n"
                    "fmla v31.8h, v20.8h, v1.8h \n"
                    "prfm pldl1keep, [%8, #512] \n"
                    "ld1 {v4.8h, v5.8h, v6.8h, v7.8h}, [%8], #64 \n" // w2_234 w30
                    "fmla v30.8h, v16.8h, v2.8h \n"
                    "fmla v30.8h, v17.8h, v3.8h \n"
                    "fmla v30.8h, v18.8h, v4.8h \n"
                    "prfm pldl1keep, [%5, #128] \n"
                    "ld1 {v21.8h}, [%5], #16 \n" // r3_0
                    "fmla v30.8h, v19.8h, v5.8h \n"
                    "fmla v30.8h, v20.8h, v6.8h \n"
                    "prfm pldl1keep, [%5, #512] \n"
                    "ld1 {v22.8h, v23.8h, v24.8h, v25.8h}, [%5] \n" // r3_1234
                    "fmla v31.8h, v21.8h, v2.8h \n"
                    "fmla v31.8h, v22.8h, v3.8h \n"
                    "fmla v31.8h, v23.8h, v4.8h \n"
                    "prfm pldl1keep, [%8, #512] \n"
                    "ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%8], #64 \n" // w3_1234
                    "fmla v31.8h, v24.8h, v5.8h \n"
                    "fmla v31.8h, v25.8h, v6.8h \n"
                    "fmla v30.8h, v21.8h, v7.8h \n"
                    "fmla v30.8h, v22.8h, v0.8h \n"
                    "fmla v30.8h, v23.8h, v1.8h \n"
                    "prfm pldl1keep, [%6, #128] \n"
                    "ld1 {v16.8h}, [%6], #16 \n" // r4_0
                    "fmla v30.8h, v24.8h, v2.8h \n"
                    "fmla v30.8h, v25.8h, v3.8h \n"
                    "prfm pldl1keep, [%6, #512] \n"
                    "ld1 {v17.8h, v18.8h, v19.8h, v20.8h}, [%6] \n" // r4_1234
                    "fmla v31.8h, v16.8h, v7.8h \n"
                    "fmla v31.8h, v17.8h, v0.8h \n"
                    "fmla v31.8h, v18.8h, v1.8h \n"
                    "prfm pldl1keep, [%8, #512] \n"
                    "ld1 {v4.8h, v5.8h, v6.8h, v7.8h}, [%8], #64 \n" // w4_0123
                    "fmla v31.8h, v19.8h, v2.8h \n"
                    "fmla v31.8h, v20.8h, v3.8h \n"
                    "fmla v30.8h, v16.8h, v4.8h \n"
                    "fmla v30.8h, v17.8h, v5.8h \n"
                    "prfm pldl1keep, [%8, #128] \n"
                    "ld1 {v0.8h}, [%8] \n" // w44
                    "fmla v30.8h, v18.8h, v6.8h \n"
                    "prfm pldl1keep, [%7, #128] \n"
                    "ld1 {v21.8h}, [%7], #16 \n" // r5_0
                    "fmla v30.8h, v19.8h, v7.8h \n"
                    "fmla v30.8h, v20.8h, v0.8h \n"
                    "prfm pldl1keep, [%7, #512] \n"
                    "ld1 {v22.8h, v23.8h, v24.8h, v25.8h}, [%7] \n" // r5_1234
                    "fmla v31.8h, v21.8h, v4.8h \n"
                    "fmla v31.8h, v22.8h, v5.8h \n"
                    "fmla v31.8h, v23.8h, v6.8h \n"
                    "fmla v31.8h, v24.8h, v7.8h \n"
                    "fmla v31.8h, v25.8h, v0.8h \n"
                    "sub %8, %8, #384 \n" // k0 -= 24 * 8
                    "st1 {v30.8h}, [%0], #16 \n"
                    "st1 {v31.8h}, [%1], #16 \n"
                    : "=r"(outptr0), // %0
                    "=r"(outptr1), // %1
                    "=r"(r0),      // %2
                    "=r"(r1),      // %3
                    "=r"(r2),      // %4
                    "=r"(r3),      // %5
                    "=r"(r4),      // %6
                    "=r"(r5),      // %7
                    "=r"(k0)       // %8
                    : "0"(outptr0),
                    "1"(outptr1),
                    "2"(r0),
                    "3"(r1),
                    "4"(r2),
                    "5"(r3),
                    "6"(r4),
                    "7"(r5),
                    "8"(k0),
                    "w"(_bias0) // %18
                    : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v30", "v31");
            }
            // Skip the 4-tap right overhang (4*8 halfwords) plus one extra
            // input row, since two output rows were consumed this iteration.
            r0 += 4 * 8 + w * 8;
            r1 += 4 * 8 + w * 8;
            r2 += 4 * 8 + w * 8;
            r3 += 4 * 8 + w * 8;
            r4 += 4 * 8 + w * 8;
            r5 += 4 * 8 + w * 8;
            outptr0 += outw * 8;
            outptr1 += outw * 8;
        }
        // ---- remaining single output row (outh odd): 5 input rows used ----
        float16x8_t _bias0 = bias ? vld1q_f16(bias + g * 8) : vdupq_n_f16((__fp16)0.f);
        for (; i < outh; i++)
        {
            int j = 0;
            // 4 output pixels per asm block
            for (; j + 3 < outw; j += 4)
            {
                asm volatile(
                    "prfm pldl1keep, [%1, #512] \n"
                    "ld1 {v12.8h, v13.8h, v14.8h, v15.8h}, [%1], #64 \n" // r0_0123
                    "prfm pldl1keep, [%6, #512] \n"
                    "ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%6], #64 \n" // w0_0123
                    "mov v28.16b, %14.16b \n" // sum00
                    "mov v29.16b, %14.16b \n" // sum01
                    "mov v30.16b, %14.16b \n" // sum02
                    "mov v31.16b, %14.16b \n" // sum03
                    "fmla v28.8h, v12.8h, v0.8h \n"
                    "fmla v29.8h, v13.8h, v0.8h \n"
                    "fmla v30.8h, v14.8h, v0.8h \n"
                    "fmla v31.8h, v15.8h, v0.8h \n"
                    "prfm pldl1keep, [%1, #512] \n"
                    "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%1] \n" // r0_4567
                    "fmla v28.8h, v13.8h, v1.8h \n"
                    "fmla v29.8h, v14.8h, v1.8h \n"
                    "fmla v30.8h, v15.8h, v1.8h \n"
                    "fmla v31.8h, v16.8h, v1.8h \n"
                    "fmla v28.8h, v14.8h, v2.8h \n"
                    "fmla v29.8h, v15.8h, v2.8h \n"
                    "fmla v30.8h, v16.8h, v2.8h \n"
                    "fmla v31.8h, v17.8h, v2.8h \n"
                    "prfm pldl1keep, [%6, #512] \n"
                    "ld1 {v4.8h, v5.8h, v6.8h, v7.8h}, [%6], #64 \n" // w04 w1_012
                    "fmla v28.8h, v15.8h, v3.8h \n"
                    "fmla v29.8h, v16.8h, v3.8h \n"
                    "fmla v30.8h, v17.8h, v3.8h \n"
                    "fmla v31.8h, v18.8h, v3.8h \n"
                    "prfm pldl1keep, [%2, #512] \n"
                    "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%2], #64 \n" // r1_0123
                    "fmla v28.8h, v16.8h, v4.8h \n"
                    "fmla v29.8h, v17.8h, v4.8h \n"
                    "fmla v30.8h, v18.8h, v4.8h \n"
                    "fmla v31.8h, v19.8h, v4.8h \n"
                    "fmla v28.8h, v20.8h, v5.8h \n"
                    "fmla v29.8h, v21.8h, v5.8h \n"
                    "fmla v30.8h, v22.8h, v5.8h \n"
                    "fmla v31.8h, v23.8h, v5.8h \n"
                    "prfm pldl1keep, [%2, #512] \n"
                    "ld1 {v24.8h, v25.8h, v26.8h, v27.8h}, [%2] \n" // r1_4567
                    "fmla v28.8h, v21.8h, v6.8h \n"
                    "fmla v29.8h, v22.8h, v6.8h \n"
                    "fmla v30.8h, v23.8h, v6.8h \n"
                    "fmla v31.8h, v24.8h, v6.8h \n"
                    "prfm pldl1keep, [%6, #512] \n"
                    "ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%6], #64 \n" // w1_34 w2_01
                    "fmla v28.8h, v22.8h, v7.8h \n"
                    "fmla v29.8h, v23.8h, v7.8h \n"
                    "fmla v30.8h, v24.8h, v7.8h \n"
                    "fmla v31.8h, v25.8h, v7.8h \n"
                    "fmla v28.8h, v23.8h, v0.8h \n"
                    "fmla v29.8h, v24.8h, v0.8h \n"
                    "fmla v30.8h, v25.8h, v0.8h \n"
                    "fmla v31.8h, v26.8h, v0.8h \n"
                    "prfm pldl1keep, [%3, #512] \n"
                    "ld1 {v12.8h, v13.8h, v14.8h, v15.8h}, [%3], #64 \n" // r2_0123
                    "fmla v28.8h, v24.8h, v1.8h \n"
                    "fmla v29.8h, v25.8h, v1.8h \n"
                    "fmla v30.8h, v26.8h, v1.8h \n"
                    "fmla v31.8h, v27.8h, v1.8h \n"
                    "prfm pldl1keep, [%3, #512] \n"
                    "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%3] \n" // r2_4567
                    "fmla v28.8h, v12.8h, v2.8h \n"
                    "fmla v29.8h, v13.8h, v2.8h \n"
                    "fmla v30.8h, v14.8h, v2.8h \n"
                    "fmla v31.8h, v15.8h, v2.8h \n"
                    "prfm pldl1keep, [%6, #512] \n"
                    "ld1 {v4.8h, v5.8h, v6.8h, v7.8h}, [%6], #64 \n" // w2_234 w30
                    "fmla v28.8h, v13.8h, v3.8h \n"
                    "fmla v29.8h, v14.8h, v3.8h \n"
                    "fmla v30.8h, v15.8h, v3.8h \n"
                    "fmla v31.8h, v16.8h, v3.8h \n"
                    "fmla v28.8h, v14.8h, v4.8h \n"
                    "fmla v29.8h, v15.8h, v4.8h \n"
                    "fmla v30.8h, v16.8h, v4.8h \n"
                    "fmla v31.8h, v17.8h, v4.8h \n"
                    "fmla v28.8h, v15.8h, v5.8h \n"
                    "fmla v29.8h, v16.8h, v5.8h \n"
                    "fmla v30.8h, v17.8h, v5.8h \n"
                    "fmla v31.8h, v18.8h, v5.8h \n"
                    "prfm pldl1keep, [%4, #512] \n"
                    "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n" // r3_0123
                    "fmla v28.8h, v16.8h, v6.8h \n"
                    "fmla v29.8h, v17.8h, v6.8h \n"
                    "fmla v30.8h, v18.8h, v6.8h \n"
                    "fmla v31.8h, v19.8h, v6.8h \n"
                    "prfm pldl1keep, [%6, #512] \n"
                    "ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%6], #64 \n" // w3_1234
                    "fmla v28.8h, v20.8h, v7.8h \n"
                    "fmla v29.8h, v21.8h, v7.8h \n"
                    "fmla v30.8h, v22.8h, v7.8h \n"
                    "fmla v31.8h, v23.8h, v7.8h \n"
                    "prfm pldl1keep, [%4, #512] \n"
                    "ld1 {v24.8h, v25.8h, v26.8h, v27.8h}, [%4] \n" // r3_4567
                    "fmla v28.8h, v21.8h, v0.8h \n"
                    "fmla v29.8h, v22.8h, v0.8h \n"
                    "fmla v30.8h, v23.8h, v0.8h \n"
                    "fmla v31.8h, v24.8h, v0.8h \n"
                    "fmla v28.8h, v22.8h, v1.8h \n"
                    "fmla v29.8h, v23.8h, v1.8h \n"
                    "fmla v30.8h, v24.8h, v1.8h \n"
                    "fmla v31.8h, v25.8h, v1.8h \n"
                    "prfm pldl1keep, [%5, #512] \n"
                    "ld1 {v12.8h, v13.8h, v14.8h, v15.8h}, [%5], #64 \n" // r4_0123
                    "fmla v28.8h, v23.8h, v2.8h \n"
                    "fmla v29.8h, v24.8h, v2.8h \n"
                    "fmla v30.8h, v25.8h, v2.8h \n"
                    "fmla v31.8h, v26.8h, v2.8h \n"
                    "prfm pldl1keep, [%6, #512] \n"
                    "ld1 {v4.8h, v5.8h, v6.8h, v7.8h}, [%6], #64 \n" // w4_0123
                    "fmla v28.8h, v24.8h, v3.8h \n"
                    "fmla v29.8h, v25.8h, v3.8h \n"
                    "fmla v30.8h, v26.8h, v3.8h \n"
                    "fmla v31.8h, v27.8h, v3.8h \n"
                    "fmla v28.8h, v12.8h, v4.8h \n"
                    "fmla v29.8h, v13.8h, v4.8h \n"
                    "fmla v30.8h, v14.8h, v4.8h \n"
                    "fmla v31.8h, v15.8h, v4.8h \n"
                    "prfm pldl1keep, [%5, #512] \n"
                    "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%5] \n" // r4_4567
                    "fmla v28.8h, v13.8h, v5.8h \n"
                    "fmla v29.8h, v14.8h, v5.8h \n"
                    "fmla v30.8h, v15.8h, v5.8h \n"
                    "fmla v31.8h, v16.8h, v5.8h \n"
                    "fmla v28.8h, v14.8h, v6.8h \n"
                    "fmla v29.8h, v15.8h, v6.8h \n"
                    "fmla v30.8h, v16.8h, v6.8h \n"
                    "fmla v31.8h, v17.8h, v6.8h \n"
                    "prfm pldl1keep, [%6, #128] \n"
                    "ld1 {v0.8h}, [%6] \n" // w44
                    "fmla v28.8h, v15.8h, v7.8h \n"
                    "fmla v29.8h, v16.8h, v7.8h \n"
                    "fmla v30.8h, v17.8h, v7.8h \n"
                    "fmla v31.8h, v18.8h, v7.8h \n"
                    "fmla v28.8h, v16.8h, v0.8h \n"
                    "fmla v29.8h, v17.8h, v0.8h \n"
                    "fmla v30.8h, v18.8h, v0.8h \n"
                    "fmla v31.8h, v19.8h, v0.8h \n"
                    "sub %6, %6, #384 \n" // k0 -= 24 * 8
                    "st1 {v28.8h, v29.8h, v30.8h, v31.8h}, [%0], #64 \n"
                    : "=r"(outptr0), // %0
                    "=r"(r0),      // %1
                    "=r"(r1),      // %2
                    "=r"(r2),      // %3
                    "=r"(r3),      // %4
                    "=r"(r4),      // %5
                    "=r"(k0)       // %6
                    : "0"(outptr0),
                    "1"(r0),
                    "2"(r1),
                    "3"(r2),
                    "4"(r3),
                    "5"(r4),
                    "6"(k0),
                    "w"(_bias0) // %14
                    : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31");
            }
            // 2 output pixels per asm block
            for (; j + 1 < outw; j += 2)
            {
                asm volatile(
                    "prfm pldl1keep, [%1, #256] \n"
                    "ld1 {v16.8h, v17.8h}, [%1], #32 \n" // r0_01
                    "prfm pldl1keep, [%6, #512] \n"
                    "ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%6], #64 \n" // w0_0123
                    "mov v30.16b, %14.16b \n" // sum00
                    "mov v31.16b, %14.16b \n" // sum01
                    "fmla v30.8h, v16.8h, v0.8h \n"
                    "fmla v31.8h, v17.8h, v0.8h \n"
                    "prfm pldl1keep, [%1, #512] \n"
                    "ld1 {v18.8h, v19.8h, v20.8h, v21.8h}, [%1] \n" // r0_2345
                    "fmla v30.8h, v17.8h, v1.8h \n"
                    "fmla v31.8h, v18.8h, v1.8h \n"
                    "fmla v30.8h, v18.8h, v2.8h \n"
                    "fmla v31.8h, v19.8h, v2.8h \n"
                    "prfm pldl1keep, [%6, #512] \n"
                    "ld1 {v4.8h, v5.8h, v6.8h, v7.8h}, [%6], #64 \n" // w04 w1_012
                    "fmla v30.8h, v19.8h, v3.8h \n"
                    "fmla v31.8h, v20.8h, v3.8h \n"
                    "prfm pldl1keep, [%2, #256] \n"
                    "ld1 {v22.8h, v23.8h}, [%2], #32 \n" // r1_01
                    "fmla v30.8h, v20.8h, v4.8h \n"
                    "fmla v31.8h, v21.8h, v4.8h \n"
                    "prfm pldl1keep, [%2, #512] \n"
                    "ld1 {v24.8h, v25.8h, v26.8h, v27.8h}, [%2] \n" // r1_2345
                    "fmla v30.8h, v22.8h, v5.8h \n"
                    "fmla v31.8h, v23.8h, v5.8h \n"
                    "fmla v30.8h, v23.8h, v6.8h \n"
                    "fmla v31.8h, v24.8h, v6.8h \n"
                    "prfm pldl1keep, [%6, #512] \n"
                    "ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%6], #64 \n" // w1_34 w2_01
                    "fmla v30.8h, v24.8h, v7.8h \n"
                    "fmla v31.8h, v25.8h, v7.8h \n"
                    "fmla v30.8h, v25.8h, v0.8h \n"
                    "fmla v31.8h, v26.8h, v0.8h \n"
                    "prfm pldl1keep, [%3, #256] \n"
                    "ld1 {v16.8h, v17.8h}, [%3], #32 \n" // r2_01
                    "fmla v30.8h, v26.8h, v1.8h \n"
                    "fmla v31.8h, v27.8h, v1.8h \n"
                    "prfm pldl1keep, [%3, #512] \n"
                    "ld1 {v18.8h, v19.8h, v20.8h, v21.8h}, [%3] \n" // r2_2345
                    "fmla v30.8h, v16.8h, v2.8h \n"
                    "fmla v31.8h, v17.8h, v2.8h \n"
                    "prfm pldl1keep, [%6, #512] \n"
                    "ld1 {v4.8h, v5.8h, v6.8h, v7.8h}, [%6], #64 \n" // w2_234 w30
                    "fmla v30.8h, v17.8h, v3.8h \n"
                    "fmla v31.8h, v18.8h, v3.8h \n"
                    "fmla v30.8h, v18.8h, v4.8h \n"
                    "fmla v31.8h, v19.8h, v4.8h \n"
                    "prfm pldl1keep, [%4, #256] \n"
                    "ld1 {v22.8h, v23.8h}, [%4], #32 \n" // r3_01
                    "fmla v30.8h, v19.8h, v5.8h \n"
                    "fmla v31.8h, v20.8h, v5.8h \n"
                    "prfm pldl1keep, [%6, #512] \n"
                    "ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%6], #64 \n" // w3_1234
                    "fmla v30.8h, v20.8h, v6.8h \n"
                    "fmla v31.8h, v21.8h, v6.8h \n"
                    "prfm pldl1keep, [%4, #512] \n"
                    "ld1 {v24.8h, v25.8h, v26.8h, v27.8h}, [%4] \n" // r3_2345
                    "fmla v30.8h, v22.8h, v7.8h \n"
                    "fmla v31.8h, v23.8h, v7.8h \n"
                    "fmla v30.8h, v23.8h, v0.8h \n"
                    "fmla v31.8h, v24.8h, v0.8h \n"
                    "fmla v30.8h, v24.8h, v1.8h \n"
                    "fmla v31.8h, v25.8h, v1.8h \n"
                    "prfm pldl1keep, [%5, #256] \n"
                    "ld1 {v16.8h, v17.8h}, [%5], #32 \n" // r4_01
                    "fmla v30.8h, v25.8h, v2.8h \n"
                    "fmla v31.8h, v26.8h, v2.8h \n"
                    "prfm pldl1keep, [%6, #512] \n"
                    "ld1 {v4.8h, v5.8h, v6.8h, v7.8h}, [%6], #64 \n" // w4_0123
                    "fmla v30.8h, v26.8h, v3.8h \n"
                    "fmla v31.8h, v27.8h, v3.8h \n"
                    "prfm pldl1keep, [%5, #512] \n"
                    "ld1 {v18.8h, v19.8h, v20.8h, v21.8h}, [%5] \n" // r4_2345
                    "fmla v30.8h, v16.8h, v4.8h \n"
                    "fmla v31.8h, v17.8h, v4.8h \n"
                    "fmla v30.8h, v17.8h, v5.8h \n"
                    "fmla v31.8h, v18.8h, v5.8h \n"
                    "prfm pldl1keep, [%6, #128] \n"
                    "ld1 {v0.8h}, [%6] \n" // w44
                    "fmla v30.8h, v18.8h, v6.8h \n"
                    "fmla v31.8h, v19.8h, v6.8h \n"
                    "fmla v30.8h, v19.8h, v7.8h \n"
                    "fmla v31.8h, v20.8h, v7.8h \n"
                    "fmla v30.8h, v20.8h, v0.8h \n"
                    "fmla v31.8h, v21.8h, v0.8h \n"
                    "sub %6, %6, #384 \n" // k0 -= 24 * 8
                    "st1 {v30.8h, v31.8h}, [%0], #32 \n"
                    : "=r"(outptr0), // %0
                    "=r"(r0),      // %1
                    "=r"(r1),      // %2
                    "=r"(r2),      // %3
                    "=r"(r3),      // %4
                    "=r"(r4),      // %5
                    "=r"(k0)       // %6
                    : "0"(outptr0),
                    "1"(r0),
                    "2"(r1),
                    "3"(r2),
                    "4"(r3),
                    "5"(r4),
                    "6"(k0),
                    "w"(_bias0) // %14
                    : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v30", "v31");
            }
            // 1 output pixel tail
            for (; j < outw; j++)
            {
                asm volatile(
                    "prfm pldl1keep, [%1, #128] \n"
                    "ld1 {v16.8h}, [%1], #16 \n" // r0_0
                    "prfm pldl1keep, [%6, #512] \n"
                    "ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%6], #64 \n" // w0_0123
                    "mov v30.16b, %14.16b \n" // sum00
                    "prfm pldl1keep, [%1, #512] \n"
                    "ld1 {v17.8h, v18.8h, v19.8h, v20.8h}, [%1] \n" // r0_1234
                    "fmla v30.8h, v16.8h, v0.8h \n"
                    "prfm pldl1keep, [%6, #512] \n"
                    "ld1 {v4.8h, v5.8h, v6.8h, v7.8h}, [%6], #64 \n" // w04 w1_012
                    "fmla v30.8h, v17.8h, v1.8h \n"
                    "fmla v30.8h, v18.8h, v2.8h \n"
                    "prfm pldl1keep, [%2, #128] \n"
                    "ld1 {v21.8h}, [%2], #16 \n" // r1_0
                    "fmla v30.8h, v19.8h, v3.8h \n"
                    "prfm pldl1keep, [%2, #512] \n"
                    "ld1 {v22.8h, v23.8h, v24.8h, v25.8h}, [%2] \n" // r1_1234
                    "fmla v30.8h, v20.8h, v4.8h \n"
                    "fmla v30.8h, v21.8h, v5.8h \n"
                    "prfm pldl1keep, [%6, #512] \n"
                    "ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%6], #64 \n" // w1_34 w2_01
                    "fmla v30.8h, v22.8h, v6.8h \n"
                    "fmla v30.8h, v23.8h, v7.8h \n"
                    "prfm pldl1keep, [%3, #128] \n"
                    "ld1 {v16.8h}, [%3], #16 \n" // r2_0
                    "fmla v30.8h, v24.8h, v0.8h \n"
                    "prfm pldl1keep, [%3, #512] \n"
                    "ld1 {v17.8h, v18.8h, v19.8h, v20.8h}, [%3] \n" // r2_1234
                    "fmla v30.8h, v25.8h, v1.8h \n"
                    "prfm pldl1keep, [%6, #512] \n"
                    "ld1 {v4.8h, v5.8h, v6.8h, v7.8h}, [%6], #64 \n" // w2_234 w30
                    "fmla v30.8h, v16.8h, v2.8h \n"
                    "fmla v30.8h, v17.8h, v3.8h \n"
                    "prfm pldl1keep, [%4, #128] \n"
                    "ld1 {v21.8h}, [%4], #16 \n" // r3_0
                    "fmla v30.8h, v18.8h, v4.8h \n"
                    "prfm pldl1keep, [%4, #512] \n"
                    "ld1 {v22.8h, v23.8h, v24.8h, v25.8h}, [%4] \n" // r3_1234
                    "fmla v30.8h, v19.8h, v5.8h \n"
                    "prfm pldl1keep, [%6, #512] \n"
                    "ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%6], #64 \n" // w3_1234
                    "fmla v30.8h, v20.8h, v6.8h \n"
                    "fmla v30.8h, v21.8h, v7.8h \n"
                    "fmla v30.8h, v22.8h, v0.8h \n"
                    "prfm pldl1keep, [%5, #128] \n"
                    "ld1 {v16.8h}, [%5], #16 \n" // r4_0
                    "fmla v30.8h, v23.8h, v1.8h \n"
                    "prfm pldl1keep, [%6, #512] \n"
                    "ld1 {v4.8h, v5.8h, v6.8h, v7.8h}, [%6], #64 \n" // w4_0123
                    "fmla v30.8h, v24.8h, v2.8h \n"
                    "prfm pldl1keep, [%5, #512] \n"
                    "ld1 {v17.8h, v18.8h, v19.8h, v20.8h}, [%5] \n" // r4_1234
                    "fmla v30.8h, v25.8h, v3.8h \n"
                    "fmla v30.8h, v16.8h, v4.8h \n"
                    "fmla v30.8h, v17.8h, v5.8h \n"
                    "prfm pldl1keep, [%6, #128] \n"
                    "ld1 {v0.8h}, [%6] \n" // w44
                    "fmla v30.8h, v18.8h, v6.8h \n"
                    "fmla v30.8h, v19.8h, v7.8h \n"
                    "fmla v30.8h, v20.8h, v0.8h \n"
                    "sub %6, %6, #384 \n" // k0 -= 24 * 8
                    "st1 {v30.8h}, [%0], #16 \n"
                    : "=r"(outptr0), // %0
                    "=r"(r0),      // %1
                    "=r"(r1),      // %2
                    "=r"(r2),      // %3
                    "=r"(r3),      // %4
                    "=r"(r4),      // %5
                    "=r"(k0)       // %6
                    : "0"(outptr0),
                    "1"(r0),
                    "2"(r1),
                    "3"(r2),
                    "4"(r3),
                    "5"(r4),
                    "6"(k0),
                    "w"(_bias0) // %14
                    : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v30");
            }
            // skip the 4-tap right overhang only (single row consumed)
            r0 += 4 * 8;
            r1 += 4 * 8;
            r2 += 4 * 8;
            r3 += 4 * 8;
            r4 += 4 * 8;
        }
    }
}
// Depthwise 5x5 convolution, stride 2, fp16 storage+arith, pack8 layout.
// One channel group per OpenMP task; each output element accumulates the
// 25 kernel taps over 5 input rows.
static void convdw5x5s2_pack8_fp16sa_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
    const int w = bottom_blob.w;
    const int outw = top_blob.w;
    const int outh = top_blob.h;
    const int group = bottom_blob.c;

    // stride-2: after outw outputs we consumed 2*outw input columns of the
    // current row; skip the leftover columns plus one whole extra row
    // (pack8 => 8 halfs per element)
    const int tailstep = (w - 2 * outw + w) * 8;

    const __fp16* bias = _bias;

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int g = 0; g < group; g++)
    {
        Mat out = top_blob.channel(g);

        float16x8_t _bias0 = bias ? vld1q_f16(bias + g * 8) : vdupq_n_f16((__fp16)0.f);

        const __fp16* k0 = kernel.row<const __fp16>(g);

        __fp16* outptr0 = out.row<__fp16>(0);

        const Mat img0 = bottom_blob.channel(g);

        // five consecutive input rows contributing to one output row
        const __fp16* r0 = img0.row<const __fp16>(0);
        const __fp16* r1 = img0.row<const __fp16>(1);
        const __fp16* r2 = img0.row<const __fp16>(2);
        const __fp16* r3 = img0.row<const __fp16>(3);
        const __fp16* r4 = img0.row<const __fp16>(4);

        for (int i = 0; i < outh; i++)
        {
            for (int j = 0; j < outw; j++)
            {
                float16x8_t _sum0 = _bias0;

                const __fp16* rows[5] = {r0, r1, r2, r3, r4};

                // 5x5 taps in row-major order, tap 0..4 within each row —
                // the same accumulation order as the fully unrolled form,
                // so fp16 rounding is bit-identical
                for (int m = 0; m < 5; m++)
                {
                    const __fp16* rp = rows[m];
                    const __fp16* kp = k0 + m * 40; // 5 taps * 8 lanes per kernel row

                    for (int t = 0; t < 5; t++)
                    {
                        float16x8_t _r = vld1q_f16(rp + t * 8);
                        float16x8_t _k = vld1q_f16(kp + t * 8);
                        _sum0 = vfmaq_f16(_sum0, _k, _r);
                    }
                }

                vst1q_f16(outptr0, _sum0);
                outptr0 += 8;

                // stride 2 in packed layout: advance 2 elements = 16 halfs
                r0 += 16;
                r1 += 16;
                r2 += 16;
                r3 += 16;
                r4 += 16;
            }

            r0 += tailstep;
            r1 += tailstep;
            r2 += tailstep;
            r3 += tailstep;
            r4 += tailstep;
        }
    }
}
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>
void fillMatrix(int n, double * matrix);
double * createMatrix(int n);
/* Adjoint-convolution benchmark: for every i, c[i] = sum_{k>=i} b[k]*a[k-i],
 * over flattened mSize x mSize matrices, timed with OpenMP. */
int main(int argc, char * argv[]) {
    unsigned int mSize = 0, i, k; /* i,k at function scope: named in the omp private clause */
    double * a, * b, * c;
    double wtime;

    mSize = 600;

    a = createMatrix(mSize);
    b = createMatrix(mSize);
    c = createMatrix(mSize);
    /* malloc can fail for large sizes; bail out cleanly instead of crashing */
    if (a == NULL || b == NULL || c == NULL) {
        fprintf(stderr, "Failed to allocate %u x %u matrices\n", mSize, mSize);
        free(a);
        free(b);
        free(c);
        return EXIT_FAILURE;
    }

    fillMatrix(mSize, a);
    fillMatrix(mSize, b);

    /* %u: mSize is unsigned int (was %d, a format/type mismatch) */
    printf ( "Adjoint convolution benchmark with matrix size %u\n", mSize );

    wtime = omp_get_wtime ( );
    /* guided schedule: later iterations have shorter inner loops, so chunks shrink */
    #pragma omp parallel for private(i,k) schedule(guided,100) num_threads(9)
    for (i = 0; i < mSize*mSize; i++) {
        c[i] = 0;
        for (k = i; k < mSize*mSize; k++) {
            c[i] += b[k]*a[k-i];
        }
    }
    wtime = omp_get_wtime ( ) - wtime;

    printf ( "\n" );
    printf ( " Time = %g seconds.\n", wtime );

    free(a);
    free(b);
    free(c);
    return 0;
}
/* Allocate uninitialized storage for an n-by-n matrix of doubles.
 * Caller owns the buffer and must free() it; may return NULL on failure. */
double * createMatrix(int n) {
    return (double*) malloc(n * n * sizeof(double));
}
/* Fill all n*n entries of `matrix` with pseudo-random whole numbers in
 * [-5, 4] (rand()%10 yields 0..9, shifted down by 5). Uses the global
 * rand() stream — no seeding here. */
void fillMatrix(int n, double * matrix) {
    const int total = n * n;
    int idx;
    for (idx = 0; idx < total; idx++) {
        matrix[idx] = (double)(rand() % 10 - 5);
    }
}
/******************************************************************************
* Copyright (c) 1998 Lawrence Livermore National Security, LLC and other
* HYPRE Project Developers. See the top-level COPYRIGHT file for details.
*
* SPDX-License-Identifier: (Apache-2.0 OR MIT)
******************************************************************************/
#include "_hypre_utilities.h"
#include "_hypre_parcsr_mv.h"
#include "_hypre_utilities.hpp"
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
/**
 * Start (asynchronously) fetching the off-processor rows of A described by
 * comm_pkg. Row lengths are exchanged first; the big-column-index and
 * (optionally) value transfers are left in flight. The caller completes the
 * exchange with hypre_ParcsrGetExternalRowsDeviceWait on *request_ptr, which
 * then returns the received rows as a device CSR matrix A_ext.
 *
 * @param A           parallel CSR matrix whose rows are exchanged
 * @param indices_len must equal the number of rows to receive per comm_pkg
 * @param indices     NOTE(review): not referenced in this body — comm_pkg
 *                    already encodes which rows are exchanged
 * @param comm_pkg    communication package defining send/recv patterns
 * @param want_data   nonzero => also exchange matrix values
 * @param request_ptr out: opaque request (comm handles + A_ext) for Wait
 */
HYPRE_Int
hypre_ParcsrGetExternalRowsDeviceInit( hypre_ParCSRMatrix *A,
                                       HYPRE_Int indices_len,
                                       HYPRE_BigInt *indices,
                                       hypre_ParCSRCommPkg *comm_pkg,
                                       HYPRE_Int want_data,
                                       void **request_ptr)
{
   HYPRE_Int i, j;
   HYPRE_Int num_sends, num_rows_send, num_nnz_send, num_recvs, num_rows_recv, num_nnz_recv;
   HYPRE_Int *d_send_i, *send_i, *d_send_map, *d_recv_i, *recv_i;
   HYPRE_BigInt *d_send_j, *d_recv_j;
   HYPRE_Int *send_jstarts, *recv_jstarts;
   HYPRE_Complex *d_send_a = NULL, *d_recv_a = NULL;
   hypre_ParCSRCommPkg *comm_pkg_j;
   hypre_ParCSRCommHandle *comm_handle, *comm_handle_j, *comm_handle_a;
   /* HYPRE_Int global_num_rows = hypre_ParCSRMatrixGlobalNumRows(A); */
   /* diag part of A */
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   HYPRE_Complex *A_diag_a = hypre_CSRMatrixData(A_diag);
   HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
   HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
   /* HYPRE_Int local_num_rows = hypre_CSRMatrixNumRows(A_diag); */
   /* off-diag part of A */
   hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
   HYPRE_Complex *A_offd_a = hypre_CSRMatrixData(A_offd);
   HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
   HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
   /* HYPRE_Int *row_starts = hypre_ParCSRMatrixRowStarts(A); */
   /* HYPRE_Int first_row = hypre_ParCSRMatrixFirstRowIndex(A); */
   HYPRE_Int first_col = hypre_ParCSRMatrixFirstColDiag(A);
   HYPRE_BigInt *col_map_offd_A = hypre_ParCSRMatrixColMapOffd(A);
   HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd);
   HYPRE_BigInt *d_col_map_offd_A = hypre_ParCSRMatrixDeviceColMapOffd(A);
   MPI_Comm comm = hypre_ParCSRMatrixComm(A);
   HYPRE_Int num_procs;
   HYPRE_Int my_id;
   void **vrequest;
   hypre_CSRMatrix *A_ext;
   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_MPI_Comm_rank(comm, &my_id);
   /* number of sends (#procs) */
   num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
   /* number of rows to send */
   num_rows_send = hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends);
   /* number of recvs (#procs) */
   num_recvs = hypre_ParCSRCommPkgNumRecvs(comm_pkg);
   /* number of rows to recv */
   num_rows_recv = hypre_ParCSRCommPkgRecvVecStart(comm_pkg, num_recvs);
   /* must be true if indices contains proper offd indices */
   hypre_assert(indices_len == num_rows_recv);
   /* send_i/recv_i:
    * the arrays to send and recv: we first send and recv the row lengths */
   d_send_i = hypre_TAlloc(HYPRE_Int, num_rows_send + 1, HYPRE_MEMORY_DEVICE);
   d_send_map = hypre_TAlloc(HYPRE_Int, num_rows_send, HYPRE_MEMORY_DEVICE);
   send_i = hypre_TAlloc(HYPRE_Int, num_rows_send, HYPRE_MEMORY_HOST);
   recv_i = hypre_TAlloc(HYPRE_Int, num_rows_recv + 1, HYPRE_MEMORY_HOST);
   d_recv_i = hypre_TAlloc(HYPRE_Int, num_rows_recv + 1, HYPRE_MEMORY_DEVICE);
   /* fill the send array with row lengths */
   hypre_TMemcpy(d_send_map, hypre_ParCSRCommPkgSendMapElmts(comm_pkg), HYPRE_Int,
                 num_rows_send, HYPRE_MEMORY_DEVICE, HYPRE_MEMORY_HOST);
   /* d_send_i[0] = 0; slot r+1 receives the nnz of sent row r */
   hypre_Memset(d_send_i, 0, sizeof(HYPRE_Int), HYPRE_MEMORY_DEVICE);
   hypreDevice_GetRowNnz(num_rows_send, d_send_map, A_diag_i, A_offd_i, d_send_i + 1);
   /* send array send_i out: deviceTohost first and MPI (async)
    * note the shift in recv_i by one */
   hypre_TMemcpy(send_i, d_send_i + 1, HYPRE_Int, num_rows_send, HYPRE_MEMORY_HOST,
                 HYPRE_MEMORY_DEVICE);
   comm_handle = hypre_ParCSRCommHandleCreate(11, comm_pkg, send_i, recv_i + 1);
   /* meanwhile turn the send-side row lengths into row pointers */
   hypreDevice_IntegerInclusiveScan(num_rows_send + 1, d_send_i);
   /* total number of nnz to send */
   hypre_TMemcpy(&num_nnz_send, d_send_i + num_rows_send, HYPRE_Int, 1, HYPRE_MEMORY_HOST,
                 HYPRE_MEMORY_DEVICE);
   /* prepare data to send out. overlap with the above commmunication */
   d_send_j = hypre_TAlloc(HYPRE_BigInt, num_nnz_send, HYPRE_MEMORY_DEVICE);
   if (want_data)
   {
      d_send_a = hypre_TAlloc(HYPRE_Complex, num_nnz_send, HYPRE_MEMORY_DEVICE);
   }
   /* lazily mirror the offd column map onto the device and cache it on A */
   if (d_col_map_offd_A == NULL)
   {
      d_col_map_offd_A = hypre_TAlloc(HYPRE_BigInt, num_cols_A_offd, HYPRE_MEMORY_DEVICE);
      hypre_TMemcpy(d_col_map_offd_A, col_map_offd_A, HYPRE_BigInt, num_cols_A_offd,
                    HYPRE_MEMORY_DEVICE, HYPRE_MEMORY_HOST);
      hypre_ParCSRMatrixDeviceColMapOffd(A) = d_col_map_offd_A;
   }
   /* job == 2, d_send_i is input that contains row ptrs (length num_rows_send) */
   hypreDevice_CopyParCSRRows(num_rows_send, d_send_map, 2, num_procs > 1,
                              first_col, d_col_map_offd_A,
                              A_diag_i, A_diag_j, A_diag_a,
                              A_offd_i, A_offd_j, A_offd_a,
                              d_send_i, d_send_j, d_send_a);
   /* pointers to each proc in send_j */
   send_jstarts = hypre_TAlloc(HYPRE_Int, num_sends + 1, HYPRE_MEMORY_HOST);
   send_jstarts[0] = 0;
   for (i = 1; i <= num_sends; i++)
   {
      send_jstarts[i] = send_jstarts[i - 1];
      /* accumulate nnz of every row going to proc i-1 */
      for ( j = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i - 1);
            j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
            j++ )
      {
         send_jstarts[i] += send_i[j];
      }
   }
   hypre_assert(send_jstarts[num_sends] == num_nnz_send);
   /* finish the above communication: send_i/recv_i */
   hypre_ParCSRCommHandleDestroy(comm_handle);
   /* adjust recv_i to ptrs (prefix sum of received row lengths) */
   recv_i[0] = 0;
   for (i = 1; i <= num_rows_recv; i++)
   {
      recv_i[i] += recv_i[i - 1];
   }
   num_nnz_recv = recv_i[num_rows_recv];
   /* allocate device memory for j and a */
   d_recv_j = hypre_TAlloc(HYPRE_BigInt, num_nnz_recv, HYPRE_MEMORY_DEVICE);
   if (want_data)
   {
      d_recv_a = hypre_TAlloc(HYPRE_Complex, num_nnz_recv, HYPRE_MEMORY_DEVICE);
   }
   /* per-proc offsets into the recv j/a buffers */
   recv_jstarts = hypre_TAlloc(HYPRE_Int, num_recvs + 1, HYPRE_MEMORY_HOST);
   recv_jstarts[0] = 0;
   for (i = 1; i <= num_recvs; i++)
   {
      j = hypre_ParCSRCommPkgRecvVecStart(comm_pkg, i);
      recv_jstarts[i] = recv_i[j];
   }
   /* ready to send and recv: create a communication package for data */
   comm_pkg_j = hypre_CTAlloc(hypre_ParCSRCommPkg, 1, HYPRE_MEMORY_HOST);
   hypre_ParCSRCommPkgComm (comm_pkg_j) = comm;
   hypre_ParCSRCommPkgNumSends (comm_pkg_j) = num_sends;
   hypre_ParCSRCommPkgSendProcs (comm_pkg_j) = hypre_ParCSRCommPkgSendProcs(comm_pkg);
   hypre_ParCSRCommPkgSendMapStarts(comm_pkg_j) = send_jstarts;
   hypre_ParCSRCommPkgNumRecvs (comm_pkg_j) = num_recvs;
   hypre_ParCSRCommPkgRecvProcs (comm_pkg_j) = hypre_ParCSRCommPkgRecvProcs(comm_pkg);
   hypre_ParCSRCommPkgRecvVecStarts(comm_pkg_j) = recv_jstarts;
   /* init communication */
   /* ja (big column indices, job 21) */
   comm_handle_j = hypre_ParCSRCommHandleCreate_v2(21, comm_pkg_j,
                                                   HYPRE_MEMORY_DEVICE, d_send_j,
                                                   HYPRE_MEMORY_DEVICE, d_recv_j);
   if (want_data)
   {
      /* a (values, job 1) */
      comm_handle_a = hypre_ParCSRCommHandleCreate_v2(1, comm_pkg_j,
                                                      HYPRE_MEMORY_DEVICE, d_send_a,
                                                      HYPRE_MEMORY_DEVICE, d_recv_a);
   }
   else
   {
      comm_handle_a = NULL;
   }
   /* row pointers of A_ext onto the device */
   hypre_TMemcpy(d_recv_i, recv_i, HYPRE_Int, num_rows_recv + 1, HYPRE_MEMORY_DEVICE,
                 HYPRE_MEMORY_HOST);
   /* create A_ext: on device */
   A_ext = hypre_CSRMatrixCreate(num_rows_recv, hypre_ParCSRMatrixGlobalNumCols(A), num_nnz_recv);
   hypre_CSRMatrixI (A_ext) = d_recv_i;
   hypre_CSRMatrixBigJ(A_ext) = d_recv_j;
   hypre_CSRMatrixData(A_ext) = d_recv_a;
   hypre_CSRMatrixMemoryLocation(A_ext) = HYPRE_MEMORY_DEVICE;
   /* output: the Wait call unpacks these three entries */
   vrequest = hypre_TAlloc(void *, 3, HYPRE_MEMORY_HOST);
   vrequest[0] = (void *) comm_handle_j;
   vrequest[1] = (void *) comm_handle_a;
   vrequest[2] = (void *) A_ext;
   *request_ptr = (void *) vrequest;
   /* free what is no longer needed; d_send_j/d_send_a stay alive until Wait */
   hypre_TFree(send_i, HYPRE_MEMORY_HOST);
   hypre_TFree(recv_i, HYPRE_MEMORY_HOST);
   hypre_TFree(d_send_i, HYPRE_MEMORY_DEVICE);
   hypre_TFree(d_send_map, HYPRE_MEMORY_DEVICE);
   hypre_TFree(hypre_ParCSRCommPkgSendMapStarts(comm_pkg_j), HYPRE_MEMORY_HOST);
   hypre_TFree(hypre_ParCSRCommPkgRecvVecStarts(comm_pkg_j), HYPRE_MEMORY_HOST);
   hypre_TFree(comm_pkg_j, HYPRE_MEMORY_HOST);
   return hypre_error_flag;
}
hypre_CSRMatrix*
hypre_ParcsrGetExternalRowsDeviceWait(void *vrequest)
{
void **request = (void **) vrequest;
hypre_ParCSRCommHandle *comm_handle_j = (hypre_ParCSRCommHandle *) request[0];
hypre_ParCSRCommHandle *comm_handle_a = (hypre_ParCSRCommHandle *) request[1];
hypre_CSRMatrix *A_ext = (hypre_CSRMatrix *) request[2];
HYPRE_BigInt *send_j = comm_handle_j ? (HYPRE_BigInt *)
hypre_ParCSRCommHandleSendData(comm_handle_j) : NULL;
HYPRE_Complex *send_a = comm_handle_a ? (HYPRE_Complex *)
hypre_ParCSRCommHandleSendData(comm_handle_a) : NULL;
hypre_ParCSRCommHandleDestroy(comm_handle_j);
hypre_ParCSRCommHandleDestroy(comm_handle_a);
hypre_TFree(send_j, HYPRE_MEMORY_DEVICE);
hypre_TFree(send_a, HYPRE_MEMORY_DEVICE);
hypre_TFree(request, HYPRE_MEMORY_HOST);
return A_ext;
}
/* Merge the diag and offd parts of A into one device CSR matrix B = [Adiag Aoffd]
 * with global (big) column indices. The returned matrix lives on the device. */
hypre_CSRMatrix*
hypre_MergeDiagAndOffdDevice(hypre_ParCSRMatrix *A)
{
   MPI_Comm comm = hypre_ParCSRMatrixComm(A);

   /* the two local pieces of A */
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   HYPRE_Complex *A_diag_a = hypre_CSRMatrixData(A_diag);
   HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
   HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
   hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
   HYPRE_Complex *A_offd_a = hypre_CSRMatrixData(A_offd);
   HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
   HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);

   /* sizes and column maps */
   HYPRE_Int nrows_local = hypre_CSRMatrixNumRows(A_diag);
   HYPRE_BigInt ncols_global = hypre_ParCSRMatrixGlobalNumCols(A);
   HYPRE_BigInt first_col = hypre_ParCSRMatrixFirstColDiag(A);
   HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd);
   HYPRE_BigInt *col_map_offd_A = hypre_ParCSRMatrixColMapOffd(A);
   HYPRE_BigInt *d_col_map_offd_A = hypre_ParCSRMatrixDeviceColMapOffd(A);

   /* output pieces */
   hypre_CSRMatrix *B;
   HYPRE_Int B_nrows = nrows_local;
   HYPRE_BigInt B_ncols = ncols_global;
   HYPRE_Int *B_i = hypre_TAlloc(HYPRE_Int, B_nrows + 1, HYPRE_MEMORY_DEVICE);
   HYPRE_BigInt *B_j;
   HYPRE_Complex *B_a;
   HYPRE_Int B_nnz;

   HYPRE_Int num_procs;
   hypre_MPI_Comm_size(comm, &num_procs);

   /* B_i[0] = 0; B_i[r+1] = nnz(diag row r) + nnz(offd row r); then scan to row ptrs */
   hypre_Memset(B_i, 0, sizeof(HYPRE_Int), HYPRE_MEMORY_DEVICE);
   hypreDevice_GetRowNnz(B_nrows, NULL, A_diag_i, A_offd_i, B_i + 1);
   hypreDevice_IntegerInclusiveScan(B_nrows + 1, B_i);

   /* total number of nnz (last row pointer, device -> host) */
   hypre_TMemcpy(&B_nnz, B_i + B_nrows, HYPRE_Int, 1, HYPRE_MEMORY_HOST, HYPRE_MEMORY_DEVICE);

   B_j = hypre_TAlloc(HYPRE_BigInt, B_nnz, HYPRE_MEMORY_DEVICE);
   B_a = hypre_TAlloc(HYPRE_Complex, B_nnz, HYPRE_MEMORY_DEVICE);

   /* lazily mirror the offd column map onto the device and cache it on A */
   if (d_col_map_offd_A == NULL)
   {
      d_col_map_offd_A = hypre_TAlloc(HYPRE_BigInt, num_cols_A_offd, HYPRE_MEMORY_DEVICE);
      hypre_TMemcpy(d_col_map_offd_A, col_map_offd_A, HYPRE_BigInt, num_cols_A_offd,
                    HYPRE_MEMORY_DEVICE, HYPRE_MEMORY_HOST);
      hypre_ParCSRMatrixDeviceColMapOffd(A) = d_col_map_offd_A;
   }

   /* job == 2: write global column ids and values into B row by row */
   hypreDevice_CopyParCSRRows(B_nrows, NULL, 2, num_procs > 1, first_col, d_col_map_offd_A,
                              A_diag_i, A_diag_j, A_diag_a, A_offd_i, A_offd_j, A_offd_a,
                              B_i, B_j, B_a);

   /* assemble the output CSR on the device */
   B = hypre_CSRMatrixCreate(B_nrows, B_ncols, B_nnz);
   hypre_CSRMatrixI (B) = B_i;
   hypre_CSRMatrixBigJ(B) = B_j;
   hypre_CSRMatrixData(B) = B_a;
   hypre_CSRMatrixMemoryLocation(B) = HYPRE_MEMORY_DEVICE;

   hypre_SyncComputeStream(hypre_handle());

   return B;
}
/**
 * Initiate the reverse exchange of external CSR rows: this rank sends back
 * the rows it holds in B_ext and receives the rows other ranks hold for it,
 * assembled as B_int. The send/recv roles of comm_pkg_A are deliberately
 * swapped (as in a transpose matvec). Row nnz are exchanged first; the
 * big-column-index and (optional) value transfers are left in flight, to be
 * completed by hypre_ExchangeExternalRowsDeviceWait on *request_ptr.
 *
 * @param B_ext       device CSR with big column indices, rows to send back
 * @param comm_pkg_A  communication package whose pattern is reversed here
 * @param want_data   nonzero => also exchange matrix values
 * @param request_ptr out: opaque request (comm handles + B_int) for Wait
 */
HYPRE_Int
hypre_ExchangeExternalRowsDeviceInit( hypre_CSRMatrix *B_ext,
                                      hypre_ParCSRCommPkg *comm_pkg_A,
                                      HYPRE_Int want_data,
                                      void **request_ptr)
{
   MPI_Comm comm = hypre_ParCSRCommPkgComm(comm_pkg_A);
   HYPRE_Int num_recvs = hypre_ParCSRCommPkgNumRecvs(comm_pkg_A);
   HYPRE_Int *recv_procs = hypre_ParCSRCommPkgRecvProcs(comm_pkg_A);
   HYPRE_Int *recv_vec_starts = hypre_ParCSRCommPkgRecvVecStarts(comm_pkg_A);
   HYPRE_Int num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg_A);
   HYPRE_Int *send_procs = hypre_ParCSRCommPkgSendProcs(comm_pkg_A);
   HYPRE_Int *send_map_starts = hypre_ParCSRCommPkgSendMapStarts(comm_pkg_A);
   HYPRE_Int num_elmts_send = send_map_starts[num_sends];
   HYPRE_Int num_elmts_recv = recv_vec_starts[num_recvs];
   /* B_ext: the rows this rank currently holds (device) */
   HYPRE_Int *B_ext_i_d = hypre_CSRMatrixI(B_ext);
   HYPRE_BigInt *B_ext_j_d = hypre_CSRMatrixBigJ(B_ext);
   HYPRE_Complex *B_ext_a_d = hypre_CSRMatrixData(B_ext);
   HYPRE_Int B_ext_ncols = hypre_CSRMatrixNumCols(B_ext);
   HYPRE_Int B_ext_nrows = hypre_CSRMatrixNumRows(B_ext);
   HYPRE_Int B_ext_nnz = hypre_CSRMatrixNumNonzeros(B_ext);
   HYPRE_Int *B_ext_rownnz_d = hypre_TAlloc(HYPRE_Int, B_ext_nrows + 1, HYPRE_MEMORY_DEVICE);
   HYPRE_Int *B_ext_rownnz_h = hypre_TAlloc(HYPRE_Int, B_ext_nrows, HYPRE_MEMORY_HOST);
   HYPRE_Int *B_ext_i_h = hypre_TAlloc(HYPRE_Int, B_ext_nrows + 1, HYPRE_MEMORY_HOST);
   hypre_assert(num_elmts_recv == B_ext_nrows);
   /* output matrix */
   hypre_CSRMatrix *B_int_d;
   HYPRE_Int B_int_nrows = num_elmts_send;
   HYPRE_Int B_int_ncols = B_ext_ncols;
   HYPRE_Int *B_int_i_h = hypre_TAlloc(HYPRE_Int, B_int_nrows + 1, HYPRE_MEMORY_HOST);
   HYPRE_Int *B_int_i_d = hypre_TAlloc(HYPRE_Int, B_int_nrows + 1, HYPRE_MEMORY_DEVICE);
   HYPRE_BigInt *B_int_j_d = NULL;
   HYPRE_Complex *B_int_a_d = NULL;
   HYPRE_Int B_int_nnz;
   hypre_ParCSRCommHandle *comm_handle, *comm_handle_j, *comm_handle_a;
   hypre_ParCSRCommPkg *comm_pkg_j;
   HYPRE_Int *jdata_recv_vec_starts;
   HYPRE_Int *jdata_send_map_starts;
   HYPRE_Int i;
   HYPRE_Int num_procs, my_id;
   void **vrequest;
   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_MPI_Comm_rank(comm, &my_id);
   jdata_send_map_starts = hypre_TAlloc(HYPRE_Int, num_sends + 1, HYPRE_MEMORY_HOST);
   /*--------------------------------------------------------------------------
    * B_ext_rownnz contains the number of elements of row j
    * (to be determined through send_map_elmnts on the receiving end)
    *--------------------------------------------------------------------------*/
   /* adjacent_difference of the row pointers = per-row nnz (entry 0 is a dummy) */
   HYPRE_THRUST_CALL(adjacent_difference, B_ext_i_d, B_ext_i_d + B_ext_nrows + 1, B_ext_rownnz_d);
   hypre_TMemcpy(B_ext_rownnz_h, B_ext_rownnz_d + 1, HYPRE_Int, B_ext_nrows,
                 HYPRE_MEMORY_HOST, HYPRE_MEMORY_DEVICE);
   /*--------------------------------------------------------------------------
    * initialize communication: send/recv the row nnz
    * (note the use of comm_pkg_A, mode 12, as in transpose matvec
    *--------------------------------------------------------------------------*/
   comm_handle = hypre_ParCSRCommHandleCreate(12, comm_pkg_A, B_ext_rownnz_h, B_int_i_h + 1);
   jdata_recv_vec_starts = hypre_TAlloc(HYPRE_Int, num_recvs + 1, HYPRE_MEMORY_HOST);
   jdata_recv_vec_starts[0] = 0;
   /* host-side row pointers of B_ext (prefix sum of row nnz) */
   B_ext_i_h[0] = 0;
   hypre_TMemcpy(B_ext_i_h + 1, B_ext_rownnz_h, HYPRE_Int, B_ext_nrows, HYPRE_MEMORY_HOST,
                 HYPRE_MEMORY_HOST);
   for (i = 1; i <= B_ext_nrows; i++)
   {
      B_ext_i_h[i] += B_ext_i_h[i - 1];
   }
   hypre_assert(B_ext_i_h[B_ext_nrows] == B_ext_nnz);
   /* per-proc offsets into B_ext's j/a data (send side of the reversed pkg) */
   for (i = 1; i <= num_recvs; i++)
   {
      jdata_recv_vec_starts[i] = B_ext_i_h[recv_vec_starts[i]];
   }
   /* reversed communication package: A's recvs become sends and vice versa */
   comm_pkg_j = hypre_CTAlloc(hypre_ParCSRCommPkg, 1, HYPRE_MEMORY_HOST);
   hypre_ParCSRCommPkgComm(comm_pkg_j) = comm;
   hypre_ParCSRCommPkgNumSends(comm_pkg_j) = num_recvs;
   hypre_ParCSRCommPkgNumRecvs(comm_pkg_j) = num_sends;
   hypre_ParCSRCommPkgSendProcs(comm_pkg_j) = recv_procs;
   hypre_ParCSRCommPkgRecvProcs(comm_pkg_j) = send_procs;
   /* wait for the row-nnz exchange; B_int_i_h[1..] now holds received nnz */
   hypre_ParCSRCommHandleDestroy(comm_handle);
   /*--------------------------------------------------------------------------
    * compute B_int: row nnz to row ptrs
    *--------------------------------------------------------------------------*/
   B_int_i_h[0] = 0;
   for (i = 1; i <= B_int_nrows; i++)
   {
      B_int_i_h[i] += B_int_i_h[i - 1];
   }
   B_int_nnz = B_int_i_h[B_int_nrows];
   B_int_j_d = hypre_TAlloc(HYPRE_BigInt, B_int_nnz, HYPRE_MEMORY_DEVICE);
   if (want_data)
   {
      B_int_a_d = hypre_TAlloc(HYPRE_Complex, B_int_nnz, HYPRE_MEMORY_DEVICE);
   }
   /* per-proc offsets into B_int's j/a data (recv side of the reversed pkg) */
   for (i = 0; i <= num_sends; i++)
   {
      jdata_send_map_starts[i] = B_int_i_h[send_map_starts[i]];
   }
   /* note the order of send/recv is reversed */
   hypre_ParCSRCommPkgRecvVecStarts(comm_pkg_j) = jdata_send_map_starts;
   hypre_ParCSRCommPkgSendMapStarts(comm_pkg_j) = jdata_recv_vec_starts;
   /* send/recv CSR rows */
   if (want_data)
   {
      comm_handle_a = hypre_ParCSRCommHandleCreate_v2( 1, comm_pkg_j,
                                                       HYPRE_MEMORY_DEVICE, B_ext_a_d,
                                                       HYPRE_MEMORY_DEVICE, B_int_a_d );
   }
   else
   {
      comm_handle_a = NULL;
   }
   comm_handle_j = hypre_ParCSRCommHandleCreate_v2(21, comm_pkg_j,
                                                   HYPRE_MEMORY_DEVICE, B_ext_j_d,
                                                   HYPRE_MEMORY_DEVICE, B_int_j_d );
   hypre_TMemcpy(B_int_i_d, B_int_i_h, HYPRE_Int, B_int_nrows + 1, HYPRE_MEMORY_DEVICE,
                 HYPRE_MEMORY_HOST);
   /* create CSR: on device */
   B_int_d = hypre_CSRMatrixCreate(B_int_nrows, B_int_ncols, B_int_nnz);
   hypre_CSRMatrixI(B_int_d) = B_int_i_d;
   hypre_CSRMatrixBigJ(B_int_d) = B_int_j_d;
   hypre_CSRMatrixData(B_int_d) = B_int_a_d;
   hypre_CSRMatrixMemoryLocation(B_int_d) = HYPRE_MEMORY_DEVICE;
   /* output: the Wait call unpacks these three entries */
   vrequest = hypre_TAlloc(void *, 3, HYPRE_MEMORY_HOST);
   vrequest[0] = (void *) comm_handle_j;
   vrequest[1] = (void *) comm_handle_a;
   vrequest[2] = (void *) B_int_d;
   *request_ptr = (void *) vrequest;
   /* free host/device scratch; the in-flight buffers belong to the handles */
   hypre_TFree(B_ext_rownnz_d, HYPRE_MEMORY_DEVICE);
   hypre_TFree(B_ext_rownnz_h, HYPRE_MEMORY_HOST);
   hypre_TFree(B_ext_i_h, HYPRE_MEMORY_HOST);
   hypre_TFree(B_int_i_h, HYPRE_MEMORY_HOST);
   hypre_TFree(hypre_ParCSRCommPkgSendMapStarts(comm_pkg_j), HYPRE_MEMORY_HOST);
   hypre_TFree(hypre_ParCSRCommPkgRecvVecStarts(comm_pkg_j), HYPRE_MEMORY_HOST);
   hypre_TFree(comm_pkg_j, HYPRE_MEMORY_HOST);
   return hypre_error_flag;
}
hypre_CSRMatrix*
hypre_ExchangeExternalRowsDeviceWait(void *vrequest)
{
void **request = (void **) vrequest;
hypre_ParCSRCommHandle *comm_handle_j = (hypre_ParCSRCommHandle *) request[0];
hypre_ParCSRCommHandle *comm_handle_a = (hypre_ParCSRCommHandle *) request[1];
hypre_CSRMatrix *B_int_d = (hypre_CSRMatrix *) request[2];
/* communication done */
hypre_ParCSRCommHandleDestroy(comm_handle_j);
hypre_ParCSRCommHandleDestroy(comm_handle_a);
hypre_TFree(request, HYPRE_MEMORY_HOST);
return B_int_d;
}
/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
/* Begin extracting the rows of B that correspond to A's off-diagonal
 * columns (the "B_ext" of an SpGEMM), using A's communication package.
 * The caller completes the extraction with
 * hypre_ParCSRMatrixExtractBExtDeviceWait on *request_ptr. */
HYPRE_Int
hypre_ParCSRMatrixExtractBExtDeviceInit( hypre_ParCSRMatrix *B,
                                         hypre_ParCSRMatrix *A,
                                         HYPRE_Int want_data,
                                         void **request_ptr)
{
   /* B's diag and offd must live in the same memory space */
   hypre_assert( hypre_CSRMatrixMemoryLocation(hypre_ParCSRMatrixDiag(B)) ==
                 hypre_CSRMatrixMemoryLocation(hypre_ParCSRMatrixOffd(B)) );
   /*
   hypre_assert( hypre_GetActualMemLocation(
   hypre_CSRMatrixMemoryLocation(hypre_ParCSRMatrixDiag(B))) == HYPRE_MEMORY_DEVICE );
   */

   /* build A's communication package on demand */
   if (hypre_ParCSRMatrixCommPkg(A) == NULL)
   {
      hypre_MatvecCommPkgCreate(A);
   }

   hypre_ParcsrGetExternalRowsDeviceInit(B,
                                         hypre_CSRMatrixNumCols(hypre_ParCSRMatrixOffd(A)),
                                         hypre_ParCSRMatrixColMapOffd(A),
                                         hypre_ParCSRMatrixCommPkg(A),
                                         want_data,
                                         request_ptr);

   return hypre_error_flag;
}
/* Complete a B_ext extraction started by hypre_ParCSRMatrixExtractBExtDeviceInit;
 * returns the external rows as a device CSR matrix. */
hypre_CSRMatrix*
hypre_ParCSRMatrixExtractBExtDeviceWait(void *request)
{
   hypre_CSRMatrix *B_ext = hypre_ParcsrGetExternalRowsDeviceWait(request);

   return B_ext;
}
/* Blocking convenience wrapper: initiate and immediately complete the
 * extraction of B_ext (rows of B matching A's offd columns). */
hypre_CSRMatrix*
hypre_ParCSRMatrixExtractBExtDevice( hypre_ParCSRMatrix *B,
                                     hypre_ParCSRMatrix *A,
                                     HYPRE_Int want_data )
{
   void *request = NULL;

   hypre_ParCSRMatrixExtractBExtDeviceInit(B, A, want_data, &request);

   return hypre_ParCSRMatrixExtractBExtDeviceWait(request);
}
/* return B = [Adiag, Aoffd] */
#if 1
/* One warp handles one row: copy row 'row' of [diag | offd] into the merged
 * CSR output (d_ib/d_jb/d_ab). Diag columns keep their local index; offd
 * columns are optionally remapped through cols_offd_map and then shifted by
 * diag_ncol, so the output column space is [0, diag_ncol) for diag entries
 * followed by the offd block. d_ib must already hold the merged row ptrs. */
__global__ void
hypreCUDAKernel_ConcatDiagAndOffd(HYPRE_Int nrows, HYPRE_Int diag_ncol,
                                  HYPRE_Int *d_diag_i, HYPRE_Int *d_diag_j, HYPRE_Complex *d_diag_a,
                                  HYPRE_Int *d_offd_i, HYPRE_Int *d_offd_j, HYPRE_Complex *d_offd_a,
                                  HYPRE_Int *cols_offd_map,
                                  HYPRE_Int *d_ib, HYPRE_Int *d_jb, HYPRE_Complex *d_ab)
{
   const HYPRE_Int row = hypre_cuda_get_grid_warp_id<1, 1>();

   if (row >= nrows)
   {
      return;
   }

   /* lane id inside the warp */
   const HYPRE_Int lane_id = hypre_cuda_get_lane_id<1>();
   HYPRE_Int i, j, k, p, istart, iend, bstart;

   /* diag part */
   /* lanes 0/1 fetch the row's begin/end ptrs; lane 0 fetches the output base */
   if (lane_id < 2)
   {
      j = read_only_load(d_diag_i + row + lane_id);
   }
   if (lane_id == 0)
   {
      k = read_only_load(d_ib + row);
   }
   /* broadcast the three scalars to every lane of the warp */
   istart = __shfl_sync(HYPRE_WARP_FULL_MASK, j, 0);
   iend   = __shfl_sync(HYPRE_WARP_FULL_MASK, j, 1);
   bstart = __shfl_sync(HYPRE_WARP_FULL_MASK, k, 0);

   /* p translates a diag data index i into its slot in the output */
   p = bstart - istart;
   for (i = istart + lane_id; i < iend; i += HYPRE_WARP_SIZE)
   {
      d_jb[p + i] = read_only_load(d_diag_j + i);
      d_ab[p + i] = read_only_load(d_diag_a + i);
   }

   /* offd part: placed immediately after this row's diag entries */
   if (lane_id < 2)
   {
      j = read_only_load(d_offd_i + row + lane_id);
   }
   bstart += iend - istart;
   istart = __shfl_sync(HYPRE_WARP_FULL_MASK, j, 0);
   iend   = __shfl_sync(HYPRE_WARP_FULL_MASK, j, 1);
   p = bstart - istart;
   for (i = istart + lane_id; i < iend; i += HYPRE_WARP_SIZE)
   {
      const HYPRE_Int t = read_only_load(d_offd_j + i);
      /* optional local-column remap, then shift past the diag column block */
      d_jb[p + i] = (cols_offd_map ? read_only_load(&cols_offd_map[t]) : t) + diag_ncol;
      d_ab[p + i] = read_only_load(d_offd_a + i);
   }
}
/* Return B = [Adiag Aoffd] as one device CSR matrix with LOCAL column
 * indices: diag columns first, offd columns appended after them (no global
 * remapping — offd j's are shifted by NumCols(A_diag) inside the kernel). */
hypre_CSRMatrix*
hypre_ConcatDiagAndOffdDevice(hypre_ParCSRMatrix *A)
{
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);

   hypre_CSRMatrix *B = hypre_CSRMatrixCreate( hypre_CSRMatrixNumRows(A_diag),
                                               hypre_CSRMatrixNumCols(A_diag) + hypre_CSRMatrixNumCols(A_offd),
                                               hypre_CSRMatrixNumNonzeros(A_diag) + hypre_CSRMatrixNumNonzeros(A_offd) );

   hypre_CSRMatrixInitialize_v2(B, 0, HYPRE_MEMORY_DEVICE);

   /* per-row combined nnz into B_i, then exclusive scan => row pointers */
   hypreDevice_GetRowNnz(hypre_CSRMatrixNumRows(B), NULL, hypre_CSRMatrixI(A_diag),
                         hypre_CSRMatrixI(A_offd), hypre_CSRMatrixI(B));

   HYPRE_THRUST_CALL( exclusive_scan,
                      hypre_CSRMatrixI(B),
                      hypre_CSRMatrixI(B) + hypre_CSRMatrixNumRows(B) + 1,
                      hypre_CSRMatrixI(B) );

   /* one warp per row copies [diag | offd] entries; NULL => no offd col remap */
   const dim3 bDim = hypre_GetDefaultDeviceBlockDimension();
   const dim3 gDim = hypre_GetDefaultDeviceGridDimension(hypre_CSRMatrixNumRows(A_diag), "warp", bDim);

   HYPRE_CUDA_LAUNCH( hypreCUDAKernel_ConcatDiagAndOffd,
                      gDim, bDim,
                      hypre_CSRMatrixNumRows(A_diag),
                      hypre_CSRMatrixNumCols(A_diag),
                      hypre_CSRMatrixI(A_diag),
                      hypre_CSRMatrixJ(A_diag),
                      hypre_CSRMatrixData(A_diag),
                      hypre_CSRMatrixI(A_offd),
                      hypre_CSRMatrixJ(A_offd),
                      hypre_CSRMatrixData(A_offd),
                      NULL,
                      hypre_CSRMatrixI(B),
                      hypre_CSRMatrixJ(B),
                      hypre_CSRMatrixData(B) );

   return B;
}
#else
/* Alternative (compiled-out, #else branch) thrust-only implementation of
 * hypre_ConcatDiagAndOffdDevice: builds COO triplets for diag and offd,
 * stable-sorts by row, and converts row indices back to pointers. */
hypre_CSRMatrix*
hypre_ConcatDiagAndOffdDevice(hypre_ParCSRMatrix *A)
{
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
   HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
   HYPRE_Complex *A_diag_a = hypre_CSRMatrixData(A_diag);
   HYPRE_Int A_diag_nnz = hypre_CSRMatrixNumNonzeros(A_diag);
   hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
   HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
   HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
   HYPRE_Complex *A_offd_a = hypre_CSRMatrixData(A_offd);
   HYPRE_Int A_offd_nnz = hypre_CSRMatrixNumNonzeros(A_offd);
   hypre_CSRMatrix *B;
   HYPRE_Int B_nrows = hypre_CSRMatrixNumRows(A_diag);
   HYPRE_Int B_ncols = hypre_CSRMatrixNumCols(A_diag) + hypre_CSRMatrixNumCols(A_offd);
   HYPRE_Int B_nnz = A_diag_nnz + A_offd_nnz;
   /* B_ii: COO row indices; converted to row ptrs at the end */
   HYPRE_Int *B_ii = hypre_TAlloc(HYPRE_Int, B_nnz, HYPRE_MEMORY_DEVICE);
   HYPRE_Int *B_j = hypre_TAlloc(HYPRE_Int, B_nnz, HYPRE_MEMORY_DEVICE);
   HYPRE_Complex *B_a = hypre_TAlloc(HYPRE_Complex, B_nnz, HYPRE_MEMORY_DEVICE);
   // Adiag: expand row ptrs to row indices, then bulk-copy (i, j, a) triplets
   HYPRE_Int *A_diag_ii = hypreDevice_CsrRowPtrsToIndices(B_nrows, A_diag_nnz, A_diag_i);
   HYPRE_THRUST_CALL( copy_n,
                      thrust::make_zip_iterator(thrust::make_tuple(A_diag_ii, A_diag_j, A_diag_a)),
                      A_diag_nnz,
                      thrust::make_zip_iterator(thrust::make_tuple(B_ii, B_j, B_a)) );
   hypre_TFree(A_diag_ii, HYPRE_MEMORY_DEVICE);
   // Aoffd: copy (i, a) pairs; the j's get the +NumCols(A_diag) shift below
   HYPRE_Int *A_offd_ii = hypreDevice_CsrRowPtrsToIndices(B_nrows, A_offd_nnz, A_offd_i);
   HYPRE_THRUST_CALL( copy_n,
                      thrust::make_zip_iterator(thrust::make_tuple(A_offd_ii, A_offd_a)),
                      A_offd_nnz,
                      thrust::make_zip_iterator(thrust::make_tuple(B_ii, B_a)) + A_diag_nnz );
   hypre_TFree(A_offd_ii, HYPRE_MEMORY_DEVICE);
   /* shift offd columns past the diag column block */
   HYPRE_THRUST_CALL( transform,
                      A_offd_j,
                      A_offd_j + A_offd_nnz,
                      thrust::make_constant_iterator(hypre_CSRMatrixNumCols(A_diag)),
                      B_j + A_diag_nnz,
                      thrust::plus<HYPRE_Int>() );
   // B: stable sort keeps diag entries before offd entries within each row
   HYPRE_THRUST_CALL( stable_sort_by_key,
                      B_ii,
                      B_ii + B_nnz,
                      thrust::make_zip_iterator(thrust::make_tuple(B_j, B_a)) );
   HYPRE_Int *B_i = hypreDevice_CsrRowIndicesToPtrs(B_nrows, B_nnz, B_ii);
   hypre_TFree(B_ii, HYPRE_MEMORY_DEVICE);
   B = hypre_CSRMatrixCreate(B_nrows, B_ncols, B_nnz);
   hypre_CSRMatrixI(B) = B_i;
   hypre_CSRMatrixJ(B) = B_j;
   hypre_CSRMatrixData(B) = B_a;
   hypre_CSRMatrixMemoryLocation(B) = HYPRE_MEMORY_DEVICE;
   return B;
}
#endif
/* return B = [Adiag, Aoffd; E] */
#if 1
/* Build B = [Adiag Aoffd; E] on the device with LOCAL column indices:
 * E (external rows with big/global columns) is split against A's column
 * ranges into diag/offd pieces; the union of A's and E's offd columns
 * defines a new offd column space. A's rows come first, E's rows follow.
 *
 * Outputs: *B_ptr (device CSR), *num_cols_offd_ptr and *cols_map_offd_ptr
 * (the merged offd column count and its global column map). */
HYPRE_Int
hypre_ConcatDiagOffdAndExtDevice(hypre_ParCSRMatrix *A,
                                 hypre_CSRMatrix *E,
                                 hypre_CSRMatrix **B_ptr,
                                 HYPRE_Int *num_cols_offd_ptr,
                                 HYPRE_BigInt **cols_map_offd_ptr)
{
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
   hypre_CSRMatrix *E_diag, *E_offd, *B;
   HYPRE_Int *cols_offd_map, num_cols_offd;
   HYPRE_BigInt *cols_map_offd;
   /* split E by A's owned column range; also yields the map from A's old
    * offd columns into the merged offd column space (cols_offd_map) */
   hypre_CSRMatrixSplitDevice(E, hypre_ParCSRMatrixFirstColDiag(A), hypre_ParCSRMatrixLastColDiag(A),
                              hypre_CSRMatrixNumCols(A_offd), hypre_ParCSRMatrixDeviceColMapOffd(A),
                              &cols_offd_map, &num_cols_offd, &cols_map_offd, &E_diag, &E_offd);
   B = hypre_CSRMatrixCreate(hypre_ParCSRMatrixNumRows(A) + hypre_CSRMatrixNumRows(E),
                             hypre_ParCSRMatrixNumCols(A) + num_cols_offd,
                             hypre_CSRMatrixNumNonzeros(A_diag) + hypre_CSRMatrixNumNonzeros(A_offd) +
                             hypre_CSRMatrixNumNonzeros(E));
   hypre_CSRMatrixInitialize_v2(B, 0, HYPRE_MEMORY_DEVICE);
   /* row ptrs for the A-part of B: per-row nnz then exclusive scan */
   hypreDevice_GetRowNnz(hypre_ParCSRMatrixNumRows(A), NULL, hypre_CSRMatrixI(A_diag),
                         hypre_CSRMatrixI(A_offd), hypre_CSRMatrixI(B));
   HYPRE_THRUST_CALL( exclusive_scan,
                      hypre_CSRMatrixI(B),
                      hypre_CSRMatrixI(B) + hypre_ParCSRMatrixNumRows(A) + 1,
                      hypre_CSRMatrixI(B) );
   /* fill the A-part: offd columns remapped into the merged offd space */
   dim3 bDim = hypre_GetDefaultDeviceBlockDimension();
   dim3 gDim = hypre_GetDefaultDeviceGridDimension(hypre_ParCSRMatrixNumRows(A), "warp", bDim);
   HYPRE_CUDA_LAUNCH( hypreCUDAKernel_ConcatDiagAndOffd,
                      gDim, bDim,
                      hypre_CSRMatrixNumRows(A_diag),
                      hypre_CSRMatrixNumCols(A_diag),
                      hypre_CSRMatrixI(A_diag),
                      hypre_CSRMatrixJ(A_diag),
                      hypre_CSRMatrixData(A_diag),
                      hypre_CSRMatrixI(A_offd),
                      hypre_CSRMatrixJ(A_offd),
                      hypre_CSRMatrixData(A_offd),
                      cols_offd_map,
                      hypre_CSRMatrixI(B),
                      hypre_CSRMatrixJ(B),
                      hypre_CSRMatrixData(B) );
   hypre_TFree(cols_offd_map, HYPRE_MEMORY_DEVICE);
   /* row ptrs for the E-part: copy E's ptrs (sans leading 0) after A's rows,
    * then shift them by the number of A-part nonzeros */
   hypre_TMemcpy(hypre_CSRMatrixI(B) + hypre_ParCSRMatrixNumRows(A) + 1, hypre_CSRMatrixI(E) + 1,
                 HYPRE_Int, hypre_CSRMatrixNumRows(E),
                 HYPRE_MEMORY_DEVICE, HYPRE_MEMORY_DEVICE);
   HYPRE_THRUST_CALL( transform,
                      hypre_CSRMatrixI(B) + hypre_ParCSRMatrixNumRows(A) + 1,
                      hypre_CSRMatrixI(B) + hypre_ParCSRMatrixNumRows(A) + hypre_CSRMatrixNumRows(E) + 1,
                      thrust::make_constant_iterator(hypre_CSRMatrixNumNonzeros(A_diag) + hypre_CSRMatrixNumNonzeros(
                                                        A_offd)),
                      hypre_CSRMatrixI(B) + hypre_ParCSRMatrixNumRows(A) + 1,
                      thrust::plus<HYPRE_Int>() );
   /* fill the E-part (row ptr base shifted to the E rows of B); E_diag/E_offd
    * already carry merged-space local columns, so no remap (NULL) */
   gDim = hypre_GetDefaultDeviceGridDimension(hypre_CSRMatrixNumRows(E), "warp", bDim);
   hypre_assert(hypre_CSRMatrixNumCols(E_diag) == hypre_CSRMatrixNumCols(A_diag));
   HYPRE_CUDA_LAUNCH( hypreCUDAKernel_ConcatDiagAndOffd,
                      gDim, bDim,
                      hypre_CSRMatrixNumRows(E_diag),
                      hypre_CSRMatrixNumCols(E_diag),
                      hypre_CSRMatrixI(E_diag),
                      hypre_CSRMatrixJ(E_diag),
                      hypre_CSRMatrixData(E_diag),
                      hypre_CSRMatrixI(E_offd),
                      hypre_CSRMatrixJ(E_offd),
                      hypre_CSRMatrixData(E_offd),
                      NULL,
                      hypre_CSRMatrixI(B) + hypre_ParCSRMatrixNumRows(A),
                      hypre_CSRMatrixJ(B),
                      hypre_CSRMatrixData(B) );
   hypre_CSRMatrixDestroy(E_diag);
   hypre_CSRMatrixDestroy(E_offd);
   *B_ptr = B;
   *num_cols_offd_ptr = num_cols_offd;
   *cols_map_offd_ptr = cols_map_offd;
   return hypre_error_flag;
}
#else
/* hypre_ConcatDiagOffdAndExtDevice (fallback path without the custom kernel):
 *
 * Stacks the local part of A on top of the external rows E into one local
 * CSR matrix B:
 *
 *     B = [ A_diag | A_offd (remapped) ]   rows 0 .. A_nrows-1
 *         [ E_diag | E_offd (remapped) ]   rows A_nrows .. A_nrows+E_nrows-1
 *
 * B is created with A_nrows + E_nrows rows and A_ncols + num_cols_offd
 * columns.  E carries global (big) column indices, which are split against
 * [first_col_A, last_col_A] into a diag part (local columns) and an offd
 * part; the split also produces a merged offd column map (cols_map_offd)
 * and cols_offd_map, which re-maps A_offd's old local column indices into
 * the merged map.  All offd column indices are shifted by A_ncols so they
 * follow the diag block.  B is assembled in COO form (B_ii, B_j, B_a) and
 * compressed to CSR at the end.
 *
 * Output: *B_ptr (device CSR), *num_cols_offd_ptr, and *cols_map_offd_ptr
 * (device array of global column ids for the offd block).
 */
HYPRE_Int
hypre_ConcatDiagOffdAndExtDevice(hypre_ParCSRMatrix *A,
hypre_CSRMatrix *E,
hypre_CSRMatrix **B_ptr,
HYPRE_Int *num_cols_offd_ptr,
HYPRE_BigInt **cols_map_offd_ptr)
{
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
HYPRE_Int A_nrows = hypre_CSRMatrixNumRows(A_diag);
HYPRE_Int A_ncols = hypre_CSRMatrixNumCols(A_diag);
HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
HYPRE_Complex *A_diag_a = hypre_CSRMatrixData(A_diag);
HYPRE_Int A_diag_nnz = hypre_CSRMatrixNumNonzeros(A_diag);
hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
HYPRE_Complex *A_offd_a = hypre_CSRMatrixData(A_offd);
HYPRE_Int A_offd_nnz = hypre_CSRMatrixNumNonzeros(A_offd);
HYPRE_BigInt first_col_A = hypre_ParCSRMatrixFirstColDiag(A);
HYPRE_BigInt last_col_A = hypre_ParCSRMatrixLastColDiag(A);
HYPRE_Int num_cols_offd_A = hypre_CSRMatrixNumCols(A_offd);
HYPRE_BigInt *col_map_offd_A = hypre_ParCSRMatrixDeviceColMapOffd(A);
HYPRE_Int *E_i = hypre_CSRMatrixI(E);
HYPRE_BigInt *E_bigj = hypre_CSRMatrixBigJ(E);
HYPRE_Complex *E_a = hypre_CSRMatrixData(E);
HYPRE_Int E_nrows = hypre_CSRMatrixNumRows(E);
HYPRE_Int E_nnz = hypre_CSRMatrixNumNonzeros(E);
HYPRE_Int E_diag_nnz, E_offd_nnz;
hypre_CSRMatrix *B;
HYPRE_Int B_nnz = A_diag_nnz + A_offd_nnz + E_nnz;
/* COO buffers for B: (row, col, value) triplets, sorted to CSR at the end */
HYPRE_Int *B_ii = hypre_TAlloc(HYPRE_Int, B_nnz, HYPRE_MEMORY_DEVICE);
HYPRE_Int *B_j = hypre_TAlloc(HYPRE_Int, B_nnz, HYPRE_MEMORY_DEVICE);
HYPRE_Complex *B_a = hypre_TAlloc(HYPRE_Complex, B_nnz, HYPRE_MEMORY_DEVICE);
/* E: analysis pass (job 0) only counts E's diag/offd nonzeros */
hypre_CSRMatrixSplitDevice_core(0, E_nrows, E_nnz, NULL, E_bigj, NULL, NULL, first_col_A,
last_col_A, num_cols_offd_A,
NULL, NULL, NULL, NULL, &E_diag_nnz, NULL, NULL, NULL, NULL, &E_offd_nnz,
NULL, NULL, NULL, NULL);
HYPRE_Int *cols_offd_map, num_cols_offd;
HYPRE_BigInt *cols_map_offd;
HYPRE_Int *E_ii = hypreDevice_CsrRowPtrsToIndices(E_nrows, E_nnz, E_i);
/* E: fill pass (job 1) writes E's split entries into the tail of the COO
 * buffers (after A's nonzeros); also builds the merged offd column map and
 * the old->new index map for A_offd's columns */
hypre_CSRMatrixSplitDevice_core(1,
E_nrows, E_nnz, E_ii, E_bigj, E_a, NULL,
first_col_A, last_col_A, num_cols_offd_A, col_map_offd_A,
&cols_offd_map, &num_cols_offd, &cols_map_offd,
&E_diag_nnz,
B_ii + A_diag_nnz + A_offd_nnz,
B_j + A_diag_nnz + A_offd_nnz,
B_a + A_diag_nnz + A_offd_nnz,
NULL,
&E_offd_nnz,
B_ii + A_diag_nnz + A_offd_nnz + E_diag_nnz,
B_j + A_diag_nnz + A_offd_nnz + E_diag_nnz,
B_a + A_diag_nnz + A_offd_nnz + E_diag_nnz,
NULL);
hypre_TFree(E_ii, HYPRE_MEMORY_DEVICE);
/* E's rows sit below A's in B: shift their row indices by A_nrows */
HYPRE_THRUST_CALL( transform,
B_ii + A_diag_nnz + A_offd_nnz,
B_ii + B_nnz,
thrust::make_constant_iterator(A_nrows),
B_ii + A_diag_nnz + A_offd_nnz,
thrust::plus<HYPRE_Int>() );
/* A_diag: copy its triplets to the front of B (columns already local) */
HYPRE_Int *A_diag_ii = hypreDevice_CsrRowPtrsToIndices(A_nrows, A_diag_nnz, A_diag_i);
HYPRE_THRUST_CALL( copy_n,
thrust::make_zip_iterator(thrust::make_tuple(A_diag_ii, A_diag_j, A_diag_a)),
A_diag_nnz,
thrust::make_zip_iterator(thrust::make_tuple(B_ii, B_j, B_a)) );
hypre_TFree(A_diag_ii, HYPRE_MEMORY_DEVICE);
/* A_offd: copy rows/values, then remap its columns through cols_offd_map */
HYPRE_Int *A_offd_ii = hypreDevice_CsrRowPtrsToIndices(A_nrows, A_offd_nnz, A_offd_i);
HYPRE_THRUST_CALL( copy_n,
thrust::make_zip_iterator(thrust::make_tuple(A_offd_ii, A_offd_a)),
A_offd_nnz,
thrust::make_zip_iterator(thrust::make_tuple(B_ii, B_a)) + A_diag_nnz );
hypre_TFree(A_offd_ii, HYPRE_MEMORY_DEVICE);
HYPRE_THRUST_CALL( gather,
A_offd_j,
A_offd_j + A_offd_nnz,
cols_offd_map,
B_j + A_diag_nnz);
hypre_TFree(cols_offd_map, HYPRE_MEMORY_DEVICE);
/* offd columns come after the diag block in B: shift A_offd's by A_ncols */
HYPRE_THRUST_CALL( transform,
B_j + A_diag_nnz,
B_j + A_diag_nnz + A_offd_nnz,
thrust::make_constant_iterator(A_ncols),
B_j + A_diag_nnz,
thrust::plus<HYPRE_Int>() );
/* same column shift for E's offd entries */
HYPRE_THRUST_CALL( transform,
B_j + A_diag_nnz + A_offd_nnz + E_diag_nnz,
B_j + B_nnz,
thrust::make_constant_iterator(A_ncols),
B_j + A_diag_nnz + A_offd_nnz + E_diag_nnz,
thrust::plus<HYPRE_Int>() );
/* B: sort the triplets by row, then compress row indices to row pointers */
HYPRE_THRUST_CALL( stable_sort_by_key,
B_ii,
B_ii + B_nnz,
thrust::make_zip_iterator(thrust::make_tuple(B_j, B_a)) );
HYPRE_Int *B_i = hypreDevice_CsrRowIndicesToPtrs(A_nrows + E_nrows, B_nnz, B_ii);
hypre_TFree(B_ii, HYPRE_MEMORY_DEVICE);
B = hypre_CSRMatrixCreate(A_nrows + E_nrows, A_ncols + num_cols_offd, B_nnz);
hypre_CSRMatrixI(B) = B_i;
hypre_CSRMatrixJ(B) = B_j;
hypre_CSRMatrixData(B) = B_a;
hypre_CSRMatrixMemoryLocation(B) = HYPRE_MEMORY_DEVICE;
*B_ptr = B;
*num_cols_offd_ptr = num_cols_offd;
*cols_map_offd_ptr = cols_map_offd;
return hypre_error_flag;
}
#endif
/* hypre_ParCSRMatrixGetRowDevice:
 *
 * Device version of GetRow.  For a globally-indexed row owned by this rank,
 * returns the row's nonzero count in *size and, if requested, pointers to
 * matrix-owned buffers holding the global column indices (*col_ind) and
 * values (*values) of that row (diag part followed by offd part, as copied
 * by hypreDevice_CopyParCSRRows).
 *
 * Returns -1 if a GetRow is already active or if 'row' is not owned by this
 * rank; the buffers remain owned by the matrix (they are reused across
 * calls), so callers must not free them.
 * NOTE(review): the out-of-range early return below leaves Getrowactive set
 * to 1 — presumably the caller is still expected to call RestoreRow; confirm
 * this matches the host implementation's contract.
 */
HYPRE_Int
hypre_ParCSRMatrixGetRowDevice( hypre_ParCSRMatrix *mat,
HYPRE_BigInt row,
HYPRE_Int *size,
HYPRE_BigInt **col_ind,
HYPRE_Complex **values )
{
HYPRE_Int nrows, local_row;
HYPRE_BigInt row_start, row_end;
hypre_CSRMatrix *Aa;
hypre_CSRMatrix *Ba;
if (!mat)
{
hypre_error_in_arg(1);
return hypre_error_flag;
}
Aa = (hypre_CSRMatrix *) hypre_ParCSRMatrixDiag(mat);
Ba = (hypre_CSRMatrix *) hypre_ParCSRMatrixOffd(mat);
/* only one row may be checked out at a time */
if (hypre_ParCSRMatrixGetrowactive(mat))
{
return (-1);
}
hypre_ParCSRMatrixGetrowactive(mat) = 1;
row_start = hypre_ParCSRMatrixFirstRowIndex(mat);
row_end = hypre_ParCSRMatrixLastRowIndex(mat) + 1;
nrows = row_end - row_start;
if (row < row_start || row >= row_end)
{
return (-1);
}
local_row = row - row_start;
/* if buffer is not allocated and some information is requested, allocate buffer with the max row_nnz */
if ( !hypre_ParCSRMatrixRowvalues(mat) && (col_ind || values) )
{
HYPRE_Int max_row_nnz;
/* per-row nnz over diag + offd, computed on device */
HYPRE_Int *row_nnz = hypre_TAlloc(HYPRE_Int, nrows, HYPRE_MEMORY_DEVICE);
hypreDevice_GetRowNnz(nrows, NULL, hypre_CSRMatrixI(Aa), hypre_CSRMatrixI(Ba), row_nnz);
/* this row's size goes to the host output parameter */
hypre_TMemcpy(size, row_nnz + local_row, HYPRE_Int, 1, HYPRE_MEMORY_HOST, HYPRE_MEMORY_DEVICE);
max_row_nnz = HYPRE_THRUST_CALL(reduce, row_nnz, row_nnz + nrows, 0, thrust::maximum<HYPRE_Int>());
/*
HYPRE_Int *max_row_nnz_d = HYPRE_THRUST_CALL(max_element, row_nnz, row_nnz + nrows);
hypre_TMemcpy( &max_row_nnz, max_row_nnz_d,
HYPRE_Int, 1, HYPRE_MEMORY_HOST, HYPRE_MEMORY_DEVICE );
*/
hypre_TFree(row_nnz, HYPRE_MEMORY_DEVICE);
/* buffers sized for the widest row so any row fits on later calls */
hypre_ParCSRMatrixRowvalues(mat) =
(HYPRE_Complex *) hypre_TAlloc(HYPRE_Complex, max_row_nnz, hypre_ParCSRMatrixMemoryLocation(mat));
hypre_ParCSRMatrixRowindices(mat) =
(HYPRE_BigInt *) hypre_TAlloc(HYPRE_BigInt, max_row_nnz, hypre_ParCSRMatrixMemoryLocation(mat));
}
else
{
/* buffers already exist (or nothing requested): only compute this row's nnz */
HYPRE_Int *size_d = hypre_TAlloc(HYPRE_Int, 1, HYPRE_MEMORY_DEVICE);
hypreDevice_GetRowNnz(1, NULL, hypre_CSRMatrixI(Aa) + local_row, hypre_CSRMatrixI(Ba) + local_row,
size_d);
hypre_TMemcpy(size, size_d, HYPRE_Int, 1, HYPRE_MEMORY_HOST, HYPRE_MEMORY_DEVICE);
hypre_TFree(size_d, HYPRE_MEMORY_DEVICE);
}
if (col_ind || values)
{
/* lazily mirror the host offd column map onto the device; needed to
 * translate offd local columns to global indices */
if (hypre_ParCSRMatrixDeviceColMapOffd(mat) == NULL)
{
hypre_ParCSRMatrixDeviceColMapOffd(mat) =
hypre_TAlloc(HYPRE_BigInt, hypre_CSRMatrixNumCols(Ba), HYPRE_MEMORY_DEVICE);
hypre_TMemcpy( hypre_ParCSRMatrixDeviceColMapOffd(mat),
hypre_ParCSRMatrixColMapOffd(mat),
HYPRE_BigInt,
hypre_CSRMatrixNumCols(Ba),
HYPRE_MEMORY_DEVICE, HYPRE_MEMORY_HOST );
}
/* copy the single row (diag + offd) into the matrix-owned buffers */
hypreDevice_CopyParCSRRows( 1, NULL, -1, Ba != NULL,
hypre_ParCSRMatrixFirstColDiag(mat),
hypre_ParCSRMatrixDeviceColMapOffd(mat),
hypre_CSRMatrixI(Aa) + local_row,
hypre_CSRMatrixJ(Aa),
hypre_CSRMatrixData(Aa),
hypre_CSRMatrixI(Ba) + local_row,
hypre_CSRMatrixJ(Ba),
hypre_CSRMatrixData(Ba),
NULL,
hypre_ParCSRMatrixRowindices(mat),
hypre_ParCSRMatrixRowvalues(mat) );
}
if (col_ind)
{
*col_ind = hypre_ParCSRMatrixRowindices(mat);
}
if (values)
{
*values = hypre_ParCSRMatrixRowvalues(mat);
}
hypre_SyncComputeStream(hypre_handle());
return hypre_error_flag;
}
/* Get element-wise tolerances based on row norms for ParCSRMatrix
* NOTE: Keep the diagonal, i.e. elmt_tol = 0.0 for diagonals
* Output vectors have size nnz:
* elmt_tols_diag[j] = tol * (norm of row i) for j in [ A_diag_i[i] , A_diag_i[i+1] )
* elmt_tols_offd[j] = tol * (norm of row i) for j in [ A_offd_i[i] , A_offd_i[i+1] )
* type == -1, infinity norm,
* 1, 1-norm
* 2, 2-norm
*/
/* One warp processes one row (the launch uses a "warp" grid dimension):
 * the warp computes the row norm over diag + offd, then writes
 * tol * row_norm into elmt_tols_diag / elmt_tols_offd for every nonzero of
 * the row (0.0 for the diagonal entry so it is never dropped).
 * 'type' selects the norm (-1: inf, 1: one, 2: two) at compile time. */
template<HYPRE_Int type>
__global__ void
hypre_ParCSRMatrixDropSmallEntriesDevice_getElmtTols( HYPRE_Int nrows,
HYPRE_Real tol,
HYPRE_Int *A_diag_i,
HYPRE_Int *A_diag_j,
HYPRE_Complex *A_diag_a,
HYPRE_Int *A_offd_i,
HYPRE_Complex *A_offd_a,
HYPRE_Real *elmt_tols_diag,
HYPRE_Real *elmt_tols_offd)
{
HYPRE_Int row_i = hypre_cuda_get_grid_warp_id<1, 1>();
if (row_i >= nrows)
{
return;
}
HYPRE_Int lane = hypre_cuda_get_lane_id<1>();
HYPRE_Int p_diag, p_offd, q_diag, q_offd;
/* sum row norm over diag part */
/* lanes 0/1 load the row's begin/end pointers; shuffles broadcast them */
if (lane < 2)
{
p_diag = read_only_load(A_diag_i + row_i + lane);
}
q_diag = __shfl_sync(HYPRE_WARP_FULL_MASK, p_diag, 1);
p_diag = __shfl_sync(HYPRE_WARP_FULL_MASK, p_diag, 0);
HYPRE_Real row_norm_i = 0.0;
/* warp-strided partial norm; each lane accumulates its own share */
for (HYPRE_Int j = p_diag + lane; j < q_diag; j += HYPRE_WARP_SIZE)
{
HYPRE_Complex val = A_diag_a[j];
if (type == -1)
{
row_norm_i = hypre_max(row_norm_i, hypre_cabs(val));
}
else if (type == 1)
{
row_norm_i += hypre_cabs(val);
}
else if (type == 2)
{
/* accumulate squares; sqrt applied after the warp reduction */
row_norm_i += val * val;
}
}
/* sum row norm over offd part */
if (lane < 2)
{
p_offd = read_only_load(A_offd_i + row_i + lane);
}
q_offd = __shfl_sync(HYPRE_WARP_FULL_MASK, p_offd, 1);
p_offd = __shfl_sync(HYPRE_WARP_FULL_MASK, p_offd, 0);
for (HYPRE_Int j = p_offd + lane; j < q_offd; j += HYPRE_WARP_SIZE)
{
HYPRE_Complex val = A_offd_a[j];
if (type == -1)
{
row_norm_i = hypre_max(row_norm_i, hypre_cabs(val));
}
else if (type == 1)
{
row_norm_i += hypre_cabs(val);
}
else if (type == 2)
{
row_norm_i += val * val;
}
}
/* allreduce to get the row norm on all threads */
if (type == -1)
{
row_norm_i = warp_allreduce_max(row_norm_i);
}
else
{
row_norm_i = warp_allreduce_sum(row_norm_i);
}
if (type == 2)
{
row_norm_i = sqrt(row_norm_i);
}
/* set elmt_tols_diag */
for (HYPRE_Int j = p_diag + lane; j < q_diag; j += HYPRE_WARP_SIZE)
{
HYPRE_Int col = A_diag_j[j];
/* elmt_tol = 0.0 ensures diagonal will be kept */
if (col == row_i)
{
elmt_tols_diag[j] = 0.0;
}
else
{
elmt_tols_diag[j] = tol * row_norm_i;
}
}
/* set elmt_tols_offd */
for (HYPRE_Int j = p_offd + lane; j < q_offd; j += HYPRE_WARP_SIZE)
{
elmt_tols_offd[j] = tol * row_norm_i;
}
}
/* drop the entries that are not on the diagonal and smaller than:
* type 0: tol
* type 1: tol*(1-norm of row)
* type 2: tol*(2-norm of row)
* type -1: tol*(infinity norm of row) */
/* hypre_ParCSRMatrixDropSmallEntriesDevice:
 *
 * In-place drops off-diagonal entries of A that are smaller than the
 * threshold selected by 'type' (see comment above): a flat 'tol' for
 * type 0, or tol times the chosen row norm otherwise (the kernel above
 * writes a zero tolerance for diagonal entries so they survive).
 * Afterwards, offd columns that became empty are squeezed out: A_offd's
 * column indices are compacted and both the device and host offd column
 * maps are rebuilt to the surviving columns.
 */
HYPRE_Int
hypre_ParCSRMatrixDropSmallEntriesDevice( hypre_ParCSRMatrix *A,
HYPRE_Complex tol,
HYPRE_Int type)
{
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd);
HYPRE_BigInt *h_col_map_offd_A = hypre_ParCSRMatrixColMapOffd(A);
HYPRE_BigInt *col_map_offd_A = hypre_ParCSRMatrixDeviceColMapOffd(A);
HYPRE_Real *elmt_tols_diag = NULL;
HYPRE_Real *elmt_tols_offd = NULL;
/* lazily mirror the host offd column map onto the device */
if (col_map_offd_A == NULL)
{
col_map_offd_A = hypre_TAlloc(HYPRE_BigInt, num_cols_A_offd, HYPRE_MEMORY_DEVICE);
hypre_TMemcpy(col_map_offd_A, h_col_map_offd_A, HYPRE_BigInt, num_cols_A_offd,
HYPRE_MEMORY_DEVICE, HYPRE_MEMORY_HOST);
hypre_ParCSRMatrixDeviceColMapOffd(A) = col_map_offd_A;
}
/* get elmement-wise tolerances if needed (type 0 uses the flat tol, so the
 * per-entry arrays stay NULL) */
if (type != 0)
{
elmt_tols_diag = hypre_TAlloc(HYPRE_Real, hypre_CSRMatrixNumNonzeros(A_diag), HYPRE_MEMORY_DEVICE);
elmt_tols_offd = hypre_TAlloc(HYPRE_Real, hypre_CSRMatrixNumNonzeros(A_offd), HYPRE_MEMORY_DEVICE);
}
dim3 bDim = hypre_GetDefaultDeviceBlockDimension();
dim3 gDim = hypre_GetDefaultDeviceGridDimension(hypre_CSRMatrixNumRows(A_diag), "warp", bDim);
/* the norm type is a template parameter, so dispatch per value */
if (type == -1)
{
HYPRE_CUDA_LAUNCH( hypre_ParCSRMatrixDropSmallEntriesDevice_getElmtTols < -1 >, gDim, bDim,
hypre_CSRMatrixNumRows(A_diag), tol, hypre_CSRMatrixI(A_diag),
hypre_CSRMatrixJ(A_diag), hypre_CSRMatrixData(A_diag), hypre_CSRMatrixI(A_offd),
hypre_CSRMatrixData(A_offd), elmt_tols_diag, elmt_tols_offd);
}
if (type == 1)
{
HYPRE_CUDA_LAUNCH( hypre_ParCSRMatrixDropSmallEntriesDevice_getElmtTols<1>, gDim, bDim,
hypre_CSRMatrixNumRows(A_diag), tol, hypre_CSRMatrixI(A_diag),
hypre_CSRMatrixJ(A_diag), hypre_CSRMatrixData(A_diag), hypre_CSRMatrixI(A_offd),
hypre_CSRMatrixData(A_offd), elmt_tols_diag, elmt_tols_offd);
}
if (type == 2)
{
HYPRE_CUDA_LAUNCH( hypre_ParCSRMatrixDropSmallEntriesDevice_getElmtTols<2>, gDim, bDim,
hypre_CSRMatrixNumRows(A_diag), tol, hypre_CSRMatrixI(A_diag),
hypre_CSRMatrixJ(A_diag), hypre_CSRMatrixData(A_diag), hypre_CSRMatrixI(A_offd),
hypre_CSRMatrixData(A_offd), elmt_tols_diag, elmt_tols_offd);
}
/* drop entries from diag and offd CSR matrices */
hypre_CSRMatrixDropSmallEntriesDevice(A_diag, tol, elmt_tols_diag);
hypre_CSRMatrixDropSmallEntriesDevice(A_offd, tol, elmt_tols_offd);
hypre_ParCSRMatrixSetNumNonzeros(A);
hypre_ParCSRMatrixDNumNonzeros(A) = (HYPRE_Real) hypre_ParCSRMatrixNumNonzeros(A);
/* squeeze out zero columns of A_offd */
/* sort+unique a copy of the (post-drop) column indices to find survivors */
HYPRE_Int *tmp_j, *tmp_end, num_cols_A_offd_new;
tmp_j = hypre_TAlloc(HYPRE_Int, hypre_CSRMatrixNumNonzeros(A_offd), HYPRE_MEMORY_DEVICE);
hypre_TMemcpy(tmp_j, hypre_CSRMatrixJ(A_offd), HYPRE_Int, hypre_CSRMatrixNumNonzeros(A_offd),
HYPRE_MEMORY_DEVICE, HYPRE_MEMORY_DEVICE);
HYPRE_THRUST_CALL( sort,
tmp_j,
tmp_j + hypre_CSRMatrixNumNonzeros(A_offd) );
tmp_end = HYPRE_THRUST_CALL( unique,
tmp_j,
tmp_j + hypre_CSRMatrixNumNonzeros(A_offd) );
num_cols_A_offd_new = tmp_end - tmp_j;
hypre_assert(num_cols_A_offd_new <= num_cols_A_offd);
if (num_cols_A_offd_new < num_cols_A_offd)
{
hypre_CSRMatrixNumCols(A_offd) = num_cols_A_offd_new;
/* offd_mark[old_col] = new_col for surviving columns */
HYPRE_Int *offd_mark = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_DEVICE);
HYPRE_BigInt *col_map_offd_A_new = hypre_TAlloc(HYPRE_BigInt, num_cols_A_offd_new,
HYPRE_MEMORY_DEVICE);
HYPRE_THRUST_CALL( scatter,
thrust::counting_iterator<HYPRE_Int>(0),
thrust::counting_iterator<HYPRE_Int>(num_cols_A_offd_new),
tmp_j,
offd_mark );
/* renumber A_offd's column indices in place */
HYPRE_THRUST_CALL( gather,
hypre_CSRMatrixJ(A_offd),
hypre_CSRMatrixJ(A_offd) + hypre_CSRMatrixNumNonzeros(A_offd),
offd_mark,
hypre_CSRMatrixJ(A_offd) );
/* keep only the surviving global column ids */
HYPRE_THRUST_CALL( gather,
tmp_j,
tmp_j + num_cols_A_offd_new,
col_map_offd_A,
col_map_offd_A_new );
hypre_TFree(offd_mark, HYPRE_MEMORY_DEVICE);
hypre_TFree(col_map_offd_A, HYPRE_MEMORY_DEVICE);
hypre_TFree(h_col_map_offd_A, HYPRE_MEMORY_HOST);
hypre_ParCSRMatrixDeviceColMapOffd(A) = col_map_offd_A_new;
/* rebuild the host copy of the new offd column map */
hypre_ParCSRMatrixColMapOffd(A) = hypre_TAlloc(HYPRE_BigInt, num_cols_A_offd_new,
HYPRE_MEMORY_HOST);
hypre_TMemcpy(hypre_ParCSRMatrixColMapOffd(A), col_map_offd_A_new, HYPRE_BigInt,
num_cols_A_offd_new,
HYPRE_MEMORY_HOST, HYPRE_MEMORY_DEVICE);
}
if (type != 0)
{
hypre_TFree(elmt_tols_diag, HYPRE_MEMORY_DEVICE);
hypre_TFree(elmt_tols_offd, HYPRE_MEMORY_DEVICE);
}
hypre_TFree(tmp_j, HYPRE_MEMORY_DEVICE);
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_ParCSRMatrixTransposeDevice
*--------------------------------------------------------------------------*/
/* hypre_ParCSRMatrixTransposeDevice:
 *
 * Computes AT = A^T on the device; values are transposed only when
 * 'data' is nonzero (otherwise AT carries the pattern only).
 *
 * Multi-process path: A_offd^T is formed locally with global (big) column
 * indices attached, its rows are exchanged with the owning processes
 * (overlapped with transposing A_diag), and the received external rows
 * become AT's offd part: their big columns are sorted/uniqued into
 * col_map_offd_AT and localized via binary search (lower_bound).
 * AT reuses A's comm, with row/col starts and global sizes swapped.
 */
HYPRE_Int
hypre_ParCSRMatrixTransposeDevice( hypre_ParCSRMatrix *A,
hypre_ParCSRMatrix **AT_ptr,
HYPRE_Int data )
{
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
hypre_CSRMatrix *A_diagT;
hypre_CSRMatrix *AT_offd;
HYPRE_Int num_procs;
HYPRE_Int num_cols_offd_AT = 0;
HYPRE_BigInt *col_map_offd_AT = NULL;
hypre_ParCSRMatrix *AT;
hypre_MPI_Comm_size(hypre_ParCSRMatrixComm(A), &num_procs);
if (num_procs > 1)
{
void *request;
hypre_CSRMatrix *A_offdT, *Aext;
HYPRE_Int *Aext_ii, *Aext_j, Aext_nnz;
HYPRE_Complex *Aext_data;
HYPRE_BigInt *tmp_bigj;
hypre_CSRMatrixTranspose(A_offd, &A_offdT, data);
/* attach global column ids to A_offd^T: its local columns are A's local
 * rows, so add A's first global row index */
hypre_CSRMatrixBigJ(A_offdT) = hypre_TAlloc(HYPRE_BigInt, hypre_CSRMatrixNumNonzeros(A_offdT),
HYPRE_MEMORY_DEVICE);
HYPRE_THRUST_CALL( transform,
hypre_CSRMatrixJ(A_offdT),
hypre_CSRMatrixJ(A_offdT) + hypre_CSRMatrixNumNonzeros(A_offdT),
thrust::make_constant_iterator(hypre_ParCSRMatrixFirstRowIndex(A)),
hypre_CSRMatrixBigJ(A_offdT),
thrust::plus<HYPRE_BigInt>() );
if (!hypre_ParCSRMatrixCommPkg(A))
{
hypre_MatvecCommPkgCreate(A);
}
/* start the row exchange, transpose A_diag while it is in flight */
hypre_ExchangeExternalRowsDeviceInit(A_offdT, hypre_ParCSRMatrixCommPkg(A), data, &request);
hypre_CSRMatrixTranspose(A_diag, &A_diagT, data);
Aext = hypre_ExchangeExternalRowsDeviceWait(request);
hypre_CSRMatrixDestroy(A_offdT);
// Aext contains offd of AT
Aext_nnz = hypre_CSRMatrixNumNonzeros(Aext);
Aext_ii = hypreDevice_CsrRowPtrsToIndices(hypre_CSRMatrixNumRows(Aext), Aext_nnz,
hypre_CSRMatrixI(Aext));
/* map each received row back to the local row it belongs to via the
 * comm-pkg send map */
hypre_ParCSRCommPkgCopySendMapElmtsToDevice(hypre_ParCSRMatrixCommPkg(A));
HYPRE_THRUST_CALL( gather,
Aext_ii,
Aext_ii + Aext_nnz,
hypre_ParCSRCommPkgDeviceSendMapElmts(hypre_ParCSRMatrixCommPkg(A)),
Aext_ii );
/* sort+unique the received big columns to form AT's offd column map */
tmp_bigj = hypre_TAlloc(HYPRE_BigInt, Aext_nnz, HYPRE_MEMORY_DEVICE);
hypre_TMemcpy(tmp_bigj, hypre_CSRMatrixBigJ(Aext), HYPRE_BigInt, Aext_nnz, HYPRE_MEMORY_DEVICE,
HYPRE_MEMORY_DEVICE);
HYPRE_THRUST_CALL( sort,
tmp_bigj,
tmp_bigj + Aext_nnz );
HYPRE_BigInt *new_end = HYPRE_THRUST_CALL( unique,
tmp_bigj,
tmp_bigj + Aext_nnz );
num_cols_offd_AT = new_end - tmp_bigj;
col_map_offd_AT = hypre_TAlloc(HYPRE_BigInt, num_cols_offd_AT, HYPRE_MEMORY_DEVICE);
hypre_TMemcpy(col_map_offd_AT, tmp_bigj, HYPRE_BigInt, num_cols_offd_AT, HYPRE_MEMORY_DEVICE,
HYPRE_MEMORY_DEVICE);
hypre_TFree(tmp_bigj, HYPRE_MEMORY_DEVICE);
/* localize: big column id -> position in col_map_offd_AT */
Aext_j = hypre_TAlloc(HYPRE_Int, Aext_nnz, HYPRE_MEMORY_DEVICE);
HYPRE_THRUST_CALL( lower_bound,
col_map_offd_AT,
col_map_offd_AT + num_cols_offd_AT,
hypre_CSRMatrixBigJ(Aext),
hypre_CSRMatrixBigJ(Aext) + Aext_nnz,
Aext_j );
/* steal the value array before destroying Aext */
Aext_data = hypre_CSRMatrixData(Aext);
hypre_CSRMatrixData(Aext) = NULL;
hypre_CSRMatrixDestroy(Aext);
/* order the COO triplets by (row, col) before compressing to CSR */
if (data)
{
hypreDevice_StableSortByTupleKey(Aext_nnz, Aext_ii, Aext_j, Aext_data, 0);
}
else
{
HYPRE_THRUST_CALL( stable_sort,
thrust::make_zip_iterator(thrust::make_tuple(Aext_ii, Aext_j)),
thrust::make_zip_iterator(thrust::make_tuple(Aext_ii, Aext_j)) + Aext_nnz );
}
AT_offd = hypre_CSRMatrixCreate(hypre_ParCSRMatrixNumCols(A), num_cols_offd_AT, Aext_nnz);
hypre_CSRMatrixJ(AT_offd) = Aext_j;
hypre_CSRMatrixData(AT_offd) = Aext_data;
hypre_CSRMatrixInitialize_v2(AT_offd, 0, HYPRE_MEMORY_DEVICE);
hypreDevice_CsrRowIndicesToPtrs_v2(hypre_CSRMatrixNumRows(AT_offd), Aext_nnz, Aext_ii,
hypre_CSRMatrixI(AT_offd));
hypre_TFree(Aext_ii, HYPRE_MEMORY_DEVICE);
}
else
{
/* single process: no offd part, just transpose the diag block */
hypre_CSRMatrixTransposeDevice(A_diag, &A_diagT, data);
AT_offd = hypre_CSRMatrixCreate(hypre_ParCSRMatrixNumCols(A), 0, 0);
hypre_CSRMatrixInitialize_v2(AT_offd, 0, HYPRE_MEMORY_DEVICE);
}
/* assemble AT with A's row/col metadata swapped */
AT = hypre_ParCSRMatrixCreate(hypre_ParCSRMatrixComm(A),
hypre_ParCSRMatrixGlobalNumCols(A),
hypre_ParCSRMatrixGlobalNumRows(A),
hypre_ParCSRMatrixColStarts(A),
hypre_ParCSRMatrixRowStarts(A),
num_cols_offd_AT,
hypre_CSRMatrixNumNonzeros(A_diagT),
hypre_CSRMatrixNumNonzeros(AT_offd));
hypre_CSRMatrixDestroy(hypre_ParCSRMatrixDiag(AT));
hypre_ParCSRMatrixDiag(AT) = A_diagT;
hypre_CSRMatrixDestroy(hypre_ParCSRMatrixOffd(AT));
hypre_ParCSRMatrixOffd(AT) = AT_offd;
if (num_cols_offd_AT)
{
hypre_ParCSRMatrixDeviceColMapOffd(AT) = col_map_offd_AT;
/* keep a host copy of the offd column map as well */
hypre_ParCSRMatrixColMapOffd(AT) = hypre_TAlloc(HYPRE_BigInt, num_cols_offd_AT, HYPRE_MEMORY_HOST);
hypre_TMemcpy(hypre_ParCSRMatrixColMapOffd(AT), col_map_offd_AT, HYPRE_BigInt, num_cols_offd_AT,
HYPRE_MEMORY_HOST, HYPRE_MEMORY_DEVICE);
}
*AT_ptr = AT;
return hypre_error_flag;
}
/* hypre_ParCSRMatrixAddDevice:
 *
 * Computes C = alpha*A + beta*B on the device and returns it via *C_ptr.
 * NOTE(review): C is created with A's global sizes and row/col starts, so
 * this assumes A and B share the same distribution — confirm with callers.
 *
 * The diag parts are added directly.  For the offd parts, the two offd
 * column maps are merged (sorted union), each matrix's offd columns are
 * translated into the merged map via binary search, and the offd CSRs are
 * added in the common numbering.
 */
HYPRE_Int
hypre_ParCSRMatrixAddDevice( HYPRE_Complex alpha,
hypre_ParCSRMatrix *A,
HYPRE_Complex beta,
hypre_ParCSRMatrix *B,
hypre_ParCSRMatrix **C_ptr )
{
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
hypre_CSRMatrix *B_diag = hypre_ParCSRMatrixDiag(B);
hypre_CSRMatrix *B_offd = hypre_ParCSRMatrixOffd(B);
HYPRE_Int num_cols_offd_A = hypre_CSRMatrixNumCols(A_offd);
HYPRE_Int num_cols_offd_B = hypre_CSRMatrixNumCols(B_offd);
HYPRE_Int num_cols_offd_C = 0;
HYPRE_BigInt *d_col_map_offd_C = NULL;
HYPRE_Int num_procs;
hypre_MPI_Comm_size(hypre_ParCSRMatrixComm(A), &num_procs);
hypre_CSRMatrix *C_diag = hypre_CSRMatrixAddDevice(alpha, A_diag, beta, B_diag);
hypre_CSRMatrix *C_offd;
//if (num_cols_offd_A || num_cols_offd_B)
if (num_procs > 1)
{
hypre_ParCSRMatrixCopyColMapOffdToDevice(A);
hypre_ParCSRMatrixCopyColMapOffdToDevice(B);
/* concatenate both offd column maps, then sort+unique into the union */
HYPRE_BigInt *tmp = hypre_TAlloc(HYPRE_BigInt, num_cols_offd_A + num_cols_offd_B,
HYPRE_MEMORY_DEVICE);
hypre_TMemcpy(tmp, hypre_ParCSRMatrixDeviceColMapOffd(A), HYPRE_BigInt,
num_cols_offd_A, HYPRE_MEMORY_DEVICE, HYPRE_MEMORY_DEVICE);
hypre_TMemcpy(tmp + num_cols_offd_A, hypre_ParCSRMatrixDeviceColMapOffd(B), HYPRE_BigInt,
num_cols_offd_B, HYPRE_MEMORY_DEVICE, HYPRE_MEMORY_DEVICE);
HYPRE_THRUST_CALL( sort, tmp, tmp + num_cols_offd_A + num_cols_offd_B );
HYPRE_BigInt *new_end = HYPRE_THRUST_CALL( unique, tmp, tmp + num_cols_offd_A + num_cols_offd_B );
num_cols_offd_C = new_end - tmp;
d_col_map_offd_C = hypre_TAlloc(HYPRE_BigInt, num_cols_offd_C, HYPRE_MEMORY_DEVICE);
hypre_TMemcpy(d_col_map_offd_C, tmp, HYPRE_BigInt, num_cols_offd_C, HYPRE_MEMORY_DEVICE,
HYPRE_MEMORY_DEVICE);
/* reuse memory of tmp for the two translation tables: old offd column
 * index (in A or B) -> index in the merged map */
HYPRE_Int *offd_A2C = (HYPRE_Int *) tmp;
HYPRE_Int *offd_B2C = offd_A2C + num_cols_offd_A;
HYPRE_THRUST_CALL( lower_bound,
d_col_map_offd_C,
d_col_map_offd_C + num_cols_offd_C,
hypre_ParCSRMatrixDeviceColMapOffd(A),
hypre_ParCSRMatrixDeviceColMapOffd(A) + num_cols_offd_A,
offd_A2C );
HYPRE_THRUST_CALL( lower_bound,
d_col_map_offd_C,
d_col_map_offd_C + num_cols_offd_C,
hypre_ParCSRMatrixDeviceColMapOffd(B),
hypre_ParCSRMatrixDeviceColMapOffd(B) + num_cols_offd_B,
offd_B2C );
/* add the offd parts in C's merged column numbering */
HYPRE_Int *C_offd_i, *C_offd_j, nnzC_offd;
HYPRE_Complex *C_offd_a;
hypreDevice_CSRSpAdd( hypre_CSRMatrixNumRows(A_offd),
hypre_CSRMatrixNumRows(B_offd),
num_cols_offd_C,
hypre_CSRMatrixNumNonzeros(A_offd),
hypre_CSRMatrixNumNonzeros(B_offd),
hypre_CSRMatrixI(A_offd),
hypre_CSRMatrixJ(A_offd),
alpha,
hypre_CSRMatrixData(A_offd),
offd_A2C,
hypre_CSRMatrixI(B_offd),
hypre_CSRMatrixJ(B_offd),
beta,
hypre_CSRMatrixData(B_offd),
offd_B2C,
NULL,
&nnzC_offd,
&C_offd_i,
&C_offd_j,
&C_offd_a );
hypre_TFree(tmp, HYPRE_MEMORY_DEVICE);
C_offd = hypre_CSRMatrixCreate(hypre_CSRMatrixNumRows(A_offd), num_cols_offd_C, nnzC_offd);
hypre_CSRMatrixI(C_offd) = C_offd_i;
hypre_CSRMatrixJ(C_offd) = C_offd_j;
hypre_CSRMatrixData(C_offd) = C_offd_a;
hypre_CSRMatrixMemoryLocation(C_offd) = HYPRE_MEMORY_DEVICE;
}
else
{
/* single process: offd part is empty */
C_offd = hypre_CSRMatrixCreate(hypre_CSRMatrixNumRows(A_offd), 0, 0);
hypre_CSRMatrixInitialize_v2(C_offd, 0, HYPRE_MEMORY_DEVICE);
}
/* Create ParCSRMatrix C */
hypre_ParCSRMatrix *C = hypre_ParCSRMatrixCreate(hypre_ParCSRMatrixComm(A),
hypre_ParCSRMatrixGlobalNumRows(A),
hypre_ParCSRMatrixGlobalNumCols(A),
hypre_ParCSRMatrixRowStarts(A),
hypre_ParCSRMatrixColStarts(A),
num_cols_offd_C,
hypre_CSRMatrixNumNonzeros(C_diag),
hypre_CSRMatrixNumNonzeros(C_offd));
/* replace the placeholder diag/offd created by Create with the real ones */
hypre_CSRMatrixDestroy(hypre_ParCSRMatrixDiag(C));
hypre_CSRMatrixDestroy(hypre_ParCSRMatrixOffd(C));
hypre_ParCSRMatrixDiag(C) = C_diag;
hypre_ParCSRMatrixOffd(C) = C_offd;
if (num_cols_offd_C)
{
hypre_ParCSRMatrixDeviceColMapOffd(C) = d_col_map_offd_C;
/* keep a host copy of the merged offd column map */
hypre_ParCSRMatrixColMapOffd(C) = hypre_TAlloc(HYPRE_BigInt, num_cols_offd_C, HYPRE_MEMORY_HOST);
hypre_TMemcpy(hypre_ParCSRMatrixColMapOffd(C), d_col_map_offd_C, HYPRE_BigInt, num_cols_offd_C,
HYPRE_MEMORY_HOST, HYPRE_MEMORY_DEVICE);
}
hypre_ParCSRMatrixSetNumNonzeros(C);
hypre_ParCSRMatrixDNumNonzeros(C) = (HYPRE_Real) hypre_ParCSRMatrixNumNonzeros(C);
/* create CommPkg of C */
hypre_MatvecCommPkgCreate(C);
*C_ptr = C;
return hypre_error_flag;
}
#endif // #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
/*--------------------------------------------------------------------------
* HYPRE_ParCSRDiagScale
*--------------------------------------------------------------------------*/
/* hypre_ParCSRDiagScaleVector:
 *
 * Jacobi-style diagonal scaling: x[i] = y[i] / A_data[A_i[i]], i.e. each
 * local entry of y is divided by the first stored entry of the
 * corresponding row of A's diag block (conventionally the diagonal
 * coefficient in hypre's storage).  Purely local — no communication.
 * Device builds call hypreDevice_DiagScaleVector (with beta = 0.0, so x is
 * overwritten); host builds use an (optionally OpenMP-parallel) loop.
 * Always returns 0.
 */
HYPRE_Int
hypre_ParCSRDiagScaleVector( HYPRE_ParCSRMatrix HA,
HYPRE_ParVector Hy,
HYPRE_ParVector Hx )
{
hypre_ParCSRMatrix *A = (hypre_ParCSRMatrix *) HA;
hypre_ParVector *y = (hypre_ParVector *) Hy;
hypre_ParVector *x = (hypre_ParVector *) Hx;
HYPRE_Real *x_data = hypre_VectorData(hypre_ParVectorLocalVector(x));
HYPRE_Real *y_data = hypre_VectorData(hypre_ParVectorLocalVector(y));
HYPRE_Real *A_data = hypre_CSRMatrixData(hypre_ParCSRMatrixDiag(A));
HYPRE_Int *A_i = hypre_CSRMatrixI(hypre_ParCSRMatrixDiag(A));
HYPRE_Int local_size = hypre_VectorSize(hypre_ParVectorLocalVector(x));
HYPRE_Int ierr = 0;
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
hypreDevice_DiagScaleVector(local_size, A_i, A_data, y_data, 0.0, x_data);
//hypre_SyncComputeStream(hypre_handle());
#else /* #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) */
HYPRE_Int i;
#if defined(HYPRE_USING_DEVICE_OPENMP)
#pragma omp target teams distribute parallel for private(i) is_device_ptr(x_data,y_data,A_data,A_i)
#elif defined(HYPRE_USING_OPENMP)
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < local_size; i++)
{
x_data[i] = y_data[i] / A_data[A_i[i]];
}
#endif /* #if defined(HYPRE_USING_CUDA) */
return ierr;
}
|
DRB048-firstprivate-orig-no.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
Example use of firstprivate()
*/
/* Adds g to each of the first n elements of a, in parallel.
 * The firstprivate(g) clause gives every thread its own copy of g,
 * initialized from the original value, so g is never shared-written;
 * iterations touch disjoint elements of a. */
void foo(int * a, int n, int g)
{
#pragma omp parallel for firstprivate (g) schedule(dynamic)
  for (int i = 0; i < n; ++i)
  {
    a[i] += g;
  }
}
/* shared array updated in parallel by foo() */
int a[100];
int main()
{
/* add 7 to all 100 elements (a is zero-initialized, so all become 7) */
foo(a, 100, 7);
return 0;
}
|
ast-dump-openmp-begin-declare-variant_decl_1.c | // RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -verify -ast-dump %s | FileCheck %s
// RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -verify -ast-dump %s -x c++| FileCheck %s
// expected-no-diagnostics
// FIXME: We have to improve the warnings here as nothing is impacted by the declare variant.
int also_before(void) { // plain base definition (trailing comments only: the FileCheck lines below pin exact line:col locations)
return 0;
}
#pragma omp begin declare variant match(device={kind(cpu)}) // cpu-kind context
int also_before(void); // redeclaration inside the variant region
#pragma omp end declare variant
#pragma omp begin declare variant match(implementation={vendor(score(100):llvm)}) // llvm vendor, score 100
int also_after(void); // declared before its plain definition
#pragma omp end declare variant
#pragma omp begin declare variant match(implementation={vendor(score(0):llvm)}) // llvm vendor, score 0
int also_before(void);
#pragma omp end declare variant
int also_after(void) { // plain definition of also_after
return 0;
}
int test() { // exercised callees are checked against the AST dump below
// Should return 0.
return also_after() + also_before();
}
// Make sure:
// - we do see the ast nodes for the cpu kind
// - we do see the ast nodes for the llvm vendor
// - we pick the right callees
// CHECK: |-FunctionDecl [[ADDR_0:0x[a-z0-9]*]] <{{.*}}, line:7:1> line:5:5 used also_before 'int ({{.*}})'
// CHECK-NEXT: | `-CompoundStmt [[ADDR_1:0x[a-z0-9]*]] <col:23, line:7:1>
// CHECK-NEXT: | `-ReturnStmt [[ADDR_2:0x[a-z0-9]*]] <line:6:3, col:10>
// CHECK-NEXT: | `-IntegerLiteral [[ADDR_3:0x[a-z0-9]*]] <col:10> 'int' 0
// CHECK-NEXT: |-FunctionDecl [[ADDR_4:0x[a-z0-9]*]] prev [[ADDR_0]] <line:10:1, col:21> col:5 used also_before 'int ({{.*}})'
// CHECK-NEXT: |-FunctionDecl [[ADDR_5:0x[a-z0-9]*]] <line:13:1, col:20> col:5 used also_after 'int ({{.*}})'
// CHECK-NEXT: |-FunctionDecl [[ADDR_6:0x[a-z0-9]*]] prev [[ADDR_4]] <line:16:1, col:21> col:5 used also_before 'int ({{.*}})'
// CHECK-NEXT: |-FunctionDecl [[ADDR_7:0x[a-z0-9]*]] prev [[ADDR_5]] <line:19:1, line:21:1> line:19:5 used also_after 'int ({{.*}})'
// CHECK-NEXT: | `-CompoundStmt [[ADDR_8:0x[a-z0-9]*]] <col:22, line:21:1>
// CHECK-NEXT: | `-ReturnStmt [[ADDR_9:0x[a-z0-9]*]] <line:20:3, col:10>
// CHECK-NEXT: | `-IntegerLiteral [[ADDR_10:0x[a-z0-9]*]] <col:10> 'int' 0
// CHECK-NEXT: `-FunctionDecl [[ADDR_11:0x[a-z0-9]*]] <line:23:1, line:26:1> line:23:5 test 'int ({{.*}})'
// CHECK-NEXT: `-CompoundStmt [[ADDR_12:0x[a-z0-9]*]] <col:12, line:26:1>
// CHECK-NEXT: `-ReturnStmt [[ADDR_13:0x[a-z0-9]*]] <line:25:3, col:37>
// CHECK-NEXT: `-BinaryOperator [[ADDR_14:0x[a-z0-9]*]] <col:10, col:37> 'int' '+'
// CHECK-NEXT: |-CallExpr [[ADDR_15:0x[a-z0-9]*]] <col:10, col:21> 'int'
// CHECK-NEXT: | `-ImplicitCastExpr [[ADDR_16:0x[a-z0-9]*]] <col:10> 'int (*)({{.*}})' <FunctionToPointerDecay>
// CHECK-NEXT: | `-DeclRefExpr [[ADDR_17:0x[a-z0-9]*]] <col:10> 'int ({{.*}})' {{.*}}Function [[ADDR_7]] 'also_after' 'int ({{.*}})'
// CHECK-NEXT: `-CallExpr [[ADDR_18:0x[a-z0-9]*]] <col:25, col:37> 'int'
// CHECK-NEXT: `-ImplicitCastExpr [[ADDR_19:0x[a-z0-9]*]] <col:25> 'int (*)({{.*}})' <FunctionToPointerDecay>
// CHECK-NEXT: `-DeclRefExpr [[ADDR_20:0x[a-z0-9]*]] <col:25> 'int ({{.*}})' {{.*}}Function [[ADDR_6]] 'also_before' 'int ({{.*}})'
|
ASTMatchers.h | //===- ASTMatchers.h - Structural query framework ---------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements matchers to be used together with the MatchFinder to
// match AST nodes.
//
// Matchers are created by generator functions, which can be combined in
// a functional in-language DSL to express queries over the C++ AST.
//
// For example, to match a class with a certain name, one would call:
// cxxRecordDecl(hasName("MyClass"))
// which returns a matcher that can be used to find all AST nodes that declare
// a class named 'MyClass'.
//
// For more complicated match expressions we're often interested in accessing
// multiple parts of the matched AST nodes once a match is found. In that case,
// call `.bind("name")` on match expressions that match the nodes you want to
// access.
//
// For example, when we're interested in child classes of a certain class, we
// would write:
// cxxRecordDecl(hasName("MyClass"), has(recordDecl().bind("child")))
// When the match is found via the MatchFinder, a user provided callback will
// be called with a BoundNodes instance that contains a mapping from the
// strings that we provided for the `.bind()` calls to the nodes that were
// matched.
// In the given example, each time our matcher finds a match we get a callback
// where "child" is bound to the RecordDecl node of the matching child
// class declaration.
//
// See ASTMatchersInternal.h for a more in-depth explanation of the
// implementation details of the matcher framework.
//
// See ASTMatchFinder.h for how to use the generated matchers to run over
// an AST.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_ASTMATCHERS_ASTMATCHERS_H
#define LLVM_CLANG_ASTMATCHERS_ASTMATCHERS_H
#include "clang/AST/ASTContext.h"
#include "clang/AST/ASTTypeTraits.h"
#include "clang/AST/Attr.h"
#include "clang/AST/CXXInheritance.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclFriend.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExprObjC.h"
#include "clang/AST/LambdaCapture.h"
#include "clang/AST/NestedNameSpecifier.h"
#include "clang/AST/OpenMPClause.h"
#include "clang/AST/OperationKinds.h"
#include "clang/AST/ParentMapContext.h"
#include "clang/AST/Stmt.h"
#include "clang/AST/StmtCXX.h"
#include "clang/AST/StmtObjC.h"
#include "clang/AST/StmtOpenMP.h"
#include "clang/AST/TemplateBase.h"
#include "clang/AST/TemplateName.h"
#include "clang/AST/Type.h"
#include "clang/AST/TypeLoc.h"
#include "clang/ASTMatchers/ASTMatchersInternal.h"
#include "clang/ASTMatchers/ASTMatchersMacros.h"
#include "clang/Basic/AttrKinds.h"
#include "clang/Basic/ExceptionSpecificationType.h"
#include "clang/Basic/FileManager.h"
#include "clang/Basic/IdentifierTable.h"
#include "clang/Basic/LLVM.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/Specifiers.h"
#include "clang/Basic/TypeTraits.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Regex.h"
#include <cassert>
#include <cstddef>
#include <iterator>
#include <limits>
#include <string>
#include <utility>
#include <vector>
namespace clang {
namespace ast_matchers {
/// Maps string IDs to AST nodes matched by parts of a matcher.
///
/// The bound nodes are generated by calling \c bind("id") on the node matchers
/// of the nodes we want to access later.
///
/// The instances of BoundNodes are created by \c MatchFinder when the user's
/// callbacks are executed every time a match is found.
class BoundNodes {
public:
/// Returns the AST node bound to \c ID.
///
/// Returns NULL if there was no node bound to \c ID or if there is a node but
/// it cannot be converted to the specified type.
template <typename T>
const T *getNodeAs(StringRef ID) const {
return MyBoundNodes.getNodeAs<T>(ID);
}
/// Type of mapping from binding identifiers to bound nodes. This type
/// is an associative container with a key type of \c std::string and a value
/// type of \c clang::DynTypedNode
using IDToNodeMap = internal::BoundNodesMap::IDToNodeMap;
/// Retrieve mapping from binding identifiers to bound nodes.
const IDToNodeMap &getMap() const {
return MyBoundNodes.getMap();
}
private:
friend class internal::BoundNodesTreeBuilder;
/// Create BoundNodes from a pre-filled map of bindings.
BoundNodes(internal::BoundNodesMap &MyBoundNodes)
: MyBoundNodes(MyBoundNodes) {}
internal::BoundNodesMap MyBoundNodes;
};
/// Types of matchers for the top-level classes in the AST class
/// hierarchy.
/// @{
using DeclarationMatcher = internal::Matcher<Decl>;
using StatementMatcher = internal::Matcher<Stmt>;
using TypeMatcher = internal::Matcher<QualType>;
using TypeLocMatcher = internal::Matcher<TypeLoc>;
using NestedNameSpecifierMatcher = internal::Matcher<NestedNameSpecifier>;
using NestedNameSpecifierLocMatcher = internal::Matcher<NestedNameSpecifierLoc>;
using CXXCtorInitializerMatcher = internal::Matcher<CXXCtorInitializer>;
using TemplateArgumentLocMatcher = internal::Matcher<TemplateArgumentLoc>;
/// @}
/// Matches any node.
///
/// Useful when another matcher requires a child matcher, but there's no
/// additional constraint. This will often be used with an explicit conversion
/// to an \c internal::Matcher<> type such as \c TypeMatcher.
///
/// Example: \c DeclarationMatcher(anything()) matches all declarations, e.g.,
/// \code
/// "int* p" and "void f()" in
/// int* p;
/// void f();
/// \endcode
///
/// Usable as: Any Matcher
// Trivially-true matcher: accepts every node it is applied to.
inline internal::TrueMatcher anything() {
  return internal::TrueMatcher();
}
/// Matches the top declaration context.
///
/// Given
/// \code
/// int X;
/// namespace NS {
/// int Y;
/// } // namespace NS
/// \endcode
/// decl(hasDeclContext(translationUnitDecl()))
/// matches "int X", but not "int Y".
extern const internal::VariadicDynCastAllOfMatcher<Decl, TranslationUnitDecl>
translationUnitDecl;
/// Matches typedef declarations.
///
/// Given
/// \code
/// typedef int X;
/// using Y = int;
/// \endcode
/// typedefDecl()
/// matches "typedef int X", but not "using Y = int"
extern const internal::VariadicDynCastAllOfMatcher<Decl, TypedefDecl>
typedefDecl;
/// Matches typedef name declarations.
///
/// Given
/// \code
/// typedef int X;
/// using Y = int;
/// \endcode
/// typedefNameDecl()
/// matches "typedef int X" and "using Y = int"
extern const internal::VariadicDynCastAllOfMatcher<Decl, TypedefNameDecl>
typedefNameDecl;
/// Matches type alias declarations.
///
/// Given
/// \code
/// typedef int X;
/// using Y = int;
/// \endcode
/// typeAliasDecl()
/// matches "using Y = int", but not "typedef int X"
extern const internal::VariadicDynCastAllOfMatcher<Decl, TypeAliasDecl>
typeAliasDecl;
/// Matches type alias template declarations.
///
/// typeAliasTemplateDecl() matches
/// \code
/// template <typename T>
/// using Y = X<T>;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, TypeAliasTemplateDecl>
typeAliasTemplateDecl;
/// Matches AST nodes that were expanded within the main-file.
///
/// Example matches X but not Y
/// (matcher = cxxRecordDecl(isExpansionInMainFile()))
/// \code
/// #include <Y.h>
/// class X {};
/// \endcode
/// Y.h:
/// \code
/// class Y {};
/// \endcode
///
/// Usable as: Matcher<Decl>, Matcher<Stmt>, Matcher<TypeLoc>
AST_POLYMORPHIC_MATCHER(isExpansionInMainFile,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(Decl, Stmt, TypeLoc)) {
  const auto &SM = Finder->getASTContext().getSourceManager();
  // Use the expansion location so nodes produced by a macro are attributed
  // to the file the macro is expanded in, not where the macro is defined.
  return SM.isInMainFile(SM.getExpansionLoc(Node.getBeginLoc()));
}
/// Matches AST nodes that were expanded within system-header-files.
///
/// Example matches Y but not X
/// (matcher = cxxRecordDecl(isExpansionInSystemHeader()))
/// \code
/// #include <SystemHeader.h>
/// class X {};
/// \endcode
/// SystemHeader.h:
/// \code
/// class Y {};
/// \endcode
///
/// Usable as: Matcher<Decl>, Matcher<Stmt>, Matcher<TypeLoc>
AST_POLYMORPHIC_MATCHER(isExpansionInSystemHeader,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(Decl, Stmt, TypeLoc)) {
  const auto &SM = Finder->getASTContext().getSourceManager();
  const auto Loc = SM.getExpansionLoc(Node.getBeginLoc());
  // An invalid location can never lie inside a system header.
  if (Loc.isInvalid())
    return false;
  return SM.isInSystemHeader(Loc);
}
/// Matches AST nodes that were expanded within files whose name is
/// partially matching a given regex.
///
/// Example matches Y but not X
/// (matcher = cxxRecordDecl(isExpansionInFileMatching("AST.*")))
/// \code
/// #include "ASTMatcher.h"
/// class X {};
/// \endcode
/// ASTMatcher.h:
/// \code
/// class Y {};
/// \endcode
///
/// Usable as: Matcher<Decl>, Matcher<Stmt>, Matcher<TypeLoc>
AST_POLYMORPHIC_MATCHER_REGEX(isExpansionInFileMatching,
                              AST_POLYMORPHIC_SUPPORTED_TYPES(Decl, Stmt,
                                                              TypeLoc),
                              RegExp) {
  const auto &SM = Finder->getASTContext().getSourceManager();
  const auto Loc = SM.getExpansionLoc(Node.getBeginLoc());
  if (Loc.isInvalid())
    return false;
  // Nodes without a backing file entry (e.g. command-line buffers) never
  // match; otherwise match the regex against the file's name.
  const auto *Entry = SM.getFileEntryForID(SM.getFileID(Loc));
  return Entry && RegExp->match(Entry->getName());
}
/// Matches statements that are (transitively) expanded from the named macro.
/// Does not match if only part of the statement is expanded from that macro or
/// if different parts of the statement are expanded from different
/// appearances of the macro.
///
/// FIXME: Change to be a polymorphic matcher that works on any syntactic
/// node. There's nothing `Stmt`-specific about it.
AST_MATCHER_P(Stmt, isExpandedFromMacro, llvm::StringRef, MacroName) {
  // The statement matches only when both its begin and end locations come
  // from the same expansion of the named macro.
  auto &Ctx = Finder->getASTContext();
  const llvm::Optional<SourceLocation> Begin =
      internal::getExpansionLocOfMacro(MacroName, Node.getBeginLoc(), Ctx);
  if (!Begin)
    return false;
  const llvm::Optional<SourceLocation> End =
      internal::getExpansionLocOfMacro(MacroName, Node.getEndLoc(), Ctx);
  return End && *Begin == *End;
}
/// Matches declarations.
///
/// Examples matches \c X, \c C, and the friend declaration inside \c C;
/// \code
/// void X();
/// class C {
/// friend X;
/// };
/// \endcode
extern const internal::VariadicAllOfMatcher<Decl> decl;
/// Matches decomposition-declarations.
///
/// Examples matches the declaration node with \c foo and \c bar, but not
/// \c number.
/// (matcher = declStmt(has(decompositionDecl())))
///
/// \code
/// int number = 42;
/// auto [foo, bar] = std::make_pair(42, 42);
/// \endcode
extern const internal::VariadicAllOfMatcher<DecompositionDecl>
decompositionDecl;
/// Matches a declaration of a linkage specification.
///
/// Given
/// \code
/// extern "C" {}
/// \endcode
/// linkageSpecDecl()
/// matches "extern "C" {}"
extern const internal::VariadicDynCastAllOfMatcher<Decl, LinkageSpecDecl>
linkageSpecDecl;
/// Matches a declaration of anything that could have a name.
///
/// Example matches \c X, \c S, the anonymous union type, \c i, and \c U;
/// \code
/// typedef int X;
/// struct S {
/// union {
/// int i;
/// } U;
/// };
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, NamedDecl> namedDecl;
/// Matches a declaration of label.
///
/// Given
/// \code
/// goto FOO;
/// FOO: bar();
/// \endcode
/// labelDecl()
/// matches 'FOO:'
extern const internal::VariadicDynCastAllOfMatcher<Decl, LabelDecl> labelDecl;
/// Matches a declaration of a namespace.
///
/// Given
/// \code
/// namespace {}
/// namespace test {}
/// \endcode
/// namespaceDecl()
/// matches "namespace {}" and "namespace test {}"
extern const internal::VariadicDynCastAllOfMatcher<Decl, NamespaceDecl>
namespaceDecl;
/// Matches a declaration of a namespace alias.
///
/// Given
/// \code
/// namespace test {}
/// namespace alias = ::test;
/// \endcode
/// namespaceAliasDecl()
/// matches "namespace alias" but not "namespace test"
extern const internal::VariadicDynCastAllOfMatcher<Decl, NamespaceAliasDecl>
namespaceAliasDecl;
/// Matches class, struct, and union declarations.
///
/// Example matches \c X, \c Z, \c U, and \c S
/// \code
/// class X;
/// template<class T> class Z {};
/// struct S {};
/// union U {};
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, RecordDecl> recordDecl;
/// Matches C++ class declarations.
///
/// Example matches \c X, \c Z
/// \code
/// class X;
/// template<class T> class Z {};
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXRecordDecl>
cxxRecordDecl;
/// Matches C++ class template declarations.
///
/// Example matches \c Z
/// \code
/// template<class T> class Z {};
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, ClassTemplateDecl>
classTemplateDecl;
/// Matches C++ class template specializations.
///
/// Given
/// \code
/// template<typename T> class A {};
/// template<> class A<double> {};
/// A<int> a;
/// \endcode
/// classTemplateSpecializationDecl()
/// matches the specializations \c A<int> and \c A<double>
extern const internal::VariadicDynCastAllOfMatcher<
Decl, ClassTemplateSpecializationDecl>
classTemplateSpecializationDecl;
/// Matches C++ class template partial specializations.
///
/// Given
/// \code
/// template<class T1, class T2, int I>
/// class A {};
///
/// template<class T, int I>
/// class A<T, T*, I> {};
///
/// template<>
/// class A<int, int, 1> {};
/// \endcode
/// classTemplatePartialSpecializationDecl()
/// matches the specialization \c A<T,T*,I> but not \c A<int,int,1>
extern const internal::VariadicDynCastAllOfMatcher<
Decl, ClassTemplatePartialSpecializationDecl>
classTemplatePartialSpecializationDecl;
/// Matches declarator declarations (field, variable, function
/// and non-type template parameter declarations).
///
/// Given
/// \code
/// class X { int y; };
/// \endcode
/// declaratorDecl()
/// matches \c int y.
extern const internal::VariadicDynCastAllOfMatcher<Decl, DeclaratorDecl>
declaratorDecl;
/// Matches parameter variable declarations.
///
/// Given
/// \code
/// void f(int x);
/// \endcode
/// parmVarDecl()
/// matches \c int x.
extern const internal::VariadicDynCastAllOfMatcher<Decl, ParmVarDecl>
parmVarDecl;
/// Matches C++ access specifier declarations.
///
/// Given
/// \code
/// class C {
/// public:
/// int a;
/// };
/// \endcode
/// accessSpecDecl()
/// matches 'public:'
extern const internal::VariadicDynCastAllOfMatcher<Decl, AccessSpecDecl>
accessSpecDecl;
/// Matches constructor initializers.
///
/// Examples matches \c i(42).
/// \code
/// class C {
/// C() : i(42) {}
/// int i;
/// };
/// \endcode
extern const internal::VariadicAllOfMatcher<CXXCtorInitializer>
cxxCtorInitializer;
/// Matches template arguments.
///
/// Given
/// \code
/// template <typename T> struct C {};
/// C<int> c;
/// \endcode
/// templateArgument()
/// matches 'int' in C<int>.
extern const internal::VariadicAllOfMatcher<TemplateArgument> templateArgument;
/// Matches template arguments (with location info).
///
/// Given
/// \code
/// template <typename T> struct C {};
/// C<int> c;
/// \endcode
/// templateArgumentLoc()
/// matches 'int' in C<int>.
extern const internal::VariadicAllOfMatcher<TemplateArgumentLoc>
templateArgumentLoc;
/// Matches template name.
///
/// Given
/// \code
/// template <typename T> class X { };
/// X<int> xi;
/// \endcode
/// templateName()
/// matches 'X' in X<int>.
extern const internal::VariadicAllOfMatcher<TemplateName> templateName;
/// Matches non-type template parameter declarations.
///
/// Given
/// \code
/// template <typename T, int N> struct C {};
/// \endcode
/// nonTypeTemplateParmDecl()
/// matches 'N', but not 'T'.
extern const internal::VariadicDynCastAllOfMatcher<Decl,
NonTypeTemplateParmDecl>
nonTypeTemplateParmDecl;
/// Matches template type parameter declarations.
///
/// Given
/// \code
/// template <typename T, int N> struct C {};
/// \endcode
/// templateTypeParmDecl()
/// matches 'T', but not 'N'.
extern const internal::VariadicDynCastAllOfMatcher<Decl, TemplateTypeParmDecl>
templateTypeParmDecl;
/// Matches template template parameter declarations.
///
/// Given
/// \code
/// template <template <typename> class Z, int N> struct C {};
/// \endcode
/// templateTemplateParmDecl()
/// matches 'Z', but not 'N'.
extern const internal::VariadicDynCastAllOfMatcher<Decl,
TemplateTemplateParmDecl>
templateTemplateParmDecl;
/// Matches public C++ declarations and C++ base specifers that specify public
/// inheritance.
///
/// Examples:
/// \code
/// class C {
/// public: int a; // fieldDecl(isPublic()) matches 'a'
/// protected: int b;
/// private: int c;
/// };
/// \endcode
///
/// \code
/// class Base {};
/// class Derived1 : public Base {}; // matches 'Base'
/// struct Derived2 : Base {}; // matches 'Base'
/// \endcode
AST_POLYMORPHIC_MATCHER(isPublic,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(Decl,
                                                        CXXBaseSpecifier)) {
  // Shared helper handles both declarations and base specifiers.
  const auto Access = getAccessSpecifier(Node);
  return Access == AS_public;
}
/// Matches protected C++ declarations and C++ base specifers that specify
/// protected inheritance.
///
/// Examples:
/// \code
/// class C {
/// public: int a;
/// protected: int b; // fieldDecl(isProtected()) matches 'b'
/// private: int c;
/// };
/// \endcode
///
/// \code
/// class Base {};
/// class Derived : protected Base {}; // matches 'Base'
/// \endcode
AST_POLYMORPHIC_MATCHER(isProtected,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(Decl,
                                                        CXXBaseSpecifier)) {
  // Shared helper handles both declarations and base specifiers.
  const auto Access = getAccessSpecifier(Node);
  return Access == AS_protected;
}
/// Matches private C++ declarations and C++ base specifers that specify private
/// inheritance.
///
/// Examples:
/// \code
/// class C {
/// public: int a;
/// protected: int b;
/// private: int c; // fieldDecl(isPrivate()) matches 'c'
/// };
/// \endcode
///
/// \code
/// struct Base {};
/// struct Derived1 : private Base {}; // matches 'Base'
/// class Derived2 : Base {}; // matches 'Base'
/// \endcode
AST_POLYMORPHIC_MATCHER(isPrivate,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(Decl,
                                                        CXXBaseSpecifier)) {
  // Shared helper handles both declarations and base specifiers.
  const auto Access = getAccessSpecifier(Node);
  return Access == AS_private;
}
/// Matches non-static data members that are bit-fields.
///
/// Given
/// \code
/// class C {
/// int a : 2;
/// int b;
/// };
/// \endcode
/// fieldDecl(isBitField())
/// matches 'int a;' but not 'int b;'.
// Single-expression matcher; one-line form as used elsewhere in this file.
AST_MATCHER(FieldDecl, isBitField) { return Node.isBitField(); }
/// Matches non-static data members that are bit-fields of the specified
/// bit width.
///
/// Given
/// \code
/// class C {
/// int a : 2;
/// int b : 4;
/// int c : 2;
/// };
/// \endcode
/// fieldDecl(hasBitWidth(2))
/// matches 'int a;' and 'int c;' but not 'int b;'.
AST_MATCHER_P(FieldDecl, hasBitWidth, unsigned, Width) {
  // A member that is not a bit-field never matches, regardless of Width.
  if (!Node.isBitField())
    return false;
  return Node.getBitWidthValue(Finder->getASTContext()) == Width;
}
/// Matches non-static data members that have an in-class initializer.
///
/// Given
/// \code
/// class C {
/// int a = 2;
/// int b = 3;
/// int c;
/// };
/// \endcode
/// fieldDecl(hasInClassInitializer(integerLiteral(equals(2))))
/// matches 'int a;' but not 'int b;'.
/// fieldDecl(hasInClassInitializer(anything()))
/// matches 'int a;' and 'int b;' but not 'int c;'.
AST_MATCHER_P(FieldDecl, hasInClassInitializer, internal::Matcher<Expr>,
              InnerMatcher) {
  // Fields without an in-class initializer never match.
  if (const Expr *Init = Node.getInClassInitializer())
    return InnerMatcher.matches(*Init, Finder, Builder);
  return false;
}
/// Determines whether the function is "main", which is the entry point
/// into an executable program.
// Single-expression matcher; one-line form as used elsewhere in this file.
AST_MATCHER(FunctionDecl, isMain) { return Node.isMain(); }
/// Matches the specialized template of a specialization declaration.
///
/// Given
/// \code
/// template<typename T> class A {}; #1
/// template<> class A<int> {}; #2
/// \endcode
/// classTemplateSpecializationDecl(hasSpecializedTemplate(classTemplateDecl()))
/// matches '#2' with classTemplateDecl() matching the class template
/// declaration of 'A' at #1.
AST_MATCHER_P(ClassTemplateSpecializationDecl, hasSpecializedTemplate,
              internal::Matcher<ClassTemplateDecl>, InnerMatcher) {
  // Run the inner matcher on the template this specialization came from.
  if (const ClassTemplateDecl *Specialized = Node.getSpecializedTemplate())
    return InnerMatcher.matches(*Specialized, Finder, Builder);
  return false;
}
/// Matches a declaration that has been implicitly added
/// by the compiler (eg. implicit default/copy constructors).
// Single-expression matcher; one-line form as used elsewhere in this file.
AST_MATCHER(Decl, isImplicit) { return Node.isImplicit(); }
/// Matches classTemplateSpecializations, templateSpecializationType and
/// functionDecl that have at least one TemplateArgument matching the given
/// InnerMatcher.
///
/// Given
/// \code
/// template<typename T> class A {};
/// template<> class A<double> {};
/// A<int> a;
///
/// template<typename T> void f() {};
/// void func() { f<int>(); };
/// \endcode
///
/// classTemplateSpecializationDecl(hasAnyTemplateArgument(
/// refersToType(asString("int"))))
/// matches the specialization \c A<int>
///
/// functionDecl(hasAnyTemplateArgument(refersToType(asString("int"))))
/// matches the specialization \c f<int>
AST_POLYMORPHIC_MATCHER_P(
    hasAnyTemplateArgument,
    AST_POLYMORPHIC_SUPPORTED_TYPES(ClassTemplateSpecializationDecl,
                                    TemplateSpecializationType,
                                    FunctionDecl),
    internal::Matcher<TemplateArgument>, InnerMatcher) {
  // Succeeds as soon as any one template argument of the node matches.
  ArrayRef<TemplateArgument> Args =
      internal::getTemplateSpecializationArgs(Node);
  return matchesFirstInRange(InnerMatcher, Args.begin(), Args.end(), Finder,
                             Builder);
}
/// Causes all nested matchers to be matched with the specified traversal kind.
///
/// Given
/// \code
/// void foo()
/// {
/// int i = 3.0;
/// }
/// \endcode
/// The matcher
/// \code
/// traverse(TK_IgnoreImplicitCastsAndParentheses,
/// varDecl(hasInitializer(floatLiteral().bind("init")))
/// )
/// \endcode
/// matches the variable declaration with "init" bound to the "3.0".
template <typename T>
internal::Matcher<T> traverse(TraversalKind TK,
                              const internal::Matcher<T> &InnerMatcher) {
  // Wrap the inner matcher in a TraversalMatcher, reusing the inner
  // matcher's restriction (getID().first) for the wrapper.
  auto *Wrapper = new internal::TraversalMatcher<T>(TK, InnerMatcher);
  return internal::DynTypedMatcher::constructRestrictedWrapper(
             Wrapper, InnerMatcher.getID().first)
      .template unconditionalConvertTo<T>();
}
template <typename T>
internal::BindableMatcher<T>
traverse(TraversalKind TK, const internal::BindableMatcher<T> &InnerMatcher) {
  // Same wrapping as the Matcher<T> overload, but the result stays bindable.
  auto *Wrapper = new internal::TraversalMatcher<T>(TK, InnerMatcher);
  return internal::BindableMatcher<T>(
      internal::DynTypedMatcher::constructRestrictedWrapper(
          Wrapper, InnerMatcher.getID().first)
          .template unconditionalConvertTo<T>());
}
template <typename... T>
internal::TraversalWrapper<internal::VariadicOperatorMatcher<T...>>
traverse(TraversalKind TK,
         const internal::VariadicOperatorMatcher<T...> &InnerMatcher) {
  // Store the traversal kind alongside the variadic-operator matcher.
  using WrapperT =
      internal::TraversalWrapper<internal::VariadicOperatorMatcher<T...>>;
  return WrapperT(TK, InnerMatcher);
}
template <template <typename ToArg, typename FromArg> class ArgumentAdapterT,
          typename T, typename ToTypes>
internal::TraversalWrapper<
    internal::ArgumentAdaptingMatcherFuncAdaptor<ArgumentAdapterT, T, ToTypes>>
traverse(TraversalKind TK, const internal::ArgumentAdaptingMatcherFuncAdaptor<
                               ArgumentAdapterT, T, ToTypes> &InnerMatcher) {
  // Store the traversal kind alongside the argument-adapting matcher.
  using WrapperT = internal::TraversalWrapper<
      internal::ArgumentAdaptingMatcherFuncAdaptor<ArgumentAdapterT, T,
                                                   ToTypes>>;
  return WrapperT(TK, InnerMatcher);
}
template <template <typename T, typename P1> class MatcherT, typename P1,
          typename ReturnTypesF>
internal::TraversalWrapper<
    internal::PolymorphicMatcherWithParam1<MatcherT, P1, ReturnTypesF>>
traverse(TraversalKind TK, const internal::PolymorphicMatcherWithParam1<
                               MatcherT, P1, ReturnTypesF> &InnerMatcher) {
  // Store the traversal kind alongside the one-parameter polymorphic matcher.
  using WrapperT = internal::TraversalWrapper<
      internal::PolymorphicMatcherWithParam1<MatcherT, P1, ReturnTypesF>>;
  return WrapperT(TK, InnerMatcher);
}
template <template <typename T, typename P1, typename P2> class MatcherT,
          typename P1, typename P2, typename ReturnTypesF>
internal::TraversalWrapper<
    internal::PolymorphicMatcherWithParam2<MatcherT, P1, P2, ReturnTypesF>>
traverse(TraversalKind TK, const internal::PolymorphicMatcherWithParam2<
                               MatcherT, P1, P2, ReturnTypesF> &InnerMatcher) {
  // Store the traversal kind alongside the two-parameter polymorphic matcher.
  using WrapperT = internal::TraversalWrapper<
      internal::PolymorphicMatcherWithParam2<MatcherT, P1, P2, ReturnTypesF>>;
  return WrapperT(TK, InnerMatcher);
}
/// Matches expressions that match InnerMatcher after any implicit AST
/// nodes are stripped off.
///
/// Parentheses and explicit casts are not discarded.
/// Given
/// \code
/// class C {};
/// C a = C();
/// C b;
/// C c = b;
/// \endcode
/// The matchers
/// \code
/// varDecl(hasInitializer(ignoringImplicit(cxxConstructExpr())))
/// \endcode
/// would match the declarations for a, b, and c.
/// While
/// \code
/// varDecl(hasInitializer(cxxConstructExpr()))
/// \endcode
/// only match the declarations for b and c.
AST_MATCHER_P(Expr, ignoringImplicit, internal::Matcher<Expr>,
              InnerMatcher) {
  // Strip implicit AST nodes first, then re-run the inner matcher.
  const Expr *Stripped = Node.IgnoreImplicit();
  return InnerMatcher.matches(*Stripped, Finder, Builder);
}
/// Matches expressions that match InnerMatcher after any implicit casts
/// are stripped off.
///
/// Parentheses and explicit casts are not discarded.
/// Given
/// \code
/// int arr[5];
/// int a = 0;
/// char b = 0;
/// const int c = a;
/// int *d = arr;
/// long e = (long) 0l;
/// \endcode
/// The matchers
/// \code
/// varDecl(hasInitializer(ignoringImpCasts(integerLiteral())))
/// varDecl(hasInitializer(ignoringImpCasts(declRefExpr())))
/// \endcode
/// would match the declarations for a, b, c, and d, but not e.
/// While
/// \code
/// varDecl(hasInitializer(integerLiteral()))
/// varDecl(hasInitializer(declRefExpr()))
/// \endcode
/// only match the declarations for b, c, and d.
AST_MATCHER_P(Expr, ignoringImpCasts,
              internal::Matcher<Expr>, InnerMatcher) {
  // Strip implicit casts first, then re-run the inner matcher.
  const Expr *Stripped = Node.IgnoreImpCasts();
  return InnerMatcher.matches(*Stripped, Finder, Builder);
}
/// Matches expressions that match InnerMatcher after parentheses and
/// casts are stripped off.
///
/// Implicit and non-C Style casts are also discarded.
/// Given
/// \code
/// int a = 0;
/// char b = (0);
/// void* c = reinterpret_cast<char*>(0);
/// char d = char(0);
/// \endcode
/// The matcher
/// varDecl(hasInitializer(ignoringParenCasts(integerLiteral())))
/// would match the declarations for a, b, c, and d.
/// while
/// varDecl(hasInitializer(integerLiteral()))
/// only match the declaration for a.
AST_MATCHER_P(Expr, ignoringParenCasts, internal::Matcher<Expr>, InnerMatcher) {
  // Strip parentheses and all casts first, then re-run the inner matcher.
  const Expr *Stripped = Node.IgnoreParenCasts();
  return InnerMatcher.matches(*Stripped, Finder, Builder);
}
/// Matches expressions that match InnerMatcher after implicit casts and
/// parentheses are stripped off.
///
/// Explicit casts are not discarded.
/// Given
/// \code
/// int arr[5];
/// int a = 0;
/// char b = (0);
/// const int c = a;
/// int *d = (arr);
/// long e = ((long) 0l);
/// \endcode
/// The matchers
/// varDecl(hasInitializer(ignoringParenImpCasts(integerLiteral())))
/// varDecl(hasInitializer(ignoringParenImpCasts(declRefExpr())))
/// would match the declarations for a, b, c, and d, but not e.
/// while
/// varDecl(hasInitializer(integerLiteral()))
/// varDecl(hasInitializer(declRefExpr()))
/// would only match the declaration for a.
AST_MATCHER_P(Expr, ignoringParenImpCasts,
              internal::Matcher<Expr>, InnerMatcher) {
  // Strip parentheses and implicit casts first, then re-run the inner matcher.
  const Expr *Stripped = Node.IgnoreParenImpCasts();
  return InnerMatcher.matches(*Stripped, Finder, Builder);
}
/// Matches types that match InnerMatcher after any parens are stripped.
///
/// Given
/// \code
/// void (*fp)(void);
/// \endcode
/// The matcher
/// \code
/// varDecl(hasType(pointerType(pointee(ignoringParens(functionType())))))
/// \endcode
/// would match the declaration for fp.
AST_MATCHER_P_OVERLOAD(QualType, ignoringParens, internal::Matcher<QualType>,
                       InnerMatcher, 0) {
  // QualType's IgnoreParens returns by value, unlike the Expr overload.
  const auto Inner = Node.IgnoreParens();
  return InnerMatcher.matches(Inner, Finder, Builder);
}
/// Overload \c ignoringParens for \c Expr.
///
/// Given
/// \code
/// const char* str = ("my-string");
/// \endcode
/// The matcher
/// \code
/// implicitCastExpr(hasSourceExpression(ignoringParens(stringLiteral())))
/// \endcode
/// would match the implicit cast resulting from the assignment.
AST_MATCHER_P_OVERLOAD(Expr, ignoringParens, internal::Matcher<Expr>,
                       InnerMatcher, 1) {
  // Strip parentheses only; casts are left in place.
  return InnerMatcher.matches(*Node.IgnoreParens(), Finder, Builder);
}
/// Matches expressions that are instantiation-dependent even if it is
/// neither type- nor value-dependent.
///
/// In the following example, the expression sizeof(sizeof(T() + T()))
/// is instantiation-dependent (since it involves a template parameter T),
/// but is neither type- nor value-dependent, since the type of the inner
/// sizeof is known (std::size_t) and therefore the size of the outer
/// sizeof is known.
/// \code
/// template<typename T>
/// void f(T x, T y) { sizeof(sizeof(T() + T())); }
/// \endcode
/// expr(isInstantiationDependent()) matches sizeof(sizeof(T() + T()))
// Single-expression matcher; one-line form as used elsewhere in this file.
AST_MATCHER(Expr, isInstantiationDependent) {
  return Node.isInstantiationDependent();
}
/// Matches expressions that are type-dependent because the template type
/// is not yet instantiated.
///
/// For example, the expressions "x" and "x + y" are type-dependent in
/// the following code, but "y" is not type-dependent:
/// \code
/// template<typename T>
/// void add(T x, int y) {
/// x + y;
/// }
/// \endcode
/// expr(isTypeDependent()) matches x + y
AST_MATCHER(Expr, isTypeDependent) {
  // Delegates directly to the Expr dependence bit.
  return Node.isTypeDependent();
}
/// Matches expression that are value-dependent because they contain a
/// non-type template parameter.
///
/// For example, the array bound of "Chars" in the following example is
/// value-dependent.
/// \code
/// template<int Size> int f() { return Size; }
/// \endcode
/// expr(isValueDependent()) matches return Size
AST_MATCHER(Expr, isValueDependent) {
  // Delegates directly to the Expr dependence bit.
  return Node.isValueDependent();
}
/// Matches classTemplateSpecializations, templateSpecializationType and
/// functionDecl where the n'th TemplateArgument matches the given InnerMatcher.
///
/// Given
/// \code
/// template<typename T, typename U> class A {};
/// A<bool, int> b;
/// A<int, bool> c;
///
/// template<typename T> void f() {}
/// void func() { f<int>(); };
/// \endcode
/// classTemplateSpecializationDecl(hasTemplateArgument(
/// 1, refersToType(asString("int"))))
/// matches the specialization \c A<bool, int>
///
/// functionDecl(hasTemplateArgument(0, refersToType(asString("int"))))
/// matches the specialization \c f<int>
AST_POLYMORPHIC_MATCHER_P2(
    hasTemplateArgument,
    AST_POLYMORPHIC_SUPPORTED_TYPES(ClassTemplateSpecializationDecl,
                                    TemplateSpecializationType,
                                    FunctionDecl),
    unsigned, N, internal::Matcher<TemplateArgument>, InnerMatcher) {
  ArrayRef<TemplateArgument> Args =
      internal::getTemplateSpecializationArgs(Node);
  // An out-of-range index simply fails to match.
  return N < Args.size() && InnerMatcher.matches(Args[N], Finder, Builder);
}
/// Matches if the number of template arguments equals \p N.
///
/// Given
/// \code
/// template<typename T> struct C {};
/// C<int> c;
/// \endcode
/// classTemplateSpecializationDecl(templateArgumentCountIs(1))
/// matches C<int>.
AST_POLYMORPHIC_MATCHER_P(
    templateArgumentCountIs,
    AST_POLYMORPHIC_SUPPORTED_TYPES(ClassTemplateSpecializationDecl,
                                    TemplateSpecializationType),
    unsigned, N) {
  // Compare the argument-list length against the requested count.
  const auto Args = internal::getTemplateSpecializationArgs(Node);
  return Args.size() == N;
}
/// Matches a TemplateArgument that refers to a certain type.
///
/// Given
/// \code
/// struct X {};
/// template<typename T> struct A {};
/// A<X> a;
/// \endcode
/// classTemplateSpecializationDecl(hasAnyTemplateArgument(
/// refersToType(class(hasName("X")))))
/// matches the specialization \c A<X>
AST_MATCHER_P(TemplateArgument, refersToType,
              internal::Matcher<QualType>, InnerMatcher) {
  // Only type-kind template arguments can refer to a type.
  return Node.getKind() == TemplateArgument::Type &&
         InnerMatcher.matches(Node.getAsType(), Finder, Builder);
}
/// Matches a TemplateArgument that refers to a certain template.
///
/// Given
/// \code
/// template<template <typename> class S> class X {};
/// template<typename T> class Y {};
/// X<Y> xi;
/// \endcode
/// classTemplateSpecializationDecl(hasAnyTemplateArgument(
/// refersToTemplate(templateName())))
/// matches the specialization \c X<Y>
AST_MATCHER_P(TemplateArgument, refersToTemplate,
              internal::Matcher<TemplateName>, InnerMatcher) {
  // Only template-kind template arguments can refer to a template.
  return Node.getKind() == TemplateArgument::Template &&
         InnerMatcher.matches(Node.getAsTemplate(), Finder, Builder);
}
/// Matches a canonical TemplateArgument that refers to a certain
/// declaration.
///
/// Given
/// \code
/// struct B { int next; };
/// template<int(B::*next_ptr)> struct A {};
/// A<&B::next> a;
/// \endcode
/// classTemplateSpecializationDecl(hasAnyTemplateArgument(
/// refersToDeclaration(fieldDecl(hasName("next")))))
/// matches the specialization \c A<&B::next> with \c fieldDecl(...) matching
/// \c B::next
AST_MATCHER_P(TemplateArgument, refersToDeclaration,
              internal::Matcher<Decl>, InnerMatcher) {
  // Only declaration-kind template arguments carry a Decl.
  if (Node.getKind() != TemplateArgument::Declaration)
    return false;
  return InnerMatcher.matches(*Node.getAsDecl(), Finder, Builder);
}
/// Matches a sugar TemplateArgument that refers to a certain expression.
///
/// Given
/// \code
/// struct B { int next; };
/// template<int(B::*next_ptr)> struct A {};
/// A<&B::next> a;
/// \endcode
/// templateSpecializationType(hasAnyTemplateArgument(
/// isExpr(hasDescendant(declRefExpr(to(fieldDecl(hasName("next"))))))))
/// matches the specialization \c A<&B::next> with \c fieldDecl(...) matching
/// \c B::next
AST_MATCHER_P(TemplateArgument, isExpr, internal::Matcher<Expr>, InnerMatcher) {
  // Only expression-kind template arguments carry an Expr.
  if (Node.getKind() != TemplateArgument::Expression)
    return false;
  return InnerMatcher.matches(*Node.getAsExpr(), Finder, Builder);
}
/// Matches a TemplateArgument that is an integral value.
///
/// Given
/// \code
/// template<int T> struct C {};
/// C<42> c;
/// \endcode
/// classTemplateSpecializationDecl(
/// hasAnyTemplateArgument(isIntegral()))
/// matches the implicit instantiation of C in C<42>
/// with isIntegral() matching 42.
AST_MATCHER(TemplateArgument, isIntegral) {
  // True iff the argument is an integral constant (e.g. the 42 in C<42>).
  const TemplateArgument::ArgKind Kind = Node.getKind();
  return Kind == TemplateArgument::Integral;
}
/// Matches a TemplateArgument that refers to an integral type.
///
/// Given
/// \code
/// template<int T> struct C {};
/// C<42> c;
/// \endcode
/// classTemplateSpecializationDecl(
/// hasAnyTemplateArgument(refersToIntegralType(asString("int"))))
/// matches the implicit instantiation of C in C<42>.
AST_MATCHER_P(TemplateArgument, refersToIntegralType,
              internal::Matcher<QualType>, InnerMatcher) {
  // getIntegralType() is only valid for integral-kind arguments.
  return Node.getKind() == TemplateArgument::Integral &&
         InnerMatcher.matches(Node.getIntegralType(), Finder, Builder);
}
/// Matches a TemplateArgument of integral type with a given value.
///
/// Note that 'Value' is a string as the template argument's value is
/// an arbitrary precision integer. 'Value' must be equal to the canonical
/// representation of that integral value in base 10.
///
/// Given
/// \code
/// template<int T> struct C {};
/// C<42> c;
/// \endcode
/// classTemplateSpecializationDecl(
/// hasAnyTemplateArgument(equalsIntegralValue("42")))
/// matches the implicit instantiation of C in C<42>.
AST_MATCHER_P(TemplateArgument, equalsIntegralValue,
              std::string, Value) {
  // Compare against the canonical base-10 rendering of the argument's value;
  // non-integral arguments never match.
  return Node.getKind() == TemplateArgument::Integral &&
         Node.getAsIntegral().toString(10) == Value;
}
/// Matches an Objective-C autorelease pool statement.
///
/// Given
/// \code
/// @autoreleasepool {
/// int x = 0;
/// }
/// \endcode
/// autoreleasePoolStmt(stmt()) matches the declaration of "x"
/// inside the autorelease pool.
extern const internal::VariadicDynCastAllOfMatcher<Stmt,
ObjCAutoreleasePoolStmt> autoreleasePoolStmt;
/// Matches any value declaration.
///
/// Example matches A, B, C and F
/// \code
/// enum X { A, B, C };
/// void F();
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, ValueDecl> valueDecl;
/// Matches C++ constructor declarations.
///
/// Example matches Foo::Foo() and Foo::Foo(int)
/// \code
/// class Foo {
/// public:
/// Foo();
/// Foo(int);
/// int DoSomething();
/// };
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXConstructorDecl>
cxxConstructorDecl;
/// Matches explicit C++ destructor declarations.
///
/// Example matches Foo::~Foo()
/// \code
/// class Foo {
/// public:
/// virtual ~Foo();
/// };
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXDestructorDecl>
cxxDestructorDecl;
/// Matches enum declarations.
///
/// Example matches X
/// \code
/// enum X {
/// A, B, C
/// };
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, EnumDecl> enumDecl;
/// Matches enum constants.
///
/// Example matches A, B, C
/// \code
/// enum X {
/// A, B, C
/// };
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, EnumConstantDecl>
enumConstantDecl;
/// Matches tag declarations.
///
/// Example matches X, Z, U, S, E
/// \code
/// class X;
/// template<class T> class Z {};
/// struct S {};
/// union U {};
/// enum E {
/// A, B, C
/// };
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, TagDecl> tagDecl;
/// Matches method declarations.
///
/// Example matches y
/// \code
/// class X { void y(); };
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXMethodDecl>
cxxMethodDecl;
/// Matches conversion operator declarations.
///
/// Example matches the operator.
/// \code
/// class X { operator int() const; };
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXConversionDecl>
cxxConversionDecl;
/// Matches user-defined and implicitly generated deduction guide.
///
/// Example matches the deduction guide.
/// \code
/// template<typename T>
/// class X { X(int) };
/// X(int) -> X<int>;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXDeductionGuideDecl>
cxxDeductionGuideDecl;
/// Matches variable declarations.
///
/// Note: this does not match declarations of member variables, which are
/// "field" declarations in Clang parlance.
///
/// Example matches a
/// \code
/// int a;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, VarDecl> varDecl;
/// Matches field declarations.
///
/// Given
/// \code
/// class X { int m; };
/// \endcode
/// fieldDecl()
/// matches 'm'.
extern const internal::VariadicDynCastAllOfMatcher<Decl, FieldDecl> fieldDecl;
/// Matches indirect field declarations.
///
/// Given
/// \code
/// struct X { struct { int a; }; };
/// \endcode
/// indirectFieldDecl()
/// matches 'a'.
extern const internal::VariadicDynCastAllOfMatcher<Decl, IndirectFieldDecl>
indirectFieldDecl;
/// Matches function declarations.
///
/// Example matches f
/// \code
/// void f();
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, FunctionDecl>
functionDecl;
/// Matches C++ function template declarations.
///
/// Example matches f
/// \code
/// template<class T> void f(T t) {}
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, FunctionTemplateDecl>
functionTemplateDecl;
/// Matches friend declarations.
///
/// Given
/// \code
/// class X { friend void foo(); };
/// \endcode
/// friendDecl()
/// matches 'friend void foo()'.
extern const internal::VariadicDynCastAllOfMatcher<Decl, FriendDecl> friendDecl;
/// Matches statements.
///
/// Given
/// \code
/// { ++a; }
/// \endcode
/// stmt()
/// matches both the compound statement '{ ++a; }' and '++a'.
extern const internal::VariadicAllOfMatcher<Stmt> stmt;
/// Matches declaration statements.
///
/// Given
/// \code
/// int a;
/// \endcode
/// declStmt()
/// matches 'int a'.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, DeclStmt> declStmt;
/// Matches member expressions.
///
/// Given
/// \code
/// class Y {
/// void x() { this->x(); x(); Y y; y.x(); a; this->b; Y::b; }
/// int a; static int b;
/// };
/// \endcode
/// memberExpr()
/// matches this->x, x, y.x, a, this->b
extern const internal::VariadicDynCastAllOfMatcher<Stmt, MemberExpr> memberExpr;
/// Matches unresolved member expressions.
///
/// Given
/// \code
/// struct X {
/// template <class T> void f();
/// void g();
/// };
/// template <class T> void h() { X x; x.f<T>(); x.g(); }
/// \endcode
/// unresolvedMemberExpr()
/// matches x.f<T>
extern const internal::VariadicDynCastAllOfMatcher<Stmt, UnresolvedMemberExpr>
unresolvedMemberExpr;
/// Matches member expressions where the actual member referenced could not be
/// resolved because the base expression or the member name was dependent.
///
/// Given
/// \code
/// template <class T> void f() { T t; t.g(); }
/// \endcode
/// cxxDependentScopeMemberExpr()
/// matches t.g
extern const internal::VariadicDynCastAllOfMatcher<Stmt,
CXXDependentScopeMemberExpr>
cxxDependentScopeMemberExpr;
/// Matches call expressions.
///
/// Example matches x.y() and y()
/// \code
/// X x;
/// x.y();
/// y();
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CallExpr> callExpr;
/// Matches call expressions which were resolved using ADL.
///
/// Example matches y(x) but not y(42) or NS::y(x).
/// \code
/// namespace NS {
/// struct X {};
/// void y(X);
/// }
///
/// void y(...);
///
/// void test() {
/// NS::X x;
/// y(x); // Matches
/// NS::y(x); // Doesn't match
/// y(42); // Doesn't match
/// using NS::y;
/// y(x); // Found by both unqualified lookup and ADL, doesn't match
/// }
/// \endcode
AST_MATCHER(CallExpr, usesADL) { return Node.usesADL(); }
/// Matches lambda expressions.
///
/// Example matches [&](){return 5;}
/// \code
/// [&](){return 5;}
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, LambdaExpr> lambdaExpr;
/// Matches member call expressions.
///
/// Example matches x.y()
/// \code
/// X x;
/// x.y();
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXMemberCallExpr>
cxxMemberCallExpr;
/// Matches ObjectiveC Message invocation expressions.
///
/// The innermost message send invokes the "alloc" class method on the
/// NSString class, while the outermost message send invokes the
/// "initWithString" instance method on the object returned from
/// NSString's "alloc". This matcher should match both message sends.
/// \code
/// [[NSString alloc] initWithString:@"Hello"]
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCMessageExpr>
objcMessageExpr;
/// Matches Objective-C interface declarations.
///
/// Example matches Foo
/// \code
/// @interface Foo
/// @end
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCInterfaceDecl>
objcInterfaceDecl;
/// Matches Objective-C implementation declarations.
///
/// Example matches Foo
/// \code
/// @implementation Foo
/// @end
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCImplementationDecl>
objcImplementationDecl;
/// Matches Objective-C protocol declarations.
///
/// Example matches FooDelegate
/// \code
/// @protocol FooDelegate
/// @end
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCProtocolDecl>
objcProtocolDecl;
/// Matches Objective-C category declarations.
///
/// Example matches Foo (Additions)
/// \code
/// @interface Foo (Additions)
/// @end
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCCategoryDecl>
objcCategoryDecl;
/// Matches Objective-C category definitions.
///
/// Example matches Foo (Additions)
/// \code
/// @implementation Foo (Additions)
/// @end
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCCategoryImplDecl>
objcCategoryImplDecl;
/// Matches Objective-C method declarations.
///
/// Example matches both declaration and definition of -[Foo method]
/// \code
/// @interface Foo
/// - (void)method;
/// @end
///
/// @implementation Foo
/// - (void)method {}
/// @end
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCMethodDecl>
objcMethodDecl;
/// Matches block declarations.
///
/// Example matches the declaration of the nameless block printing an input
/// integer.
///
/// \code
/// myFunc(^(int p) {
/// printf("%d", p);
/// })
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, BlockDecl>
blockDecl;
/// Matches Objective-C instance variable declarations.
///
/// Example matches _enabled
/// \code
/// @implementation Foo {
/// BOOL _enabled;
/// }
/// @end
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCIvarDecl>
objcIvarDecl;
/// Matches Objective-C property declarations.
///
/// Example matches enabled
/// \code
/// @interface Foo
/// @property BOOL enabled;
/// @end
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCPropertyDecl>
objcPropertyDecl;
/// Matches Objective-C \@throw statements.
///
/// Example matches \@throw
/// \code
/// @throw obj;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCAtThrowStmt>
objcThrowStmt;
/// Matches Objective-C @try statements.
///
/// Example matches @try
/// \code
/// @try {}
/// @catch (...) {}
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCAtTryStmt>
objcTryStmt;
/// Matches Objective-C @catch statements.
///
/// Example matches @catch
/// \code
/// @try {}
/// @catch (...) {}
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCAtCatchStmt>
objcCatchStmt;
/// Matches Objective-C @finally statements.
///
/// Example matches @finally
/// \code
/// @try {}
/// @finally {}
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCAtFinallyStmt>
objcFinallyStmt;
/// Matches expressions that introduce cleanups to be run at the end
/// of the sub-expression's evaluation.
///
/// Example matches std::string()
/// \code
/// const std::string str = std::string();
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ExprWithCleanups>
exprWithCleanups;
/// Matches init list expressions.
///
/// Given
/// \code
/// int a[] = { 1, 2 };
/// struct B { int x, y; };
/// B b = { 5, 6 };
/// \endcode
/// initListExpr()
/// matches "{ 1, 2 }" and "{ 5, 6 }"
extern const internal::VariadicDynCastAllOfMatcher<Stmt, InitListExpr>
initListExpr;
/// Matches the syntactic form of init list expressions
/// (if expressions have it).
AST_MATCHER_P(InitListExpr, hasSyntacticForm,
              internal::Matcher<Expr>, InnerMatcher) {
  // Semantic-form-only init lists have no syntactic form and never match.
  if (const Expr *Syntactic = Node.getSyntacticForm())
    return InnerMatcher.matches(*Syntactic, Finder, Builder);
  return false;
}
/// Matches C++ initializer list expressions.
///
/// Given
/// \code
/// std::vector<int> a({ 1, 2, 3 });
/// std::vector<int> b = { 4, 5 };
/// int c[] = { 6, 7 };
/// std::pair<int, int> d = { 8, 9 };
/// \endcode
/// cxxStdInitializerListExpr()
/// matches "{ 1, 2, 3 }" and "{ 4, 5 }"
extern const internal::VariadicDynCastAllOfMatcher<Stmt,
CXXStdInitializerListExpr>
cxxStdInitializerListExpr;
/// Matches implicit initializers of init list expressions.
///
/// Given
/// \code
/// point ptarray[10] = { [2].y = 1.0, [2].x = 2.0, [0].x = 1.0 };
/// \endcode
/// implicitValueInitExpr()
/// matches "[0].y" (implicitly)
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ImplicitValueInitExpr>
implicitValueInitExpr;
/// Matches paren list expressions.
/// ParenListExprs don't have a predefined type and are used for late parsing.
/// In the final AST, they can be met in template declarations.
///
/// Given
/// \code
/// template<typename T> class X {
/// void f() {
/// X x(*this);
/// int a = 0, b = 1; int i = (a, b);
/// }
/// };
/// \endcode
/// parenListExpr() matches "*this" but NOT matches (a, b) because (a, b)
/// has a predefined type and is a ParenExpr, not a ParenListExpr.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ParenListExpr>
parenListExpr;
/// Matches substitutions of non-type template parameters.
///
/// Given
/// \code
/// template <int N>
/// struct A { static const int n = N; };
/// struct B : public A<42> {};
/// \endcode
/// substNonTypeTemplateParmExpr()
/// matches "N" in the right-hand side of "static const int n = N;"
extern const internal::VariadicDynCastAllOfMatcher<Stmt,
SubstNonTypeTemplateParmExpr>
substNonTypeTemplateParmExpr;
/// Matches using declarations.
///
/// Given
/// \code
/// namespace X { int x; }
/// using X::x;
/// \endcode
/// usingDecl()
/// matches \code using X::x \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, UsingDecl> usingDecl;
/// Matches using namespace declarations.
///
/// Given
/// \code
/// namespace X { int x; }
/// using namespace X;
/// \endcode
/// usingDirectiveDecl()
/// matches \code using namespace X \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, UsingDirectiveDecl>
usingDirectiveDecl;
/// Matches reference to a name that can be looked up during parsing
/// but could not be resolved to a specific declaration.
///
/// Given
/// \code
/// template<typename T>
/// T foo() { T a; return a; }
/// template<typename T>
/// void bar() {
/// foo<T>();
/// }
/// \endcode
/// unresolvedLookupExpr()
/// matches \code foo<T>() \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, UnresolvedLookupExpr>
unresolvedLookupExpr;
/// Matches unresolved using value declarations.
///
/// Given
/// \code
/// template<typename X>
/// class C : private X {
/// using X::x;
/// };
/// \endcode
/// unresolvedUsingValueDecl()
/// matches \code using X::x \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl,
UnresolvedUsingValueDecl>
unresolvedUsingValueDecl;
/// Matches unresolved using value declarations that involve the
/// typename.
///
/// Given
/// \code
/// template <typename T>
/// struct Base { typedef T Foo; };
///
/// template<typename T>
/// struct S : private Base<T> {
/// using typename Base<T>::Foo;
/// };
/// \endcode
/// unresolvedUsingTypenameDecl()
/// matches \code using Base<T>::Foo \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl,
UnresolvedUsingTypenameDecl>
unresolvedUsingTypenameDecl;
/// Matches a constant expression wrapper.
///
/// Example matches the constant in the case statement:
/// (matcher = constantExpr())
/// \code
/// switch (a) {
/// case 37: break;
/// }
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ConstantExpr>
constantExpr;
/// Matches parentheses used in expressions.
///
/// Example matches (foo() + 1)
/// \code
/// int foo() { return 1; }
/// int a = (foo() + 1);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ParenExpr> parenExpr;
/// Matches constructor call expressions (including implicit ones).
///
/// Example matches string(ptr, n) and ptr within arguments of f
/// (matcher = cxxConstructExpr())
/// \code
/// void f(const string &a, const string &b);
/// char *ptr;
/// int n;
/// f(string(ptr, n), ptr);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXConstructExpr>
cxxConstructExpr;
/// Matches unresolved constructor call expressions.
///
/// Example matches T(t) in return statement of f
/// (matcher = cxxUnresolvedConstructExpr())
/// \code
/// template <typename T>
/// void f(const T& t) { return T(t); }
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt,
CXXUnresolvedConstructExpr>
cxxUnresolvedConstructExpr;
/// Matches implicit and explicit this expressions.
///
/// Example matches the implicit this expression in "return i".
/// (matcher = cxxThisExpr())
/// \code
/// struct foo {
/// int i;
/// int f() { return i; }
/// };
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXThisExpr>
cxxThisExpr;
/// Matches nodes where temporaries are created.
///
/// Example matches FunctionTakesString(GetStringByValue())
/// (matcher = cxxBindTemporaryExpr())
/// \code
/// FunctionTakesString(GetStringByValue());
/// FunctionTakesStringByPointer(GetStringPointer());
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXBindTemporaryExpr>
cxxBindTemporaryExpr;
/// Matches nodes where temporaries are materialized.
///
/// Example: Given
/// \code
/// struct T {void func();};
/// T f();
/// void g(T);
/// \endcode
/// materializeTemporaryExpr() matches 'f()' in these statements
/// \code
/// T u(f());
/// g(f());
/// f().func();
/// \endcode
/// but does not match
/// \code
/// f();
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt,
MaterializeTemporaryExpr>
materializeTemporaryExpr;
/// Matches new expressions.
///
/// Given
/// \code
/// new X;
/// \endcode
/// cxxNewExpr()
/// matches 'new X'.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXNewExpr> cxxNewExpr;
/// Matches delete expressions.
///
/// Given
/// \code
/// delete X;
/// \endcode
/// cxxDeleteExpr()
/// matches 'delete X'.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXDeleteExpr>
cxxDeleteExpr;
/// Matches noexcept expressions.
///
/// Given
/// \code
/// bool a() noexcept;
/// bool b() noexcept(true);
/// bool c() noexcept(false);
/// bool d() noexcept(noexcept(a()));
/// bool e = noexcept(b()) || noexcept(c());
/// \endcode
/// cxxNoexceptExpr()
/// matches `noexcept(a())`, `noexcept(b())` and `noexcept(c())`.
/// doesn't match the noexcept specifier in the declarations a, b, c or d.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXNoexceptExpr>
cxxNoexceptExpr;
/// Matches array subscript expressions.
///
/// Given
/// \code
/// int i = a[1];
/// \endcode
/// arraySubscriptExpr()
/// matches "a[1]"
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ArraySubscriptExpr>
arraySubscriptExpr;
/// Matches the value of a default argument at the call site.
///
/// Example matches the CXXDefaultArgExpr placeholder inserted for the
/// default value of the second parameter in the call expression f(42)
/// (matcher = cxxDefaultArgExpr())
/// \code
/// void f(int x, int y = 0);
/// f(42);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXDefaultArgExpr>
cxxDefaultArgExpr;
/// Matches overloaded operator calls.
///
/// Note that if an operator isn't overloaded, it won't match. Instead, use
/// binaryOperator matcher.
/// Currently it does not match operators such as new delete.
/// FIXME: figure out why these do not match?
///
/// Example matches both operator<<((o << b), c) and operator<<(o, b)
/// (matcher = cxxOperatorCallExpr())
/// \code
/// ostream &operator<< (ostream &out, int i) { };
/// ostream &o; int b = 1, c = 1;
/// o << b << c;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXOperatorCallExpr>
cxxOperatorCallExpr;
/// Matches expressions.
///
/// Example matches x()
/// \code
/// void f() { x(); }
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, Expr> expr;
/// Matches expressions that refer to declarations.
///
/// Example matches x in if (x)
/// \code
/// bool x;
/// if (x) {}
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, DeclRefExpr>
declRefExpr;
/// Matches a reference to an ObjCIvar.
///
/// Example: matches "a" in "init" method:
/// \code
/// @implementation A {
/// NSString *a;
/// }
/// - (void) init {
/// a = @"hello";
/// }
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCIvarRefExpr>
objcIvarRefExpr;
/// Matches a reference to a block.
///
/// Example: matches "^{}":
/// \code
/// void f() { ^{}(); }
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, BlockExpr> blockExpr;
/// Matches if statements.
///
/// Example matches 'if (x) {}'
/// \code
/// if (x) {}
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, IfStmt> ifStmt;
/// Matches for statements.
///
/// Example matches 'for (;;) {}'
/// \code
/// for (;;) {}
/// int i[] = {1, 2, 3}; for (auto a : i);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ForStmt> forStmt;
/// Matches the increment statement of a for loop.
///
/// Example:
/// forStmt(hasIncrement(unaryOperator(hasOperatorName("++"))))
/// matches '++x' in
/// \code
/// for (x; x < N; ++x) { }
/// \endcode
AST_MATCHER_P(ForStmt, hasIncrement, internal::Matcher<Stmt>,
              InnerMatcher) {
  // A for loop may omit its increment (e.g. 'for (;;)'); no increment, no match.
  if (const Stmt *Inc = Node.getInc())
    return InnerMatcher.matches(*Inc, Finder, Builder);
  return false;
}
/// Matches the initialization statement of a for loop.
///
/// Example:
/// forStmt(hasLoopInit(declStmt()))
/// matches 'int x = 0' in
/// \code
/// for (int x = 0; x < N; ++x) { }
/// \endcode
AST_MATCHER_P(ForStmt, hasLoopInit, internal::Matcher<Stmt>,
              InnerMatcher) {
  // A for loop may omit its init statement (e.g. 'for (;;)'); no init, no match.
  if (const Stmt *InitStmt = Node.getInit())
    return InnerMatcher.matches(*InitStmt, Finder, Builder);
  return false;
}
/// Matches range-based for statements.
///
/// cxxForRangeStmt() matches 'for (auto a : i)'
/// \code
/// int i[] = {1, 2, 3}; for (auto a : i);
/// for(int j = 0; j < 5; ++j);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXForRangeStmt>
cxxForRangeStmt;
/// Matches the loop variable declared by a range-based for loop.
///
/// Example:
/// cxxForRangeStmt(hasLoopVariable(anything()))
/// matches 'int x' in
/// \code
/// for (int x : a) { }
/// \endcode
AST_MATCHER_P(CXXForRangeStmt, hasLoopVariable, internal::Matcher<VarDecl>,
              InnerMatcher) {
  // getLoopVariable() may be null for dependent/invalid loops; then no match.
  if (const VarDecl *LoopVar = Node.getLoopVariable())
    return InnerMatcher.matches(*LoopVar, Finder, Builder);
  return false;
}
/// Matches the range-initializer expression of a range-based for loop.
///
/// Example:
/// cxxForRangeStmt(hasRangeInit(anything()))
/// matches 'a' in
/// \code
/// for (int x : a) { }
/// \endcode
AST_MATCHER_P(CXXForRangeStmt, hasRangeInit, internal::Matcher<Expr>,
              InnerMatcher) {
  // getRangeInit() may be null for dependent/invalid loops; then no match.
  if (const Expr *RangeInit = Node.getRangeInit())
    return InnerMatcher.matches(*RangeInit, Finder, Builder);
  return false;
}
/// Matches while statements.
///
/// Given
/// \code
/// while (true) {}
/// \endcode
/// whileStmt()
/// matches 'while (true) {}'.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, WhileStmt> whileStmt;
/// Matches do statements.
///
/// Given
/// \code
/// do {} while (true);
/// \endcode
/// doStmt()
/// matches 'do {} while(true)'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, DoStmt> doStmt;
/// Matches break statements.
///
/// Given
/// \code
/// while (true) { break; }
/// \endcode
/// breakStmt()
/// matches 'break'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, BreakStmt> breakStmt;
/// Matches continue statements.
///
/// Given
/// \code
/// while (true) { continue; }
/// \endcode
/// continueStmt()
/// matches 'continue'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ContinueStmt>
continueStmt;
/// Matches return statements.
///
/// Given
/// \code
/// return 1;
/// \endcode
/// returnStmt()
/// matches 'return 1'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ReturnStmt> returnStmt;
/// Matches goto statements.
///
/// Given
/// \code
/// goto FOO;
/// FOO: bar();
/// \endcode
/// gotoStmt()
/// matches 'goto FOO'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, GotoStmt> gotoStmt;
/// Matches label statements.
///
/// Given
/// \code
/// goto FOO;
/// FOO: bar();
/// \endcode
/// labelStmt()
/// matches 'FOO:'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, LabelStmt> labelStmt;
/// Matches address of label statements (GNU extension).
///
/// Given
/// \code
/// FOO: bar();
/// void *ptr = &&FOO;
/// goto *bar;
/// \endcode
/// addrLabelExpr()
/// matches '&&FOO'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, AddrLabelExpr>
addrLabelExpr;
/// Matches switch statements.
///
/// Given
/// \code
/// switch(a) { case 42: break; default: break; }
/// \endcode
/// switchStmt()
/// matches 'switch(a)'.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, SwitchStmt> switchStmt;
/// Matches case and default statements inside switch statements.
///
/// Given
/// \code
/// switch(a) { case 42: break; default: break; }
/// \endcode
/// switchCase()
/// matches 'case 42:' and 'default:'.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, SwitchCase> switchCase;
/// Matches case statements inside switch statements.
///
/// Given
/// \code
/// switch(a) { case 42: break; default: break; }
/// \endcode
/// caseStmt()
/// matches 'case 42:'.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CaseStmt> caseStmt;
/// Matches default statements inside switch statements.
///
/// Given
/// \code
/// switch(a) { case 42: break; default: break; }
/// \endcode
/// defaultStmt()
/// matches 'default:'.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, DefaultStmt>
defaultStmt;
/// Matches compound statements.
///
/// Example matches '{}' and '{{}}' in 'for (;;) {{}}'
/// \code
/// for (;;) {{}}
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CompoundStmt>
compoundStmt;
/// Matches catch statements.
///
/// \code
/// try {} catch(int i) {}
/// \endcode
/// cxxCatchStmt()
/// matches 'catch(int i)'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXCatchStmt>
cxxCatchStmt;
/// Matches try statements.
///
/// \code
/// try {} catch(int i) {}
/// \endcode
/// cxxTryStmt()
/// matches 'try {}'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXTryStmt> cxxTryStmt;
/// Matches throw expressions.
///
/// \code
/// try { throw 5; } catch(int i) {}
/// \endcode
/// cxxThrowExpr()
/// matches 'throw 5'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXThrowExpr>
cxxThrowExpr;
/// Matches null statements.
///
/// \code
/// foo();;
/// \endcode
/// nullStmt()
/// matches the second ';'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, NullStmt> nullStmt;
/// Matches asm statements.
///
/// \code
/// int i = 100;
/// __asm("mov al, 2");
/// \endcode
/// asmStmt()
/// matches '__asm("mov al, 2")'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, AsmStmt> asmStmt;
/// Matches bool literals.
///
/// Example matches true
/// \code
/// true
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXBoolLiteralExpr>
cxxBoolLiteral;
/// Matches string literals (also matches wide string literals).
///
/// Example matches "abcd", L"abcd"
/// \code
/// char *s = "abcd";
/// wchar_t *ws = L"abcd";
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, StringLiteral>
stringLiteral;
/// Matches character literals (also matches wchar_t).
///
/// Not matching Hex-encoded chars (e.g. 0x1234, which is a IntegerLiteral),
/// though.
///
/// Example matches 'a', L'a'
/// \code
/// char ch = 'a';
/// wchar_t chw = L'a';
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CharacterLiteral>
characterLiteral;
/// Matches integer literals of all sizes / encodings, e.g.
/// 1, 1L, 0x1 and 1U.
///
/// Does not match character-encoded integers such as L'a'.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, IntegerLiteral>
integerLiteral;
/// Matches float literals of all sizes / encodings, e.g.
/// 1.0, 1.0f, 1.0L and 1e10.
///
/// Does not match implicit conversions such as
/// \code
/// float a = 10;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, FloatingLiteral>
floatLiteral;
/// Matches imaginary literals, which are based on integer and floating
/// point literals e.g.: 1i, 1.0i
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ImaginaryLiteral>
imaginaryLiteral;
/// Matches fixed point literals
extern const internal::VariadicDynCastAllOfMatcher<Stmt, FixedPointLiteral>
fixedPointLiteral;
/// Matches user defined literal operator call.
///
/// Example match: "foo"_suffix
extern const internal::VariadicDynCastAllOfMatcher<Stmt, UserDefinedLiteral>
userDefinedLiteral;
/// Matches compound (i.e. non-scalar) literals
///
/// Example match: {1}, (1, 2)
/// \code
/// int array[4] = {1};
/// vector int myvec = (vector int)(1, 2);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CompoundLiteralExpr>
compoundLiteralExpr;
/// Matches nullptr literal.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXNullPtrLiteralExpr>
cxxNullPtrLiteralExpr;
/// Matches GNU __builtin_choose_expr.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ChooseExpr>
chooseExpr;
/// Matches GNU __null expression.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, GNUNullExpr>
gnuNullExpr;
/// Matches atomic builtins.
/// Example matches __atomic_load_n(ptr, 1)
/// \code
/// void foo() { int *ptr; __atomic_load_n(ptr, 1); }
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, AtomicExpr> atomicExpr;
/// Matches statement expression (GNU extension).
///
/// Example match: ({ int X = 4; X; })
/// \code
/// int C = ({ int X = 4; X; });
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, StmtExpr> stmtExpr;
/// Matches binary operator expressions.
///
/// Example matches a || b
/// \code
/// !(a || b)
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, BinaryOperator>
binaryOperator;
/// Matches unary operator expressions.
///
/// Example matches !a
/// \code
/// !a || b
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, UnaryOperator>
unaryOperator;
/// Matches conditional operator expressions.
///
/// Example matches a ? b : c
/// \code
/// (a ? b : c) + 42
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ConditionalOperator>
conditionalOperator;
/// Matches binary conditional operator expressions (GNU extension).
///
/// Example matches a ?: b
/// \code
/// (a ?: b) + 42;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt,
BinaryConditionalOperator>
binaryConditionalOperator;
/// Matches opaque value expressions. They are used as helpers
/// to reference another expressions and can be met
/// in BinaryConditionalOperators, for example.
///
/// Example matches 'a'
/// \code
/// (a ?: c) + 42;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, OpaqueValueExpr>
opaqueValueExpr;
/// Matches a C++ static_assert declaration.
///
/// Example:
/// staticAssertDecl()
/// matches
/// static_assert(sizeof(S) == sizeof(int))
/// in
/// \code
/// struct S {
/// int x;
/// };
/// static_assert(sizeof(S) == sizeof(int));
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, StaticAssertDecl>
staticAssertDecl;
/// Matches a reinterpret_cast expression.
///
/// Either the source expression or the destination type can be matched
/// using has(), but hasDestinationType() is more specific and can be
/// more readable.
///
/// Example matches reinterpret_cast<char*>(&p) in
/// \code
///   void* p = reinterpret_cast<char*>(&p);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXReinterpretCastExpr>
    cxxReinterpretCastExpr;
/// Matches a C++ static_cast expression.
///
/// \see hasDestinationType
/// \see cxxReinterpretCastExpr
///
/// Example:
///   cxxStaticCastExpr()
/// matches
///   static_cast<long>(8)
/// in
/// \code
///   long eight(static_cast<long>(8));
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXStaticCastExpr>
    cxxStaticCastExpr;
/// Matches a dynamic_cast expression.
///
/// Example:
///   cxxDynamicCastExpr()
/// matches
///   dynamic_cast<D*>(&b);
/// in
/// \code
///   struct B { virtual ~B() {} }; struct D : B {};
///   B b;
///   D* p = dynamic_cast<D*>(&b);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXDynamicCastExpr>
    cxxDynamicCastExpr;
/// Matches a const_cast expression.
///
/// Example: Matches const_cast<int*>(&r) in
/// \code
///   int n = 42;
///   const int &r(n);
///   int* p = const_cast<int*>(&r);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXConstCastExpr>
    cxxConstCastExpr;
/// Matches a C-style cast expression.
///
/// Example: Matches (int) 2.2f in
/// \code
///   int i = (int) 2.2f;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CStyleCastExpr>
    cStyleCastExpr;
/// Matches explicit cast expressions.
///
/// Matches any cast expression written in user code, whether it be a
/// C-style cast, a functional-style cast, or a keyword cast.
///
/// Does not match implicit conversions.
///
/// Note: the name "explicitCast" is chosen to match Clang's terminology, as
/// Clang uses the term "cast" to apply to implicit conversions as well as to
/// actual cast expressions.
///
/// \see hasDestinationType.
///
/// Example: matches all five of the casts in
/// \code
///   int((int)(reinterpret_cast<int>(static_cast<int>(const_cast<int>(42)))))
/// \endcode
/// but does not match the implicit conversion in
/// \code
///   long ell = 42;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ExplicitCastExpr>
    explicitCastExpr;
/// Matches the implicit cast nodes of Clang's AST.
///
/// This matches many different places, including function call return value
/// eliding, as well as any type conversions.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ImplicitCastExpr>
    implicitCastExpr;
/// Matches any cast nodes of Clang's AST.
///
/// Example: castExpr() matches each of the following:
/// \code
///   (int) 3;
///   const_cast<Expr *>(SubExpr);
///   char c = 0;
/// \endcode
/// but does not match
/// \code
///   int i = (0);
///   int k = 0;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CastExpr> castExpr;
/// Matches functional cast expressions.
///
/// Example: Matches Foo(bar);
/// \code
///   Foo f = bar;
///   Foo g = (Foo) bar;
///   Foo h = Foo(bar);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXFunctionalCastExpr>
    cxxFunctionalCastExpr;
/// Matches functional cast expressions having N != 1 arguments.
///
/// Example: Matches Foo(bar, bar)
/// \code
///   Foo h = Foo(bar, bar);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXTemporaryObjectExpr>
    cxxTemporaryObjectExpr;
/// Matches predefined identifier expressions [C99 6.4.2.2].
///
/// Example: Matches __func__
/// \code
///   printf("%s", __func__);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, PredefinedExpr>
    predefinedExpr;
/// Matches C99 designated initializer expressions [C99 6.7.8].
///
/// Example: Matches { [2].y = 1.0, [0].x = 1.0 }
/// \code
///   point ptarray[10] = { [2].y = 1.0, [0].x = 1.0 };
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, DesignatedInitExpr>
    designatedInitExpr;
/// Matches designated initializer expressions that contain
/// a specific number of designators.
///
/// Example: Given
/// \code
///   point ptarray[10] = { [2].y = 1.0, [0].x = 1.0 };
///   point ptarray2[10] = { [2].y = 1.0, [2].x = 0.0, [0].x = 1.0 };
/// \endcode
/// designatorCountIs(2)
///   matches '{ [2].y = 1.0, [0].x = 1.0 }',
///   but not '{ [2].y = 1.0, [2].x = 0.0, [0].x = 1.0 }'.
AST_MATCHER_P(DesignatedInitExpr, designatorCountIs, unsigned, N) {
  // DesignatedInitExpr::size() is the number of designators in the node.
  return Node.size() == N;
}
/// Matches \c QualTypes in the clang AST.
extern const internal::VariadicAllOfMatcher<QualType> qualType;
/// Matches \c Types in the clang AST.
extern const internal::VariadicAllOfMatcher<Type> type;
/// Matches \c TypeLocs in the clang AST.
///
/// A \c TypeLoc pairs a \c Type with the source location where it was written.
extern const internal::VariadicAllOfMatcher<TypeLoc> typeLoc;
/// Matches if any of the given matchers matches.
///
/// Unlike \c anyOf, \c eachOf will generate a match result for each
/// matching submatcher.
///
/// For example, in:
/// \code
///   class A { int a; int b; };
/// \endcode
/// The matcher:
/// \code
///   cxxRecordDecl(eachOf(has(fieldDecl(hasName("a")).bind("v")),
///                        has(fieldDecl(hasName("b")).bind("v"))))
/// \endcode
/// will generate two results binding "v", the first of which binds
/// the field declaration of \c a, the second the field declaration of
/// \c b.
///
/// Usable as: Any Matcher
extern const internal::VariadicOperatorMatcherFunc<
    2, std::numeric_limits<unsigned>::max()>
    eachOf;
/// Matches if any of the given matchers matches.
///
/// Usable as: Any Matcher
extern const internal::VariadicOperatorMatcherFunc<
    2, std::numeric_limits<unsigned>::max()>
    anyOf;
/// Matches if all given matchers match.
///
/// Usable as: Any Matcher
extern const internal::VariadicOperatorMatcherFunc<
    2, std::numeric_limits<unsigned>::max()>
    allOf;
/// Matches any node regardless of the submatcher.
///
/// However, \c optionally will retain any bindings generated by the submatcher.
/// Useful when additional information which may or may not be present about a
/// main matching node is desired.
///
/// For example, in:
/// \code
///   class Foo {
///     int bar;
///   };
/// \endcode
/// The matcher:
/// \code
///   cxxRecordDecl(
///     optionally(has(
///       fieldDecl(hasName("bar")).bind("var")
///   ))).bind("record")
/// \endcode
/// will produce a result binding for both "record" and "var".
/// The matcher will produce a "record" binding even if there is no data
/// member named "bar" in that class.
///
/// Usable as: Any Matcher
extern const internal::VariadicOperatorMatcherFunc<1, 1> optionally;
/// Matches sizeof (C99), alignof (C++11) and vec_step (OpenCL) expressions.
///
/// Given
/// \code
///   Foo x = bar;
///   int y = sizeof(x) + alignof(x);
/// \endcode
/// unaryExprOrTypeTraitExpr()
///   matches \c sizeof(x) and \c alignof(x)
extern const internal::VariadicDynCastAllOfMatcher<Stmt,
                                                   UnaryExprOrTypeTraitExpr>
    unaryExprOrTypeTraitExpr;
/// Matches unary expressions that have a specific type of argument.
///
/// Given
/// \code
///   int a, c; float b; int s = sizeof(a) + sizeof(b) + alignof(c);
/// \endcode
/// unaryExprOrTypeTraitExpr(hasArgumentOfType(asString("int")))
///   matches \c sizeof(a) and \c alignof(c)
AST_MATCHER_P(UnaryExprOrTypeTraitExpr, hasArgumentOfType,
              internal::Matcher<QualType>, InnerMatcher) {
  // getTypeOfArgument() yields the argument type for both the type form
  // (sizeof(int)) and the expression form (sizeof(a)).
  const QualType ArgumentType = Node.getTypeOfArgument();
  return InnerMatcher.matches(ArgumentType, Finder, Builder);
}
/// Matches unary expressions of a certain kind.
///
/// Given
/// \code
///   int x;
///   int s = sizeof(x) + alignof(x)
/// \endcode
/// unaryExprOrTypeTraitExpr(ofKind(UETT_SizeOf))
///   matches \c sizeof(x)
///
/// If the matcher is used from clang-query, the UnaryExprOrTypeTrait parameter
/// should be passed as a quoted string. e.g., ofKind("UETT_SizeOf").
AST_MATCHER_P(UnaryExprOrTypeTraitExpr, ofKind, UnaryExprOrTypeTrait, Kind) {
  return Node.getKind() == Kind;
}
/// Same as unaryExprOrTypeTraitExpr, but only matching
/// alignof.
inline internal::BindableMatcher<Stmt> alignOfExpr(
    const internal::Matcher<UnaryExprOrTypeTraitExpr> &InnerMatcher) {
  // Accept either alignof flavor (UETT_AlignOf or UETT_PreferredAlignOf)
  // before applying the caller-supplied matcher.
  const auto AnyAlignOfKind =
      anyOf(ofKind(UETT_AlignOf), ofKind(UETT_PreferredAlignOf));
  return stmt(unaryExprOrTypeTraitExpr(allOf(AnyAlignOfKind, InnerMatcher)));
}
/// Same as unaryExprOrTypeTraitExpr, but only matching
/// sizeof.
inline internal::BindableMatcher<Stmt> sizeOfExpr(
    const internal::Matcher<UnaryExprOrTypeTraitExpr> &InnerMatcher) {
  // Restrict the generic trait matcher to the sizeof kind, then apply the
  // caller-supplied matcher on top.
  const auto SizeOfKind = ofKind(UETT_SizeOf);
  return stmt(unaryExprOrTypeTraitExpr(allOf(SizeOfKind, InnerMatcher)));
}
/// Matches NamedDecl nodes that have the specified name.
///
/// Supports specifying enclosing namespaces or classes by prefixing the name
/// with '<enclosing>::'.
/// Does not match typedefs of an underlying type with the given name.
///
/// Example matches X (Name == "X")
/// \code
///   class X;
/// \endcode
///
/// Example matches X (Name is one of "::a::b::X", "a::b::X", "b::X", "X")
/// \code
///   namespace a { namespace b { class X; } }
/// \endcode
inline internal::Matcher<NamedDecl> hasName(StringRef Name) {
  // HasNameMatcher takes a list of candidate names; wrap the single name in
  // a one-element vector. Ownership of the matcher passes to Matcher<>.
  std::vector<std::string> Names;
  Names.push_back(std::string(Name));
  return internal::Matcher<NamedDecl>(new internal::HasNameMatcher(Names));
}
/// Matches NamedDecl nodes that have any of the specified names.
///
/// This matcher is only provided as a performance optimization of hasName;
/// it is semantically equivalent to an \c anyOf of \c hasName matchers.
/// \code
///   hasAnyName(a, b, c)
/// \endcode
/// is equivalent to, but faster than
/// \code
///   anyOf(hasName(a), hasName(b), hasName(c))
/// \endcode
extern const internal::VariadicFunction<internal::Matcher<NamedDecl>, StringRef,
                                        internal::hasAnyNameFunc>
    hasAnyName;
/// Matches NamedDecl nodes whose fully qualified names contain
/// a substring matched by the given RegExp.
///
/// Supports specifying enclosing namespaces or classes by
/// prefixing the name with '<enclosing>::'. Does not match typedefs
/// of an underlying type with the given name.
///
/// Example matches X (regexp == "::X")
/// \code
///   class X;
/// \endcode
///
/// Example matches X (regexp is one of "::X", "^foo::.*X", among others)
/// \code
///   namespace foo { namespace bar { class X; } }
/// \endcode
AST_MATCHER_REGEX(NamedDecl, matchesName, RegExp) {
  // The qualified name is prefixed with "::" so patterns anchored at "::"
  // can match names at the top level as well.
  std::string FullNameString = "::" + Node.getQualifiedNameAsString();
  return RegExp->match(FullNameString);
}
/// Matches overloaded operator names.
///
/// Matches overloaded operator names specified in strings without the
/// "operator" prefix: e.g. "<<".
///
/// Given:
/// \code
///   class A { int operator*(); };
///   const A &operator<<(const A &a, const A &b);
///   A a;
///   a << a;   // <-- This matches
/// \endcode
///
/// \c cxxOperatorCallExpr(hasOverloadedOperatorName("<<"))) matches the
/// specified line and
/// \c cxxRecordDecl(hasMethod(hasOverloadedOperatorName("*")))
/// matches the declaration of \c A.
///
/// Usable as: Matcher<CXXOperatorCallExpr>, Matcher<FunctionDecl>
inline internal::PolymorphicMatcherWithParam1<
    internal::HasOverloadedOperatorNameMatcher, std::vector<std::string>,
    AST_POLYMORPHIC_SUPPORTED_TYPES(CXXOperatorCallExpr, FunctionDecl)>
hasOverloadedOperatorName(StringRef Name) {
  // Name the verbose polymorphic matcher type once instead of repeating it.
  using MatcherT = internal::PolymorphicMatcherWithParam1<
      internal::HasOverloadedOperatorNameMatcher, std::vector<std::string>,
      AST_POLYMORPHIC_SUPPORTED_TYPES(CXXOperatorCallExpr, FunctionDecl)>;
  return MatcherT({std::string(Name)});
}
/// Matches overloaded operator names.
///
/// Matches overloaded operator names specified in strings without the
/// "operator" prefix: e.g. "<<".
///
/// hasAnyOverloadedOperatorName("+", "-")
/// Is equivalent to
/// anyOf(hasOverloadedOperatorName("+"), hasOverloadedOperatorName("-"))
extern const internal::VariadicFunction<
    internal::PolymorphicMatcherWithParam1<
        internal::HasOverloadedOperatorNameMatcher, std::vector<std::string>,
        AST_POLYMORPHIC_SUPPORTED_TYPES(CXXOperatorCallExpr, FunctionDecl)>,
    StringRef, internal::hasAnyOverloadedOperatorNameFunc>
    hasAnyOverloadedOperatorName;
/// Matches C++ classes that are directly or indirectly derived from a class
/// matching \c Base, or Objective-C classes that directly or indirectly
/// subclass a class matching \c Base.
///
/// Note that a class is not considered to be derived from itself.
///
/// Example matches Y, Z, C (Base == hasName("X"))
/// \code
///   class X;
///   class Y : public X {};  // directly derived
///   class Z : public Y {};  // indirectly derived
///   typedef X A;
///   typedef A B;
///   class C : public B {};  // derived from a typedef of X
/// \endcode
///
/// In the following example, Bar matches isDerivedFrom(hasName("X")):
/// \code
///   class Foo;
///   typedef Foo X;
///   class Bar : public Foo {};  // derived from a type that X is a typedef of
/// \endcode
///
/// In the following example, Bar matches isDerivedFrom(hasName("NSObject"))
/// \code
///   @interface NSObject @end
///   @interface Bar : NSObject @end
/// \endcode
///
/// Usable as: Matcher<CXXRecordDecl>, Matcher<ObjCInterfaceDecl>
AST_POLYMORPHIC_MATCHER_P(
    isDerivedFrom,
    AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl, ObjCInterfaceDecl),
    internal::Matcher<NamedDecl>, Base) {
  // Check if the node is a C++ struct/union/class.
  if (const auto *RD = dyn_cast<CXXRecordDecl>(&Node))
    return Finder->classIsDerivedFrom(RD, Base, Builder, /*Directly=*/false);
  // The node must be an Objective-C class: the supported-type list above
  // only admits the two types, so this cast<> cannot fail.
  const auto *InterfaceDecl = cast<ObjCInterfaceDecl>(&Node);
  return Finder->objcClassIsDerivedFrom(InterfaceDecl, Base, Builder,
                                        /*Directly=*/false);
}
/// Overloaded method as shortcut for \c isDerivedFrom(hasName(...)).
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(
    isDerivedFrom,
    AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl, ObjCInterfaceDecl),
    std::string, BaseName, 1) {
  // An empty name can never identify a base class.
  if (BaseName.empty())
    return false;
  const auto M = isDerivedFrom(hasName(BaseName));
  if (const auto *RD = dyn_cast<CXXRecordDecl>(&Node))
    return Matcher<CXXRecordDecl>(M).matches(*RD, Finder, Builder);
  const auto *InterfaceDecl = cast<ObjCInterfaceDecl>(&Node);
  return Matcher<ObjCInterfaceDecl>(M).matches(*InterfaceDecl, Finder, Builder);
}
/// Matches C++ classes that have a direct or indirect base matching \p
/// BaseSpecMatcher.
///
/// Example:
/// matcher hasAnyBase(hasType(cxxRecordDecl(hasName("SpecialBase"))))
/// \code
///   class Foo;
///   class Bar : Foo {};
///   class Baz : Bar {};
///   class SpecialBase;
///   class Proxy : SpecialBase {};  // matches Proxy
///   class IndirectlyDerived : Proxy {};  // matches IndirectlyDerived
/// \endcode
///
// FIXME: Refactor this and isDerivedFrom to reuse implementation.
AST_MATCHER_P(CXXRecordDecl, hasAnyBase, internal::Matcher<CXXBaseSpecifier>,
              BaseSpecMatcher) {
  // The transitive walk over the base-class hierarchy is delegated to the
  // shared helper.
  return internal::matchesAnyBase(Node, BaseSpecMatcher, Finder, Builder);
}
/// Matches C++ classes that have a direct base matching \p BaseSpecMatcher.
///
/// Example:
/// matcher hasDirectBase(hasType(cxxRecordDecl(hasName("SpecialBase"))))
/// \code
///   class Foo;
///   class Bar : Foo {};
///   class Baz : Bar {};
///   class SpecialBase;
///   class Proxy : SpecialBase {};  // matches Proxy
///   class IndirectlyDerived : Proxy {};  // doesn't match
/// \endcode
AST_MATCHER_P(CXXRecordDecl, hasDirectBase, internal::Matcher<CXXBaseSpecifier>,
              BaseSpecMatcher) {
  // Base specifiers only exist on a definition; a bare declaration never
  // matches.
  if (!Node.hasDefinition())
    return false;
  // Succeed on the first direct base the inner matcher accepts.
  for (const CXXBaseSpecifier &BaseSpec : Node.bases())
    if (BaseSpecMatcher.matches(BaseSpec, Finder, Builder))
      return true;
  return false;
}
/// Similar to \c isDerivedFrom(), but also matches classes that directly
/// match \c Base.
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(
    isSameOrDerivedFrom,
    AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl, ObjCInterfaceDecl),
    internal::Matcher<NamedDecl>, Base, 0) {
  // "Same or derived": the class may match Base directly, or be derived
  // from a class that does.
  const auto M = anyOf(Base, isDerivedFrom(Base));
  if (const auto *RD = dyn_cast<CXXRecordDecl>(&Node))
    return Matcher<CXXRecordDecl>(M).matches(*RD, Finder, Builder);
  const auto *InterfaceDecl = cast<ObjCInterfaceDecl>(&Node);
  return Matcher<ObjCInterfaceDecl>(M).matches(*InterfaceDecl, Finder, Builder);
}
/// Overloaded method as shortcut for
/// \c isSameOrDerivedFrom(hasName(...)).
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(
    isSameOrDerivedFrom,
    AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl, ObjCInterfaceDecl),
    std::string, BaseName, 1) {
  // An empty name can never identify a class.
  if (BaseName.empty())
    return false;
  const auto M = isSameOrDerivedFrom(hasName(BaseName));
  if (const auto *RD = dyn_cast<CXXRecordDecl>(&Node))
    return Matcher<CXXRecordDecl>(M).matches(*RD, Finder, Builder);
  const auto *InterfaceDecl = cast<ObjCInterfaceDecl>(&Node);
  return Matcher<ObjCInterfaceDecl>(M).matches(*InterfaceDecl, Finder, Builder);
}
/// Matches C++ or Objective-C classes that are directly derived from a class
/// matching \c Base.
///
/// Note that a class is not considered to be derived from itself.
///
/// Example matches Y, C (Base == hasName("X"))
/// \code
///   class X;
///   class Y : public X {};  // directly derived
///   class Z : public Y {};  // indirectly derived
///   typedef X A;
///   typedef A B;
///   class C : public B {};  // derived from a typedef of X
/// \endcode
///
/// In the following example, Bar matches isDerivedFrom(hasName("X")):
/// \code
///   class Foo;
///   typedef Foo X;
///   class Bar : public Foo {};  // derived from a type that X is a typedef of
/// \endcode
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(
    isDirectlyDerivedFrom,
    AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl, ObjCInterfaceDecl),
    internal::Matcher<NamedDecl>, Base, 0) {
  // Check if the node is a C++ struct/union/class.
  if (const auto *RD = dyn_cast<CXXRecordDecl>(&Node))
    return Finder->classIsDerivedFrom(RD, Base, Builder, /*Directly=*/true);
  // The node must be an Objective-C class: the supported-type list above
  // guarantees the cast<> succeeds.
  const auto *InterfaceDecl = cast<ObjCInterfaceDecl>(&Node);
  return Finder->objcClassIsDerivedFrom(InterfaceDecl, Base, Builder,
                                        /*Directly=*/true);
}
/// Overloaded method as shortcut for \c isDirectlyDerivedFrom(hasName(...)).
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(
    isDirectlyDerivedFrom,
    AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl, ObjCInterfaceDecl),
    std::string, BaseName, 1) {
  // An empty name can never identify a base class.
  if (BaseName.empty())
    return false;
  const auto M = isDirectlyDerivedFrom(hasName(BaseName));
  if (const auto *RD = dyn_cast<CXXRecordDecl>(&Node))
    return Matcher<CXXRecordDecl>(M).matches(*RD, Finder, Builder);
  const auto *InterfaceDecl = cast<ObjCInterfaceDecl>(&Node);
  return Matcher<ObjCInterfaceDecl>(M).matches(*InterfaceDecl, Finder, Builder);
}
/// Matches the first method of a class or struct that satisfies \c
/// InnerMatcher.
///
/// Given:
/// \code
///   class A { void func(); };
///   class B { void member(); };
/// \endcode
///
/// \c cxxRecordDecl(hasMethod(hasName("func"))) matches the declaration of
/// \c A but not \c B.
AST_MATCHER_P(CXXRecordDecl, hasMethod, internal::Matcher<CXXMethodDecl>,
              InnerMatcher) {
  // Stops at the first method in [method_begin, method_end) that satisfies
  // InnerMatcher.
  return matchesFirstInPointerRange(InnerMatcher, Node.method_begin(),
                                    Node.method_end(), Finder, Builder);
}
/// Matches the generated class of lambda expressions.
///
/// Given:
/// \code
///   auto x = []{};
/// \endcode
///
/// \c cxxRecordDecl(isLambda()) matches the implicit (compiler-generated)
/// class declaration of \c decltype(x)
AST_MATCHER(CXXRecordDecl, isLambda) {
  return Node.isLambda();
}
/// Matches AST nodes that have child AST nodes that match the
/// provided matcher.
///
/// Example matches X, Y
///   (matcher = cxxRecordDecl(has(cxxRecordDecl(hasName("X")))))
/// \code
///   class X {};  // Matches X, because X::X is a class of name X inside X.
///   class Y { class X {}; };
///   class Z { class Y { class X {}; }; };  // Does not match Z.
/// \endcode
///
/// ChildT must be an AST base type.
///
/// Usable as: Any Matcher
/// Note that has is direct matcher, so it also matches things like implicit
/// casts and paren casts. If you are matching with expr then you should
/// probably consider using ignoringParenImpCasts like:
/// has(ignoringParenImpCasts(expr())).
extern const internal::ArgumentAdaptingMatcherFunc<internal::HasMatcher> has;
/// Matches AST nodes that have descendant AST nodes that match the
/// provided matcher.
///
/// Example matches X, Y, Z
///   (matcher = cxxRecordDecl(hasDescendant(cxxRecordDecl(hasName("X")))))
/// \code
///   class X {};  // Matches X, because X::X is a class of name X inside X.
///   class Y { class X {}; };
///   class Z { class Y { class X {}; }; };
/// \endcode
///
/// DescendantT must be an AST base type.
///
/// Usable as: Any Matcher
extern const internal::ArgumentAdaptingMatcherFunc<
    internal::HasDescendantMatcher>
    hasDescendant;
/// Matches AST nodes that have child AST nodes that match the
/// provided matcher.
///
/// Example matches X, Y, Y::X, Z::Y, Z::Y::X
///   (matcher = cxxRecordDecl(forEach(cxxRecordDecl(hasName("X")))))
/// \code
///   class X {};
///   class Y { class X {}; };  // Matches Y, because Y::X is a class of name X
///                             // inside Y.
///   class Z { class Y { class X {}; }; };  // Does not match Z.
/// \endcode
///
/// ChildT must be an AST base type.
///
/// As opposed to 'has', 'forEach' will cause a match for each result that
/// matches instead of only on the first one.
///
/// Usable as: Any Matcher
extern const internal::ArgumentAdaptingMatcherFunc<internal::ForEachMatcher>
    forEach;
/// Matches AST nodes that have descendant AST nodes that match the
/// provided matcher.
///
/// Example matches X, A, A::X, B, B::C, B::C::X
///   (matcher = cxxRecordDecl(forEachDescendant(cxxRecordDecl(hasName("X")))))
/// \code
///   class X {};
///   class A { class X {}; };  // Matches A, because A::X is a class of name
///                             // X inside A.
///   class B { class C { class X {}; }; };
/// \endcode
///
/// DescendantT must be an AST base type.
///
/// As opposed to 'hasDescendant', 'forEachDescendant' will cause a match for
/// each result that matches instead of only on the first one.
///
/// Note: Recursively combined ForEachDescendant can cause many matches:
///   cxxRecordDecl(forEachDescendant(cxxRecordDecl(
///     forEachDescendant(cxxRecordDecl())
///   )))
/// will match 10 times (plus injected class name matches) on:
/// \code
///   class A { class B { class C { class D { class E {}; }; }; }; };
/// \endcode
///
/// Usable as: Any Matcher
extern const internal::ArgumentAdaptingMatcherFunc<
    internal::ForEachDescendantMatcher>
    forEachDescendant;
/// Matches if the node or any descendant matches.
///
/// Generates results for each match.
///
/// For example, in:
/// \code
///   class A { class B {}; class C {}; };
/// \endcode
/// The matcher:
/// \code
///   cxxRecordDecl(hasName("::A"),
///                 findAll(cxxRecordDecl(isDefinition()).bind("m")))
/// \endcode
/// will generate results for \c A, \c B and \c C.
///
/// Usable as: Any Matcher
template <typename T>
internal::Matcher<T> findAll(const internal::Matcher<T> &Matcher) {
  // "Node itself or any descendant": eachOf yields a result for every arm
  // that matches, so the node and each matching descendant are all reported.
  auto DescendantArm = forEachDescendant(Matcher);
  return eachOf(Matcher, DescendantArm);
}
/// Matches AST nodes that have a parent that matches the provided
/// matcher.
///
/// Given
/// \code
///   void f() { for (;;) { int x = 42; if (true) { int x = 43; } } }
/// \endcode
/// \c compoundStmt(hasParent(ifStmt())) matches "{ int x = 43; }".
///
/// \see hasAncestor
///
/// Usable as: Any Matcher
extern const internal::ArgumentAdaptingMatcherFunc<
    internal::HasParentMatcher,
    internal::TypeList<Decl, NestedNameSpecifierLoc, Stmt, TypeLoc>,
    internal::TypeList<Decl, NestedNameSpecifierLoc, Stmt, TypeLoc>>
    hasParent;
/// Matches AST nodes that have an ancestor that matches the provided
/// matcher.
///
/// Given
/// \code
///   void f() { if (true) { int x = 42; } }
///   void g() { for (;;) { int x = 43; } }
/// \endcode
/// \c expr(integerLiteral(hasAncestor(ifStmt()))) matches \c 42, but not 43.
///
/// \see hasParent
///
/// Usable as: Any Matcher
extern const internal::ArgumentAdaptingMatcherFunc<
    internal::HasAncestorMatcher,
    internal::TypeList<Decl, NestedNameSpecifierLoc, Stmt, TypeLoc>,
    internal::TypeList<Decl, NestedNameSpecifierLoc, Stmt, TypeLoc>>
    hasAncestor;
/// Matches if the provided matcher does not match.
///
/// This inverts the result of its single submatcher.
///
/// Example matches Y (matcher = cxxRecordDecl(unless(hasName("X"))))
/// \code
///   class X {};
///   class Y {};
/// \endcode
///
/// Usable as: Any Matcher
extern const internal::VariadicOperatorMatcherFunc<1, 1> unless;
/// Matches a node if the declaration associated with that node
/// matches the given matcher.
///
/// The associated declaration is:
/// - for type nodes, the declaration of the underlying type
/// - for CallExpr, the declaration of the callee
/// - for MemberExpr, the declaration of the referenced member
/// - for CXXConstructExpr, the declaration of the constructor
/// - for CXXNewExpr, the declaration of the operator new
/// - for ObjCIvarExpr, the declaration of the ivar
///
/// For type nodes, hasDeclaration will generally match the declaration of the
/// sugared type. Given
/// \code
///   class X {};
///   typedef X Y;
///   Y y;
/// \endcode
/// in varDecl(hasType(hasDeclaration(decl()))) the decl will match the
/// typedefDecl. A common use case is to match the underlying, desugared type.
/// This can be achieved by using the hasUnqualifiedDesugaredType matcher:
/// \code
///   varDecl(hasType(hasUnqualifiedDesugaredType(
///       recordType(hasDeclaration(decl())))))
/// \endcode
/// In this matcher, the decl will match the CXXRecordDecl of class X.
///
/// Usable as: Matcher<AddrLabelExpr>, Matcher<CallExpr>,
///   Matcher<CXXConstructExpr>, Matcher<CXXNewExpr>, Matcher<DeclRefExpr>,
///   Matcher<EnumType>, Matcher<InjectedClassNameType>, Matcher<LabelStmt>,
///   Matcher<MemberExpr>, Matcher<QualType>, Matcher<RecordType>,
///   Matcher<TagType>, Matcher<TemplateSpecializationType>,
///   Matcher<TemplateTypeParmType>, Matcher<TypedefType>,
///   Matcher<UnresolvedUsingType>
inline internal::PolymorphicMatcherWithParam1<
    internal::HasDeclarationMatcher, internal::Matcher<Decl>,
    void(internal::HasDeclarationSupportedTypes)>
hasDeclaration(const internal::Matcher<Decl> &InnerMatcher) {
  // Name the verbose polymorphic matcher type once instead of repeating it.
  using MatcherT = internal::PolymorphicMatcherWithParam1<
      internal::HasDeclarationMatcher, internal::Matcher<Decl>,
      void(internal::HasDeclarationSupportedTypes)>;
  return MatcherT(InnerMatcher);
}
/// Matches a \c NamedDecl whose underlying declaration matches the given
/// matcher.
///
/// Given
/// \code
///   namespace N { template<class T> void f(T t); }
///   template <class T> void g() { using N::f; f(T()); }
/// \endcode
/// \c unresolvedLookupExpr(hasAnyDeclaration(
///     namedDecl(hasUnderlyingDecl(hasName("::N::f")))))
///   matches the use of \c f in \c g() .
AST_MATCHER_P(NamedDecl, hasUnderlyingDecl, internal::Matcher<NamedDecl>,
              InnerMatcher) {
  // Only run the inner matcher when an underlying declaration exists.
  if (const NamedDecl *Underlying = Node.getUnderlyingDecl())
    return InnerMatcher.matches(*Underlying, Finder, Builder);
  return false;
}
/// Matches on the implicit object argument of a member call expression, after
/// stripping off any parentheses or implicit casts.
///
/// Given
/// \code
///   class Y { public: void m(); };
///   Y g();
///   class X : public Y {};
///   void z(Y y, X x) { y.m(); (g()).m(); x.m(); }
/// \endcode
/// cxxMemberCallExpr(on(hasType(cxxRecordDecl(hasName("Y")))))
///   matches `y.m()` and `(g()).m()`.
/// cxxMemberCallExpr(on(hasType(cxxRecordDecl(hasName("X")))))
///   matches `x.m()`.
/// cxxMemberCallExpr(on(callExpr()))
///   matches `(g()).m()`.
///
/// FIXME: Overload to allow directly matching types?
AST_MATCHER_P(CXXMemberCallExpr, on, internal::Matcher<Expr>,
              InnerMatcher) {
  // The implicit object argument is the expression the method is invoked on;
  // strip parens and implicit casts before handing it to the inner matcher.
  const Expr *ObjectArg =
      Node.getImplicitObjectArgument()->IgnoreParenImpCasts();
  if (ObjectArg == nullptr)
    return false;
  return InnerMatcher.matches(*ObjectArg, Finder, Builder);
}
/// Matches on the receiver of an ObjectiveC Message expression.
///
/// Example
/// matcher = objCMessageExpr(hasReceiverType(asString("UIWebView *")));
/// matches the [webView ...] message invocation.
/// \code
///   NSString *webViewJavaScript = ...
///   UIWebView *webView = ...
///   [webView stringByEvaluatingJavaScriptFromString:webViewJavascript];
/// \endcode
AST_MATCHER_P(ObjCMessageExpr, hasReceiverType, internal::Matcher<QualType>,
              InnerMatcher) {
  // Match the inner matcher against the receiver's type.
  const QualType ReceiverType = Node.getReceiverType();
  return InnerMatcher.matches(ReceiverType, Finder, Builder);
}
/// Returns true when the Objective-C method declaration is a class method.
///
/// \see isInstanceMethod
///
/// Example
/// matcher = objcMethodDecl(isClassMethod())
/// matches
/// \code
///   @interface I + (void)foo; @end
/// \endcode
/// but not
/// \code
///   @interface I - (void)bar; @end
/// \endcode
AST_MATCHER(ObjCMethodDecl, isClassMethod) {
  return Node.isClassMethod();
}
/// Returns true when the Objective-C method declaration is an instance method.
///
/// \see isClassMethod
///
/// Example
/// matcher = objcMethodDecl(isInstanceMethod())
/// matches
/// \code
///   @interface I - (void)bar; @end
/// \endcode
/// but not
/// \code
///   @interface I + (void)foo; @end
/// \endcode
AST_MATCHER(ObjCMethodDecl, isInstanceMethod) {
  return Node.isInstanceMethod();
}
/// Returns true when the Objective-C message is sent to a class.
///
/// \see isInstanceMessage
///
/// Example
/// matcher = objcMessageExpr(isClassMessage())
/// matches
/// \code
///   [NSString stringWithFormat:@"format"];
/// \endcode
/// but not
/// \code
///   NSString *x = @"hello";
///   [x containsString:@"h"];
/// \endcode
AST_MATCHER(ObjCMessageExpr, isClassMessage) {
  return Node.isClassMessage();
}
/// Returns true when the Objective-C message is sent to an instance.
///
/// \see isClassMessage
///
/// Example
/// matcher = objcMessageExpr(isInstanceMessage())
/// matches
/// \code
///   NSString *x = @"hello";
///   [x containsString:@"h"];
/// \endcode
/// but not
/// \code
///   [NSString stringWithFormat:@"format"];
/// \endcode
AST_MATCHER(ObjCMessageExpr, isInstanceMessage) {
  return Node.isInstanceMessage();
}
/// Matches if the Objective-C message is sent to an instance,
/// and the inner matcher matches on that instance.
///
/// For example the method call in
/// \code
///   NSString *x = @"hello";
///   [x containsString:@"h"];
/// \endcode
/// is matched by
/// objcMessageExpr(hasReceiver(declRefExpr(to(varDecl(hasName("x"))))))
AST_MATCHER_P(ObjCMessageExpr, hasReceiver, internal::Matcher<Expr>,
              InnerMatcher) {
  // No instance receiver (e.g. for a class message) means no match.
  const Expr *Receiver = Node.getInstanceReceiver();
  if (Receiver == nullptr)
    return false;
  return InnerMatcher.matches(*Receiver->IgnoreParenImpCasts(), Finder,
                              Builder);
}
/// Matches when BaseName == Selector.getAsString()
///
/// matcher = objCMessageExpr(hasSelector("loadHTMLString:baseURL:"));
/// matches the outer message expr in the code below, but NOT the message
/// invocation for self.bodyView.
/// \code
///   [self.bodyView loadHTMLString:html baseURL:NULL];
/// \endcode
AST_MATCHER_P(ObjCMessageExpr, hasSelector, std::string, BaseName) {
  // operator== reads more clearly than compare(...) == 0 and is equivalent
  // (clang-tidy readability-string-compare).
  return Node.getSelector().getAsString() == BaseName;
}
/// Matches when at least one of the supplied strings equals
/// Selector.getAsString()
///
/// matcher = objCMessageExpr(hasSelector("methodA:", "methodB:"));
/// matches both of the expressions below:
/// \code
///   [myObj methodA:argA];
///   [myObj methodB:argB];
/// \endcode
extern const internal::VariadicFunction<internal::Matcher<ObjCMessageExpr>,
                                        StringRef,
                                        internal::hasAnySelectorFunc>
    hasAnySelector;
/// Matches ObjC selectors whose name contains
/// a substring matched by the given RegExp.
///
/// matcher = objCMessageExpr(matchesSelector("loadHTMLString\:baseURL?"));
/// matches the outer message expr in the code below, but NOT the message
/// invocation for self.bodyView.
/// \code
///   [self.bodyView loadHTMLString:html baseURL:NULL];
/// \endcode
AST_MATCHER_REGEX(ObjCMessageExpr, matchesSelector, RegExp) {
  // Match the regex against the selector spelled out as a string,
  // e.g. "loadHTMLString:baseURL:".
  std::string SelectorString = Node.getSelector().getAsString();
  return RegExp->match(SelectorString);
}
/// Matches when the selector is the empty selector
///
/// Matches only when the selector of the objCMessageExpr is NULL. This may
/// represent an error condition in the tree!
AST_MATCHER(ObjCMessageExpr, hasNullSelector) {
  return Node.getSelector().isNull();
}
/// Matches when the selector is a Unary Selector
///
/// matcher = objCMessageExpr(hasUnarySelector());
/// matches self.bodyView in the code below, but NOT the outer message
/// invocation of "loadHTMLString:baseURL:".
/// \code
///   [self.bodyView loadHTMLString:html baseURL:NULL];
/// \endcode
AST_MATCHER(ObjCMessageExpr, hasUnarySelector) {
  return Node.getSelector().isUnarySelector();
}
/// Matches when the selector is a keyword selector
///
/// objCMessageExpr(hasKeywordSelector()) matches the generated setFrame
/// message expression in
///
/// \code
/// UIWebView *webView = ...;
/// CGRect bodyFrame = webView.frame;
/// bodyFrame.size.height = self.bodyContentHeight;
/// webView.frame = bodyFrame;
/// // ^---- matches here
/// \endcode
AST_MATCHER(ObjCMessageExpr, hasKeywordSelector) {
  // Matches selectors of the keyword (colon-separated) form.
  Selector Sel = Node.getSelector();
  return Sel.isKeywordSelector();
}
/// Matches when the selector has the specified number of arguments
///
/// matcher = objCMessageExpr(numSelectorArgs(0));
/// matches self.bodyView in the code below
///
/// matcher = objCMessageExpr(numSelectorArgs(2));
/// matches the invocation of "loadHTMLString:baseURL:" but not that
/// of self.bodyView
/// \code
/// [self.bodyView loadHTMLString:html baseURL:NULL];
/// \endcode
AST_MATCHER_P(ObjCMessageExpr, numSelectorArgs, unsigned, N) {
  // Matches iff the selector declares exactly N arguments.
  return N == Node.getSelector().getNumArgs();
}
/// Matches if the call expression's callee expression matches.
///
/// Given
/// \code
/// class Y { void x() { this->x(); x(); Y y; y.x(); } };
/// void f() { f(); }
/// \endcode
/// callExpr(callee(expr()))
/// matches this->x(), x(), y.x(), f()
/// with callee(...)
/// matching this->x, x, y.x, f respectively
///
/// Note: Callee cannot take the more general internal::Matcher<Expr>
/// because this introduces ambiguous overloads with calls to Callee taking a
/// internal::Matcher<Decl>, as the matcher hierarchy is purely
/// implemented in terms of implicit casts.
AST_MATCHER_P(CallExpr, callee, internal::Matcher<Stmt>,
              InnerMatcher) {
  // Forward to the callee expression if one exists.
  if (const Expr *Callee = Node.getCallee())
    return InnerMatcher.matches(*Callee, Finder, Builder);
  return false;
}
/// Matches if the call expression's callee's declaration matches the
/// given matcher.
///
/// Example matches y.x() (matcher = callExpr(callee(
/// cxxMethodDecl(hasName("x")))))
/// \code
/// class Y { public: void x(); };
/// void z() { Y y; y.x(); }
/// \endcode
AST_MATCHER_P_OVERLOAD(CallExpr, callee, internal::Matcher<Decl>, InnerMatcher,
                       1) {
  // Re-expressed in terms of hasDeclaration on the call expression itself.
  const auto CalleeDeclMatcher = callExpr(hasDeclaration(InnerMatcher));
  return CalleeDeclMatcher.matches(Node, Finder, Builder);
}
/// Matches if the expression's or declaration's type matches a type
/// matcher.
///
/// Example matches x (matcher = expr(hasType(cxxRecordDecl(hasName("X")))))
/// and z (matcher = varDecl(hasType(cxxRecordDecl(hasName("X")))))
/// and U (matcher = typedefDecl(hasType(asString("int")))
/// and friend class X (matcher = friendDecl(hasType("X"))
/// \code
/// class X {};
/// void y(X &x) { x; X z; }
/// typedef int U;
/// class Y { friend class X; };
/// \endcode
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(
    hasType,
    AST_POLYMORPHIC_SUPPORTED_TYPES(Expr, FriendDecl, TypedefNameDecl,
                                    ValueDecl),
    internal::Matcher<QualType>, InnerMatcher, 0) {
  // Nodes with no underlying type can never match.
  const QualType QT = internal::getUnderlyingType(Node);
  if (QT.isNull())
    return false;
  return InnerMatcher.matches(QT, Finder, Builder);
}
/// Overloaded to match the declaration of the expression's or value
/// declaration's type.
///
/// In case of a value declaration (for example a variable declaration),
/// this resolves one layer of indirection. For example, in the value
/// declaration "X x;", cxxRecordDecl(hasName("X")) matches the declaration of
/// X, while varDecl(hasType(cxxRecordDecl(hasName("X")))) matches the
/// declaration of x.
///
/// Example matches x (matcher = expr(hasType(cxxRecordDecl(hasName("X")))))
/// and z (matcher = varDecl(hasType(cxxRecordDecl(hasName("X")))))
/// and friend class X (matcher = friendDecl(hasType("X"))
/// \code
/// class X {};
/// void y(X &x) { x; X z; }
/// class Y { friend class X; };
/// \endcode
///
/// Example matches class Derived
/// (matcher = cxxRecordDecl(hasAnyBase(hasType(cxxRecordDecl(hasName("Base"))))))
/// \code
/// class Base {};
/// class Derived : Base {};
/// \endcode
///
/// Usable as: Matcher<Expr>, Matcher<FriendDecl>, Matcher<ValueDecl>,
/// Matcher<CXXBaseSpecifier>
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(
    hasType,
    AST_POLYMORPHIC_SUPPORTED_TYPES(Expr, FriendDecl, ValueDecl,
                                    CXXBaseSpecifier),
    internal::Matcher<Decl>, InnerMatcher, 1) {
  // Nodes with no underlying type can never match; otherwise resolve the
  // type's declaration and hand it to InnerMatcher.
  const QualType QT = internal::getUnderlyingType(Node);
  if (QT.isNull())
    return false;
  return qualType(hasDeclaration(InnerMatcher)).matches(QT, Finder, Builder);
}
/// Matches if the type location of the declarator decl's type matches
/// the inner matcher.
///
/// Given
/// \code
/// int x;
/// \endcode
/// declaratorDecl(hasTypeLoc(loc(asString("int"))))
/// matches int x
AST_MATCHER_P(DeclaratorDecl, hasTypeLoc, internal::Matcher<TypeLoc>, Inner) {
  // Implicitly generated declarations (e.g. implicit destructors) carry no
  // TypeSourceInfo and therefore never match.
  const TypeSourceInfo *TSI = Node.getTypeSourceInfo();
  return TSI && Inner.matches(TSI->getTypeLoc(), Finder, Builder);
}
/// Matches if the matched type is represented by the given string.
///
/// Given
/// \code
/// class Y { public: void x(); };
/// void z() { Y* y; y->x(); }
/// \endcode
/// cxxMemberCallExpr(on(hasType(asString("class Y *"))))
/// matches y->x()
AST_MATCHER_P(QualType, asString, std::string, Name) {
  // Compare the pretty-printed form of the type against Name.
  return Node.getAsString() == Name;
}
/// Matches if the matched type is a pointer type and the pointee type
/// matches the specified matcher.
///
/// Example matches y->x()
/// (matcher = cxxMemberCallExpr(on(hasType(pointsTo
/// cxxRecordDecl(hasName("Y")))))))
/// \code
/// class Y { public: void x(); };
/// void z() { Y *y; y->x(); }
/// \endcode
AST_MATCHER_P(
    QualType, pointsTo, internal::Matcher<QualType>,
    InnerMatcher) {
  // Only non-null pointer types can match; recurse into the pointee type.
  if (Node.isNull() || !Node->isAnyPointerType())
    return false;
  return InnerMatcher.matches(Node->getPointeeType(), Finder, Builder);
}
/// Overloaded to match the pointee type's declaration.
AST_MATCHER_P_OVERLOAD(QualType, pointsTo, internal::Matcher<Decl>,
                       InnerMatcher, 1) {
  // Wrap the declaration matcher and reuse the QualType overload.
  const auto PointeeMatcher = qualType(hasDeclaration(InnerMatcher));
  return pointsTo(PointeeMatcher).matches(Node, Finder, Builder);
}
/// Matches if the matched type matches the unqualified desugared
/// type of the matched node.
///
/// For example, in:
/// \code
/// class A {};
/// using B = A;
/// \endcode
/// The matcher type(hasUnqualifiedDesugaredType(recordType())) matches
/// both B and A.
AST_MATCHER_P(Type, hasUnqualifiedDesugaredType, internal::Matcher<Type>,
              InnerMatcher) {
  // Strip sugar and qualifiers before applying the inner matcher.
  const Type *Desugared = Node.getUnqualifiedDesugaredType();
  return InnerMatcher.matches(*Desugared, Finder, Builder);
}
/// Matches if the matched type is a reference type and the referenced
/// type matches the specified matcher.
///
/// Example matches X &x and const X &y
/// (matcher = varDecl(hasType(references(cxxRecordDecl(hasName("X"))))))
/// \code
/// class X {
/// void a(X b) {
/// X &x = b;
/// const X &y = b;
/// }
/// };
/// \endcode
AST_MATCHER_P(QualType, references, internal::Matcher<QualType>,
              InnerMatcher) {
  // Only non-null reference types can match; recurse into the referenced
  // type (exposed via getPointeeType for references too).
  if (Node.isNull() || !Node->isReferenceType())
    return false;
  return InnerMatcher.matches(Node->getPointeeType(), Finder, Builder);
}
/// Matches QualTypes whose canonical type matches InnerMatcher.
///
/// Given:
/// \code
/// typedef int &int_ref;
/// int a;
/// int_ref b = a;
/// \endcode
///
/// \c varDecl(hasType(qualType(referenceType()))) will not match the
/// declaration of b but \c
/// varDecl(hasType(qualType(hasCanonicalType(referenceType())))) does.
AST_MATCHER_P(QualType, hasCanonicalType, internal::Matcher<QualType>,
              InnerMatcher) {
  // A null QualType has no canonical form and never matches.
  return !Node.isNull() &&
         InnerMatcher.matches(Node.getCanonicalType(), Finder, Builder);
}
/// Overloaded to match the referenced type's declaration.
AST_MATCHER_P_OVERLOAD(QualType, references, internal::Matcher<Decl>,
                       InnerMatcher, 1) {
  // Wrap the declaration matcher and reuse the QualType overload.
  const auto ReferencedMatcher = qualType(hasDeclaration(InnerMatcher));
  return references(ReferencedMatcher).matches(Node, Finder, Builder);
}
/// Matches on the implicit object argument of a member call expression. Unlike
/// `on`, matches the argument directly without stripping away anything.
///
/// Given
/// \code
/// class Y { public: void m(); };
/// Y g();
/// class X : public Y { void g(); };
/// void z(Y y, X x) { y.m(); x.m(); x.g(); (g()).m(); }
/// \endcode
/// cxxMemberCallExpr(onImplicitObjectArgument(hasType(
/// cxxRecordDecl(hasName("Y")))))
/// matches `y.m()`, `x.m()` and (g()).m(), but not `x.g()`.
/// cxxMemberCallExpr(on(callExpr()))
/// does not match `(g()).m()`, because the parens are not ignored.
///
/// FIXME: Overload to allow directly matching types?
AST_MATCHER_P(CXXMemberCallExpr, onImplicitObjectArgument,
              internal::Matcher<Expr>, InnerMatcher) {
  // Match directly on the implicit object argument, with no stripping.
  if (const Expr *ObjectArg = Node.getImplicitObjectArgument())
    return InnerMatcher.matches(*ObjectArg, Finder, Builder);
  return false;
}
/// Matches if the type of the expression's implicit object argument either
/// matches the InnerMatcher, or is a pointer to a type that matches the
/// InnerMatcher.
///
/// Given
/// \code
/// class Y { public: void m(); };
/// class X : public Y { void g(); };
/// void z() { Y y; y.m(); Y *p; p->m(); X x; x.m(); x.g(); }
/// \endcode
/// cxxMemberCallExpr(thisPointerType(hasDeclaration(
/// cxxRecordDecl(hasName("Y")))))
/// matches `y.m()`, `p->m()` and `x.m()`.
/// cxxMemberCallExpr(thisPointerType(hasDeclaration(
/// cxxRecordDecl(hasName("X")))))
/// matches `x.g()`.
AST_MATCHER_P_OVERLOAD(CXXMemberCallExpr, thisPointerType,
                       internal::Matcher<QualType>, InnerMatcher, 0) {
  // Accept either a direct type match (obj.m()) or a pointer to a matching
  // type (ptr->m()).
  const auto TypeMatcher =
      anyOf(hasType(InnerMatcher), hasType(pointsTo(InnerMatcher)));
  return onImplicitObjectArgument(TypeMatcher).matches(Node, Finder, Builder);
}
/// Overloaded to match the type's declaration.
AST_MATCHER_P_OVERLOAD(CXXMemberCallExpr, thisPointerType,
                       internal::Matcher<Decl>, InnerMatcher, 1) {
  // Same as the QualType overload, but going through the type's declaration.
  const auto TypeMatcher =
      anyOf(hasType(InnerMatcher), hasType(pointsTo(InnerMatcher)));
  return onImplicitObjectArgument(TypeMatcher).matches(Node, Finder, Builder);
}
/// Matches a DeclRefExpr that refers to a declaration that matches the
/// specified matcher.
///
/// Example matches x in if(x)
/// (matcher = declRefExpr(to(varDecl(hasName("x")))))
/// \code
/// bool x;
/// if (x) {}
/// \endcode
AST_MATCHER_P(DeclRefExpr, to, internal::Matcher<Decl>,
              InnerMatcher) {
  // Forward to the referenced declaration, if any.
  if (const Decl *Referenced = Node.getDecl())
    return InnerMatcher.matches(*Referenced, Finder, Builder);
  return false;
}
/// Matches a \c DeclRefExpr that refers to a declaration through a
/// specific using shadow declaration.
///
/// Given
/// \code
/// namespace a { void f() {} }
/// using a::f;
/// void g() {
/// f(); // Matches this ..
/// a::f(); // .. but not this.
/// }
/// \endcode
/// declRefExpr(throughUsingDecl(anything()))
/// matches \c f()
AST_MATCHER_P(DeclRefExpr, throughUsingDecl,
              internal::Matcher<UsingShadowDecl>, InnerMatcher) {
  // Only references resolved through a using-shadow declaration can match.
  const auto *Shadow = dyn_cast<UsingShadowDecl>(Node.getFoundDecl());
  return Shadow && InnerMatcher.matches(*Shadow, Finder, Builder);
}
/// Matches an \c OverloadExpr if any of the declarations in the set of
/// overloads matches the given matcher.
///
/// Given
/// \code
/// template <typename T> void foo(T);
/// template <typename T> void bar(T);
/// template <typename T> void baz(T t) {
/// foo(t);
/// bar(t);
/// }
/// \endcode
/// unresolvedLookupExpr(hasAnyDeclaration(
/// functionTemplateDecl(hasName("foo"))))
/// matches \c foo in \c foo(t); but not \c bar in \c bar(t);
AST_MATCHER_P(OverloadExpr, hasAnyDeclaration, internal::Matcher<Decl>,
              InnerMatcher) {
  // Delegates to matchesFirstInPointerRange over the overload set
  // [decls_begin, decls_end): succeeds on the first declaration that
  // satisfies InnerMatcher.
  return matchesFirstInPointerRange(InnerMatcher, Node.decls_begin(),
                                    Node.decls_end(), Finder, Builder);
}
/// Matches the Decl of a DeclStmt which has a single declaration.
///
/// Given
/// \code
/// int a, b;
/// int c;
/// \endcode
/// declStmt(hasSingleDecl(anything()))
/// matches 'int c;' but not 'int a, b;'.
AST_MATCHER_P(DeclStmt, hasSingleDecl, internal::Matcher<Decl>, InnerMatcher) {
  // Statements declaring more than one entity never match.
  if (!Node.isSingleDecl())
    return false;
  return InnerMatcher.matches(*Node.getSingleDecl(), Finder, Builder);
}
/// Matches a variable declaration that has an initializer expression
/// that matches the given matcher.
///
/// Example matches x (matcher = varDecl(hasInitializer(callExpr())))
/// \code
/// bool y() { return true; }
/// bool x = y();
/// \endcode
AST_MATCHER_P(
    VarDecl, hasInitializer, internal::Matcher<Expr>,
    InnerMatcher) {
  // Match only if an initializer expression is present.
  if (const Expr *Init = Node.getAnyInitializer())
    return InnerMatcher.matches(*Init, Finder, Builder);
  return false;
}
/// \brief Matches a static variable with local scope.
///
/// Example matches y (matcher = varDecl(isStaticLocal()))
/// \code
/// void f() {
/// int x;
/// static int y;
/// }
/// static int z;
/// \endcode
AST_MATCHER(VarDecl, isStaticLocal) {
  // Thin wrapper over VarDecl::isStaticLocal().
  return Node.isStaticLocal();
}
/// Matches a variable declaration that has function scope and is a
/// non-static local variable.
///
/// Example matches x (matcher = varDecl(hasLocalStorage())
/// \code
/// void f() {
/// int x;
/// static int y;
/// }
/// int z;
/// \endcode
AST_MATCHER(VarDecl, hasLocalStorage) {
  // Thin wrapper over VarDecl::hasLocalStorage().
  return Node.hasLocalStorage();
}
/// Matches a variable declaration that does not have local storage.
///
/// Example matches y and z (matcher = varDecl(hasGlobalStorage())
/// \code
/// void f() {
/// int x;
/// static int y;
/// }
/// int z;
/// \endcode
AST_MATCHER(VarDecl, hasGlobalStorage) {
  // Thin wrapper over VarDecl::hasGlobalStorage().
  return Node.hasGlobalStorage();
}
/// Matches a variable declaration that has automatic storage duration.
///
/// Example matches x, but not y, z, or a.
/// (matcher = varDecl(hasAutomaticStorageDuration())
/// \code
/// void f() {
/// int x;
/// static int y;
/// thread_local int z;
/// }
/// int a;
/// \endcode
AST_MATCHER(VarDecl, hasAutomaticStorageDuration) {
  // Compares the declaration's storage duration against SD_Automatic.
  return Node.getStorageDuration() == SD_Automatic;
}
/// Matches a variable declaration that has static storage duration.
/// It includes the variable declared at namespace scope and those declared
/// with "static" and "extern" storage class specifiers.
///
/// \code
/// void f() {
/// int x;
/// static int y;
/// thread_local int z;
/// }
/// int a;
/// static int b;
/// extern int c;
/// varDecl(hasStaticStorageDuration())
/// matches the variable declarations y, a, b and c.
/// \endcode
AST_MATCHER(VarDecl, hasStaticStorageDuration) {
  // Compares the declaration's storage duration against SD_Static.
  return Node.getStorageDuration() == SD_Static;
}
/// Matches a variable declaration that has thread storage duration.
///
/// Example matches z, but not x, y, or a.
/// (matcher = varDecl(hasThreadStorageDuration())
/// \code
/// void f() {
/// int x;
/// static int y;
/// thread_local int z;
/// }
/// int a;
/// \endcode
AST_MATCHER(VarDecl, hasThreadStorageDuration) {
  // Compares the declaration's storage duration against SD_Thread.
  return Node.getStorageDuration() == SD_Thread;
}
/// Matches a variable declaration that is an exception variable from
/// a C++ catch block, or an Objective-C \@catch statement.
///
/// Example matches x (matcher = varDecl(isExceptionVariable())
/// \code
/// void f(int y) {
/// try {
/// } catch (int x) {
/// }
/// }
/// \endcode
AST_MATCHER(VarDecl, isExceptionVariable) {
  // Thin wrapper over VarDecl::isExceptionVariable().
  return Node.isExceptionVariable();
}
/// Checks that a call expression or a constructor call expression has
/// a specific number of arguments (including absent default arguments).
///
/// Example matches f(0, 0) (matcher = callExpr(argumentCountIs(2)))
/// \code
/// void f(int x, int y);
/// f(0, 0);
/// \endcode
AST_POLYMORPHIC_MATCHER_P(argumentCountIs,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(CallExpr,
                                                          CXXConstructExpr,
                                                          ObjCMessageExpr),
                          unsigned, N) {
  // The count includes arguments filled in from default arguments.
  return N == Node.getNumArgs();
}
/// Matches the n'th argument of a call expression or a constructor
/// call expression.
///
/// Example matches y in x(y)
/// (matcher = callExpr(hasArgument(0, declRefExpr())))
/// \code
/// void x(int) { int y; x(y); }
/// \endcode
AST_POLYMORPHIC_MATCHER_P2(hasArgument,
                           AST_POLYMORPHIC_SUPPORTED_TYPES(CallExpr,
                                                           CXXConstructExpr,
                                                           ObjCMessageExpr),
                           unsigned, N, internal::Matcher<Expr>, InnerMatcher) {
  // Out-of-range positions never match.
  if (N >= Node.getNumArgs())
    return false;
  // Parentheses and implicit casts around the argument are skipped.
  const Expr *Arg = Node.getArg(N)->IgnoreParenImpCasts();
  return InnerMatcher.matches(*Arg, Finder, Builder);
}
/// Matches the n'th item of an initializer list expression.
///
/// Example matches y.
/// (matcher = initListExpr(hasInit(0, expr())))
/// \code
///   int x{y};
/// \endcode
AST_MATCHER_P2(InitListExpr, hasInit, unsigned, N,
               ast_matchers::internal::Matcher<Expr>, InnerMatcher) {
  // Out-of-range positions never match.
  if (N >= Node.getNumInits())
    return false;
  return InnerMatcher.matches(*Node.getInit(N), Finder, Builder);
}
/// Matches declaration statements that contain a specific number of
/// declarations.
///
/// Example: Given
/// \code
/// int a, b;
/// int c;
/// int d = 2, e;
/// \endcode
/// declCountIs(2)
/// matches 'int a, b;' and 'int d = 2, e;', but not 'int c;'.
AST_MATCHER_P(DeclStmt, declCountIs, unsigned, N) {
  // Count the declarations in the group; use a named cast (not a C-style
  // cast) for the signed/unsigned comparison with std::distance's result.
  return std::distance(Node.decl_begin(), Node.decl_end()) ==
         static_cast<ptrdiff_t>(N);
}
/// Matches the n'th declaration of a declaration statement.
///
/// Note that this does not work for global declarations because the AST
/// breaks up multiple-declaration DeclStmt's into multiple single-declaration
/// DeclStmt's.
/// Example: Given non-global declarations
/// \code
/// int a, b = 0;
/// int c;
/// int d = 2, e;
/// \endcode
/// declStmt(containsDeclaration(
/// 0, varDecl(hasInitializer(anything()))))
/// matches only 'int d = 2, e;', and
/// declStmt(containsDeclaration(1, varDecl()))
/// \code
/// matches 'int a, b = 0' as well as 'int d = 2, e;'
/// but 'int c;' is not matched.
/// \endcode
AST_MATCHER_P2(DeclStmt, containsDeclaration, unsigned, N,
               internal::Matcher<Decl>, InnerMatcher) {
  // Reject positions past the end of the declaration group.
  const unsigned NumDecls = std::distance(Node.decl_begin(), Node.decl_end());
  if (N >= NumDecls)
    return false;
  // Step to the N'th declaration and apply the inner matcher to it.
  return InnerMatcher.matches(**std::next(Node.decl_begin(), N), Finder,
                              Builder);
}
/// Matches a C++ catch statement that has a catch-all handler.
///
/// Given
/// \code
/// try {
/// // ...
/// } catch (int) {
/// // ...
/// } catch (...) {
/// // ...
/// }
/// \endcode
/// cxxCatchStmt(isCatchAll()) matches catch(...) but not catch(int).
AST_MATCHER(CXXCatchStmt, isCatchAll) {
  // catch (...) carries no exception declaration.
  return !Node.getExceptionDecl();
}
/// Matches a constructor initializer.
///
/// Given
/// \code
/// struct Foo {
/// Foo() : foo_(1) { }
/// int foo_;
/// };
/// \endcode
/// cxxRecordDecl(has(cxxConstructorDecl(
/// hasAnyConstructorInitializer(anything())
/// )))
/// record matches Foo, hasAnyConstructorInitializer matches foo_(1)
AST_MATCHER_P(CXXConstructorDecl, hasAnyConstructorInitializer,
              internal::Matcher<CXXCtorInitializer>, InnerMatcher) {
  // Delegates to matchesFirstInPointerRange over [init_begin, init_end):
  // succeeds on the first constructor initializer that satisfies
  // InnerMatcher.
  return matchesFirstInPointerRange(InnerMatcher, Node.init_begin(),
                                    Node.init_end(), Finder, Builder);
}
/// Matches the field declaration of a constructor initializer.
///
/// Given
/// \code
/// struct Foo {
/// Foo() : foo_(1) { }
/// int foo_;
/// };
/// \endcode
/// cxxRecordDecl(has(cxxConstructorDecl(hasAnyConstructorInitializer(
/// forField(hasName("foo_"))))))
/// matches Foo
/// with forField matching foo_
AST_MATCHER_P(CXXCtorInitializer, forField,
              internal::Matcher<FieldDecl>, InnerMatcher) {
  // Initializers without a member field (e.g. base initializers) never match.
  if (const FieldDecl *Member = Node.getAnyMember())
    return InnerMatcher.matches(*Member, Finder, Builder);
  return false;
}
/// Matches the initializer expression of a constructor initializer.
///
/// Given
/// \code
/// struct Foo {
/// Foo() : foo_(1) { }
/// int foo_;
/// };
/// \endcode
/// cxxRecordDecl(has(cxxConstructorDecl(hasAnyConstructorInitializer(
/// withInitializer(integerLiteral(equals(1)))))))
/// matches Foo
/// with withInitializer matching (1)
AST_MATCHER_P(CXXCtorInitializer, withInitializer,
              internal::Matcher<Expr>, InnerMatcher) {
  // Match only when an initializer expression is present.
  if (const Expr *Init = Node.getInit())
    return InnerMatcher.matches(*Init, Finder, Builder);
  return false;
}
/// Matches a constructor initializer if it is explicitly written in
/// code (as opposed to implicitly added by the compiler).
///
/// Given
/// \code
/// struct Foo {
/// Foo() { }
/// Foo(int) : foo_("A") { }
/// string foo_;
/// };
/// \endcode
/// cxxConstructorDecl(hasAnyConstructorInitializer(isWritten()))
/// will match Foo(int), but not Foo()
AST_MATCHER(CXXCtorInitializer, isWritten) {
  // Thin wrapper over CXXCtorInitializer::isWritten().
  return Node.isWritten();
}
/// Matches a constructor initializer if it is initializing a base, as
/// opposed to a member.
///
/// Given
/// \code
/// struct B {};
/// struct D : B {
/// int I;
/// D(int i) : I(i) {}
/// };
/// struct E : B {
/// E() : B() {}
/// };
/// \endcode
/// cxxConstructorDecl(hasAnyConstructorInitializer(isBaseInitializer()))
/// will match E(), but not match D(int).
AST_MATCHER(CXXCtorInitializer, isBaseInitializer) {
  // Thin wrapper over CXXCtorInitializer::isBaseInitializer().
  return Node.isBaseInitializer();
}
/// Matches a constructor initializer if it is initializing a member, as
/// opposed to a base.
///
/// Given
/// \code
/// struct B {};
/// struct D : B {
/// int I;
/// D(int i) : I(i) {}
/// };
/// struct E : B {
/// E() : B() {}
/// };
/// \endcode
/// cxxConstructorDecl(hasAnyConstructorInitializer(isMemberInitializer()))
/// will match D(int), but not match E().
AST_MATCHER(CXXCtorInitializer, isMemberInitializer) {
  // Thin wrapper over CXXCtorInitializer::isMemberInitializer().
  return Node.isMemberInitializer();
}
/// Matches any argument of a call expression or a constructor call
/// expression, or an ObjC-message-send expression.
///
/// Given
/// \code
/// void x(int, int, int) { int y; x(1, y, 42); }
/// \endcode
/// callExpr(hasAnyArgument(declRefExpr()))
/// matches x(1, y, 42)
/// with hasAnyArgument(...)
/// matching y
///
/// For ObjectiveC, given
/// \code
/// @interface I - (void) f:(int) y; @end
/// void foo(I *i) { [i f:12]; }
/// \endcode
/// objcMessageExpr(hasAnyArgument(integerLiteral(equals(12))))
/// matches [i f:12]
AST_POLYMORPHIC_MATCHER_P(hasAnyArgument,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(
                              CallExpr, CXXConstructExpr,
                              CXXUnresolvedConstructExpr, ObjCMessageExpr),
                          internal::Matcher<Expr>, InnerMatcher) {
  for (const Expr *Arg : Node.arguments()) {
    // Try each argument on a scratch copy of the current bindings; commit
    // the copy into *Builder only on the first successful match, so failed
    // attempts leave the caller's bindings untouched.
    BoundNodesTreeBuilder Result(*Builder);
    if (InnerMatcher.matches(*Arg, Finder, &Result)) {
      *Builder = std::move(Result);
      return true;
    }
  }
  return false;
}
/// Matches any capture of a lambda expression.
///
/// Given
/// \code
/// void foo() {
/// int x;
/// auto f = [x](){};
/// }
/// \endcode
/// lambdaExpr(hasAnyCapture(anything()))
/// matches [x](){};
AST_MATCHER_P_OVERLOAD(LambdaExpr, hasAnyCapture, internal::Matcher<VarDecl>,
                       InnerMatcher, 0) {
  for (const LambdaCapture &Capture : Node.captures()) {
    // Only variable captures are considered here; 'this' captures are
    // handled by the Matcher<CXXThisExpr> overload.
    if (Capture.capturesVariable()) {
      // Match on a scratch copy of the bindings; commit only on success so
      // failed attempts leave the caller's bindings untouched.
      BoundNodesTreeBuilder Result(*Builder);
      if (InnerMatcher.matches(*Capture.getCapturedVar(), Finder, &Result)) {
        *Builder = std::move(Result);
        return true;
      }
    }
  }
  return false;
}
/// Matches any capture of 'this' in a lambda expression.
///
/// Given
/// \code
/// struct foo {
/// void bar() {
/// auto f = [this](){};
/// }
/// }
/// \endcode
/// lambdaExpr(hasAnyCapture(cxxThisExpr()))
/// matches [this](){};
AST_MATCHER_P_OVERLOAD(LambdaExpr, hasAnyCapture,
                       internal::Matcher<CXXThisExpr>, InnerMatcher, 1) {
  // InnerMatcher is not consulted: any capture of 'this' is sufficient.
  for (const LambdaCapture &Capture : Node.captures())
    if (Capture.capturesThis())
      return true;
  return false;
}
/// Matches a constructor call expression which uses list initialization.
AST_MATCHER(CXXConstructExpr, isListInitialization) {
  // Thin wrapper over CXXConstructExpr::isListInitialization().
  return Node.isListInitialization();
}
/// Matches a constructor call expression which requires
/// zero initialization.
///
/// Given
/// \code
/// void foo() {
/// struct point { double x; double y; };
/// point pt[2] = { { 1.0, 2.0 } };
/// }
/// \endcode
/// initListExpr(has(cxxConstructExpr(requiresZeroInitialization()))
/// will match the implicit array filler for pt[1].
AST_MATCHER(CXXConstructExpr, requiresZeroInitialization) {
  // Thin wrapper over CXXConstructExpr::requiresZeroInitialization().
  return Node.requiresZeroInitialization();
}
/// Matches the n'th parameter of a function or an ObjC method
/// declaration or a block.
///
/// Given
/// \code
/// class X { void f(int x) {} };
/// \endcode
/// cxxMethodDecl(hasParameter(0, hasType(varDecl())))
/// matches f(int x) {}
/// with hasParameter(...)
/// matching int x
///
/// For ObjectiveC, given
/// \code
/// @interface I - (void) f:(int) y; @end
/// \endcode
//
/// the matcher objcMethodDecl(hasParameter(0, hasName("y")))
/// matches the declaration of method f with hasParameter
/// matching y.
AST_POLYMORPHIC_MATCHER_P2(hasParameter,
                           AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl,
                                                           ObjCMethodDecl,
                                                           BlockDecl),
                           unsigned, N, internal::Matcher<ParmVarDecl>,
                           InnerMatcher) {
  // Out-of-range positions never match.
  if (N >= Node.parameters().size())
    return false;
  return InnerMatcher.matches(*Node.parameters()[N], Finder, Builder);
}
/// Matches all arguments and their respective ParmVarDecl.
///
/// Given
/// \code
/// void f(int i);
/// int y;
/// f(y);
/// \endcode
/// callExpr(
/// forEachArgumentWithParam(
/// declRefExpr(to(varDecl(hasName("y")))),
/// parmVarDecl(hasType(isInteger()))
/// ))
/// matches f(y);
/// with declRefExpr(...)
/// matching int y
/// and parmVarDecl(...)
/// matching int i
AST_POLYMORPHIC_MATCHER_P2(forEachArgumentWithParam,
                           AST_POLYMORPHIC_SUPPORTED_TYPES(CallExpr,
                                                           CXXConstructExpr),
                           internal::Matcher<Expr>, ArgMatcher,
                           internal::Matcher<ParmVarDecl>, ParamMatcher) {
  // Accumulates the bindings of every matching argument/parameter pair and
  // installs them into *Builder at the end.
  BoundNodesTreeBuilder Result;
  // The first argument of an overloaded member operator is the implicit object
  // argument of the method which should not be matched against a parameter, so
  // we skip over it here.
  BoundNodesTreeBuilder Matches;
  unsigned ArgIndex = cxxOperatorCallExpr(callee(cxxMethodDecl()))
                              .matches(Node, Finder, &Matches)
                          ? 1
                          : 0;
  int ParamIndex = 0;
  bool Matched = false;
  for (; ArgIndex < Node.getNumArgs(); ++ArgIndex) {
    // Test the argument on a scratch copy of the caller's bindings.
    BoundNodesTreeBuilder ArgMatches(*Builder);
    if (ArgMatcher.matches(*(Node.getArg(ArgIndex)->IgnoreParenCasts()),
                           Finder, &ArgMatches)) {
      BoundNodesTreeBuilder ParamMatches(ArgMatches);
      // Re-match the whole call/construct expression, this time requiring
      // the ParamIndex'th parameter of the callee to satisfy ParamMatcher.
      if (expr(anyOf(cxxConstructExpr(hasDeclaration(cxxConstructorDecl(
                         hasParameter(ParamIndex, ParamMatcher)))),
                     callExpr(callee(functionDecl(
                         hasParameter(ParamIndex, ParamMatcher))))))
              .matches(Node, Finder, &ParamMatches)) {
        Result.addMatch(ParamMatches);
        Matched = true;
      }
    }
    // ParamIndex advances in lock-step with ArgIndex (offset by the skipped
    // implicit object argument, if any), regardless of match success.
    ++ParamIndex;
  }
  *Builder = std::move(Result);
  return Matched;
}
/// Matches all arguments and their respective types for a \c CallExpr or
/// \c CXXConstructExpr. It is very similar to \c forEachArgumentWithParam but
/// it works on calls through function pointers as well.
///
/// The difference is, that function pointers do not provide access to a
/// \c ParmVarDecl, but only the \c QualType for each argument.
///
/// Given
/// \code
/// void f(int i);
/// int y;
/// f(y);
/// void (*f_ptr)(int) = f;
/// f_ptr(y);
/// \endcode
/// callExpr(
/// forEachArgumentWithParamType(
/// declRefExpr(to(varDecl(hasName("y")))),
///     qualType(isInteger()).bind("type")
/// ))
/// matches f(y) and f_ptr(y)
/// with declRefExpr(...)
/// matching int y
/// and qualType(...)
/// matching int
AST_POLYMORPHIC_MATCHER_P2(forEachArgumentWithParamType,
                           AST_POLYMORPHIC_SUPPORTED_TYPES(CallExpr,
                                                           CXXConstructExpr),
                           internal::Matcher<Expr>, ArgMatcher,
                           internal::Matcher<QualType>, ParamMatcher) {
  // Accumulates the bindings of every matching argument/parameter-type pair
  // and installs them into *Builder at the end.
  BoundNodesTreeBuilder Result;
  // The first argument of an overloaded member operator is the implicit object
  // argument of the method which should not be matched against a parameter, so
  // we skip over it here.
  BoundNodesTreeBuilder Matches;
  unsigned ArgIndex = cxxOperatorCallExpr(callee(cxxMethodDecl()))
                          .matches(Node, Finder, &Matches)
                          ? 1
                          : 0;
  // When the callee is only known through a (member) function pointer there
  // is no ParmVarDecl to inspect; recover the FunctionProtoType instead so
  // the parameter *types* can still be matched.
  const FunctionProtoType *FProto = nullptr;
  if (const auto *Call = dyn_cast<CallExpr>(&Node)) {
    if (const auto *Value =
            dyn_cast_or_null<ValueDecl>(Call->getCalleeDecl())) {
      QualType QT = Value->getType().getCanonicalType();
      // This does not necessarily lead to a `FunctionProtoType`,
      // e.g. K&R functions do not have a function prototype.
      if (QT->isFunctionPointerType())
        FProto = QT->getPointeeType()->getAs<FunctionProtoType>();
      if (QT->isMemberFunctionPointerType()) {
        const auto *MP = QT->getAs<MemberPointerType>();
        assert(MP && "Must be member-pointer if its a memberfunctionpointer");
        FProto = MP->getPointeeType()->getAs<FunctionProtoType>();
        assert(FProto &&
               "The call must have happened through a member function "
               "pointer");
      }
    }
  }
  int ParamIndex = 0;
  bool Matched = false;
  for (; ArgIndex < Node.getNumArgs(); ++ArgIndex, ++ParamIndex) {
    BoundNodesTreeBuilder ArgMatches(*Builder);
    if (ArgMatcher.matches(*(Node.getArg(ArgIndex)->IgnoreParenCasts()), Finder,
                           &ArgMatches)) {
      BoundNodesTreeBuilder ParamMatches(ArgMatches);
      // This test is cheaper compared to the big matcher in the next if.
      // Therefore, please keep this order.
      // Guard against variadic arguments: a call through a variadic function
      // pointer can supply more arguments than the prototype has parameters,
      // and getParamType must not be called out of range.
      if (FProto && ParamIndex < static_cast<int>(FProto->getNumParams())) {
        QualType ParamType = FProto->getParamType(ParamIndex);
        if (ParamMatcher.matches(ParamType, Finder, &ParamMatches)) {
          Result.addMatch(ParamMatches);
          Matched = true;
          continue;
        }
      }
      // Fall back to matching through the callee's declaration, covering
      // direct calls and constructor invocations.
      if (expr(anyOf(cxxConstructExpr(hasDeclaration(cxxConstructorDecl(
                         hasParameter(ParamIndex, hasType(ParamMatcher))))),
                     callExpr(callee(functionDecl(
                         hasParameter(ParamIndex, hasType(ParamMatcher)))))))
              .matches(Node, Finder, &ParamMatches)) {
        Result.addMatch(ParamMatches);
        Matched = true;
        continue;
      }
    }
  }
  *Builder = std::move(Result);
  return Matched;
}
/// Matches the ParmVarDecl nodes that are at the N'th position in the parameter
/// list. The parameter list could be that of either a block, function, or
/// objc-method.
///
///
/// Given
///
/// \code
/// void f(int a, int b, int c) {
/// }
/// \endcode
///
/// ``parmVarDecl(isAtPosition(0))`` matches ``int a``.
///
/// ``parmVarDecl(isAtPosition(1))`` matches ``int b``.
AST_MATCHER_P(ParmVarDecl, isAtPosition, unsigned, N) {
  // The parameter's owner may be a function, a block, or an ObjC method;
  // each stores its parameter list separately, so try each kind in turn and
  // check that the N'th slot is exactly this declaration.
  const clang::DeclContext *Context = Node.getParentFunctionOrMethod();
  if (const auto *Decl = dyn_cast_or_null<FunctionDecl>(Context))
    return N < Decl->param_size() && Decl->getParamDecl(N) == &Node;
  if (const auto *Decl = dyn_cast_or_null<BlockDecl>(Context))
    return N < Decl->param_size() && Decl->getParamDecl(N) == &Node;
  if (const auto *Decl = dyn_cast_or_null<ObjCMethodDecl>(Context))
    return N < Decl->param_size() && Decl->getParamDecl(N) == &Node;
  // No recognized parent context: never matches.
  return false;
}
/// Matches any parameter of a function or an ObjC method declaration or a
/// block.
///
/// Does not match the 'this' parameter of a method.
///
/// Given
/// \code
/// class X { void f(int x, int y, int z) {} };
/// \endcode
/// cxxMethodDecl(hasAnyParameter(hasName("y")))
/// matches f(int x, int y, int z) {}
/// with hasAnyParameter(...)
/// matching int y
///
/// For ObjectiveC, given
/// \code
/// @interface I - (void) f:(int) y; @end
/// \endcode
//
/// the matcher objcMethodDecl(hasAnyParameter(hasName("y")))
/// matches the declaration of method f with hasParameter
/// matching y.
///
/// For blocks, given
/// \code
/// b = ^(int y) { printf("%d", y) };
/// \endcode
///
/// the matcher blockDecl(hasAnyParameter(hasName("y")))
/// matches the declaration of the block b with hasParameter
/// matching y.
AST_POLYMORPHIC_MATCHER_P(hasAnyParameter,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl,
                                                          ObjCMethodDecl,
                                                          BlockDecl),
                          internal::Matcher<ParmVarDecl>,
                          InnerMatcher) {
  // Delegates to matchesFirstInPointerRange over [param_begin, param_end):
  // succeeds on the first parameter that satisfies InnerMatcher.
  return matchesFirstInPointerRange(InnerMatcher, Node.param_begin(),
                                    Node.param_end(), Finder, Builder);
}
/// Matches \c FunctionDecls and \c FunctionProtoTypes that have a
/// specific parameter count.
///
/// Given
/// \code
/// void f(int i) {}
/// void g(int i, int j) {}
/// void h(int i, int j);
/// void j(int i);
/// void k(int x, int y, int z, ...);
/// \endcode
/// functionDecl(parameterCountIs(2))
/// matches \c g and \c h
/// functionProtoType(parameterCountIs(2))
/// matches \c g and \c h
/// functionProtoType(parameterCountIs(3))
/// matches \c k
AST_POLYMORPHIC_MATCHER_P(parameterCountIs,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl,
                                                          FunctionProtoType),
                          unsigned, N) {
  // A trailing variadic ellipsis is not counted among the parameters.
  return N == Node.getNumParams();
}
/// Matches \c FunctionDecls that have a noreturn attribute.
///
/// Given
/// \code
/// void nope();
/// [[noreturn]] void a();
/// __attribute__((noreturn)) void b();
/// struct c { [[noreturn]] c(); };
/// \endcode
/// functionDecl(isNoReturn())
/// matches all of those except
/// \code
/// void nope();
/// \endcode
AST_MATCHER(FunctionDecl, isNoReturn) { return Node.isNoReturn(); } // wraps FunctionDecl::isNoReturn()
/// Matches the return type of a function declaration.
///
/// Given:
/// \code
/// class X { int f() { return 1; } };
/// \endcode
/// cxxMethodDecl(returns(asString("int")))
/// matches int f() { return 1; }
AST_MATCHER_P(FunctionDecl, returns,
              internal::Matcher<QualType>, InnerMatcher) {
  // Apply the inner matcher to the declared return type.
  const QualType RetTy = Node.getReturnType();
  return InnerMatcher.matches(RetTy, Finder, Builder);
}
/// Matches extern "C" function or variable declarations.
///
/// Given:
/// \code
/// extern "C" void f() {}
/// extern "C" { void g() {} }
/// void h() {}
/// extern "C" int x = 1;
/// extern "C" int y = 2;
/// int z = 3;
/// \endcode
/// functionDecl(isExternC())
/// matches the declaration of f and g, but not the declaration of h.
/// varDecl(isExternC())
/// matches the declaration of x and y, but not the declaration of z.
AST_POLYMORPHIC_MATCHER(isExternC, AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl,
                                                                   VarDecl)) {
  // Both FunctionDecl and VarDecl expose isExternC().
  return Node.isExternC();
}
/// Matches variable/function declarations that have "static" storage
/// class specifier ("static" keyword) written in the source.
///
/// Given:
/// \code
///   static void f() {}
///   static int i = 0;
///   extern int j;
///   int k;
/// \endcode
/// functionDecl(isStaticStorageClass())
///   matches the function declaration f.
/// varDecl(isStaticStorageClass())
///   matches the variable declaration i.
AST_POLYMORPHIC_MATCHER(isStaticStorageClass,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl,
                                                        VarDecl)) {
  // Compares the storage class as written in the source; extern and
  // unspecified storage do not match.
  return Node.getStorageClass() == SC_Static;
}
/// Matches deleted function declarations.
///
/// Given:
/// \code
///   void Func();
///   void DeletedFunc() = delete;
/// \endcode
/// functionDecl(isDeleted())
///   matches the declaration of DeletedFunc, but not Func.
AST_MATCHER(FunctionDecl, isDeleted) {
  // Direct delegation to FunctionDecl::isDeleted() ('= delete').
  return Node.isDeleted();
}
/// Matches defaulted function declarations.
///
/// Given:
/// \code
///   class A { ~A(); };
///   class B { ~B() = default; };
/// \endcode
/// functionDecl(isDefaulted())
///   matches the declaration of ~B, but not ~A.
AST_MATCHER(FunctionDecl, isDefaulted) {
  // Direct delegation to FunctionDecl::isDefaulted() ('= default').
  return Node.isDefaulted();
}
/// Matches functions that have a dynamic exception specification.
///
/// Given:
/// \code
/// void f();
/// void g() noexcept;
/// void h() noexcept(true);
/// void i() noexcept(false);
/// void j() throw();
/// void k() throw(int);
/// void l() throw(...);
/// \endcode
/// functionDecl(hasDynamicExceptionSpec()) and
/// functionProtoType(hasDynamicExceptionSpec())
/// match the declarations of j, k, and l, but not f, g, h, or i.
AST_POLYMORPHIC_MATCHER(hasDynamicExceptionSpec,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl,
                                                        FunctionProtoType)) {
  // getFunctionProtoType() unifies the two supported node kinds; a node
  // without a prototype cannot carry an exception specification.
  if (const FunctionProtoType *FnTy = internal::getFunctionProtoType(Node))
    return FnTy->hasDynamicExceptionSpec();
  return false;
}
/// Matches functions that have a non-throwing exception specification.
///
/// Given:
/// \code
/// void f();
/// void g() noexcept;
/// void h() throw();
/// void i() throw(int);
/// void j() noexcept(false);
/// \endcode
/// functionDecl(isNoThrow()) and functionProtoType(isNoThrow())
/// match the declarations of g, and h, but not f, i or j.
AST_POLYMORPHIC_MATCHER(isNoThrow,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl,
                                                        FunctionProtoType)) {
  const FunctionProtoType *FnTy = internal::getFunctionProtoType(Node);
  // If the function does not have a prototype, then it is assumed to be a
  // throwing function (as it would be if the function did not have any
  // exception specification).
  if (!FnTy)
    return false;
  // Assume the best for any unresolved exception specification.
  if (isUnresolvedExceptionSpec(FnTy->getExceptionSpecType()))
    return true;
  return FnTy->isNothrow();
}
/// Matches constexpr variable and function declarations,
/// and if constexpr.
///
/// Given:
/// \code
/// constexpr int foo = 42;
/// constexpr int bar();
/// void baz() { if constexpr(1 > 0) {} }
/// \endcode
/// varDecl(isConstexpr())
/// matches the declaration of foo.
/// functionDecl(isConstexpr())
/// matches the declaration of bar.
/// ifStmt(isConstexpr())
/// matches the if statement in baz.
AST_POLYMORPHIC_MATCHER(isConstexpr,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(VarDecl,
                                                        FunctionDecl,
                                                        IfStmt)) {
  // VarDecl, FunctionDecl, and IfStmt all expose an isConstexpr() accessor.
  return Node.isConstexpr();
}
/// Matches selection statements with initializer.
///
/// Given:
/// \code
/// void foo() {
/// if (int i = foobar(); i > 0) {}
/// switch (int i = foobar(); i) {}
/// for (auto& a = get_range(); auto& x : a) {}
/// }
/// void bar() {
/// if (foobar() > 0) {}
/// switch (foobar()) {}
/// for (auto& x : get_range()) {}
/// }
/// \endcode
/// ifStmt(hasInitStatement(anything()))
/// matches the if statement in foo but not in bar.
/// switchStmt(hasInitStatement(anything()))
/// matches the switch statement in foo but not in bar.
/// cxxForRangeStmt(hasInitStatement(anything()))
/// matches the range for statement in foo but not in bar.
AST_POLYMORPHIC_MATCHER_P(hasInitStatement,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(IfStmt, SwitchStmt,
                                                          CXXForRangeStmt),
                          internal::Matcher<Stmt>, InnerMatcher) {
  // Apply the inner matcher only when an init-statement is actually present.
  if (const Stmt *InitStmt = Node.getInit())
    return InnerMatcher.matches(*InitStmt, Finder, Builder);
  return false;
}
/// Matches the condition expression of an if statement, for loop,
/// switch statement or conditional operator.
///
/// Example matches true (matcher = hasCondition(cxxBoolLiteral(equals(true))))
/// \code
/// if (true) {}
/// \endcode
AST_POLYMORPHIC_MATCHER_P(
    hasCondition,
    AST_POLYMORPHIC_SUPPORTED_TYPES(IfStmt, ForStmt, WhileStmt, DoStmt,
                                    SwitchStmt, AbstractConditionalOperator),
    internal::Matcher<Expr>, InnerMatcher) {
  // A condition may be absent (e.g. 'for (;;)'); match only when present.
  if (const Expr *Cond = Node.getCond())
    return InnerMatcher.matches(*Cond, Finder, Builder);
  return false;
}
/// Matches the then-statement of an if statement.
///
/// Examples matches the if statement
/// (matcher = ifStmt(hasThen(cxxBoolLiteral(equals(true)))))
/// \code
/// if (false) true; else false;
/// \endcode
AST_MATCHER_P(IfStmt, hasThen, internal::Matcher<Stmt>, InnerMatcher) {
  // Guard against a null then-branch before delegating.
  if (const Stmt *ThenBranch = Node.getThen())
    return InnerMatcher.matches(*ThenBranch, Finder, Builder);
  return false;
}
/// Matches the else-statement of an if statement.
///
/// Examples matches the if statement
/// (matcher = ifStmt(hasElse(cxxBoolLiteral(equals(true)))))
/// \code
/// if (false) false; else true;
/// \endcode
AST_MATCHER_P(IfStmt, hasElse, internal::Matcher<Stmt>, InnerMatcher) {
  // If-statements without an else-branch never match.
  if (const Stmt *ElseBranch = Node.getElse())
    return InnerMatcher.matches(*ElseBranch, Finder, Builder);
  return false;
}
/// Matches if a node equals a previously bound node.
///
/// Matches a node if it equals the node previously bound to \p ID.
///
/// Given
/// \code
/// class X { int a; int b; };
/// \endcode
/// cxxRecordDecl(
/// has(fieldDecl(hasName("a"), hasType(type().bind("t")))),
/// has(fieldDecl(hasName("b"), hasType(type(equalsBoundNode("t"))))))
/// matches the class \c X, as \c a and \c b have the same type.
///
/// Note that when multiple matches are involved via \c forEach* matchers,
/// \c equalsBoundNode acts as a filter.
/// For example:
/// compoundStmt(
/// forEachDescendant(varDecl().bind("d")),
/// forEachDescendant(declRefExpr(to(decl(equalsBoundNode("d"))))))
/// will trigger a match for each combination of variable declaration
/// and reference to that variable declaration within a compound statement.
AST_POLYMORPHIC_MATCHER_P(equalsBoundNode,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(Stmt, Decl, Type,
                                                          QualType),
                          std::string, ID) {
  // FIXME: Figure out whether it makes sense to allow this
  // on any other node types.
  // For *Loc it probably does not make sense, as those seem
  // unique. For NestedNameSpecifier it might make sense, as
  // those also have pointer identity, but I'm not sure whether
  // they're ever reused.
  // Drop candidate bindings whose node bound to \p ID is not equal to the
  // current node (NotEqualsBoundNodePredicate); the result of
  // removeBindings() decides the match.
  internal::NotEqualsBoundNodePredicate Predicate;
  Predicate.ID = ID;
  Predicate.Node = DynTypedNode::create(Node);
  return Builder->removeBindings(Predicate);
}
/// Matches the condition variable statement in an if statement.
///
/// Given
/// \code
/// if (A* a = GetAPointer()) {}
/// \endcode
/// hasConditionVariableStatement(...)
/// matches 'A* a = GetAPointer()'.
AST_MATCHER_P(IfStmt, hasConditionVariableStatement,
              internal::Matcher<DeclStmt>, InnerMatcher) {
  // Only if-statements that declare a condition variable carry this DeclStmt.
  if (const DeclStmt *CondVarStmt = Node.getConditionVariableDeclStmt())
    return InnerMatcher.matches(*CondVarStmt, Finder, Builder);
  return false;
}
/// Matches the index expression of an array subscript expression.
///
/// Given
/// \code
/// int i[5];
/// void f() { i[1] = 42; }
/// \endcode
/// arraySubscriptExpr(hasIndex(integerLiteral()))
/// matches \c i[1] with the \c integerLiteral() matching \c 1
AST_MATCHER_P(ArraySubscriptExpr, hasIndex,
              internal::Matcher<Expr>, InnerMatcher) {
  // Match only when an index expression is present.
  const Expr *IndexExpr = Node.getIdx();
  return IndexExpr != nullptr &&
         InnerMatcher.matches(*IndexExpr, Finder, Builder);
}
/// Matches the base expression of an array subscript expression.
///
/// Given
/// \code
/// int i[5];
/// void f() { i[1] = 42; }
/// \endcode
/// arraySubscriptExpr(hasBase(implicitCastExpr(
///     hasSourceExpression(declRefExpr()))))
/// matches \c i[1] with the \c declRefExpr() matching \c i
AST_MATCHER_P(ArraySubscriptExpr, hasBase,
              internal::Matcher<Expr>, InnerMatcher) {
  // Match only when a base expression is present.
  const Expr *BaseExpr = Node.getBase();
  return BaseExpr != nullptr &&
         InnerMatcher.matches(*BaseExpr, Finder, Builder);
}
/// Matches a 'for', 'while', 'do while' statement or a function
/// definition that has a given body. Note that in case of functions
/// this matcher only matches the definition itself and not the other
/// declarations of the same function.
///
/// Given
/// \code
/// for (;;) {}
/// \endcode
/// hasBody(compoundStmt())
/// matches 'for (;;) {}'
/// with compoundStmt()
/// matching '{}'
///
/// Given
/// \code
/// void f();
/// void f() {}
/// \endcode
/// functionDecl(hasBody(compoundStmt()))
/// matches 'void f() {}'
/// with compoundStmt()
/// matching '{}'
/// but does not match 'void f();'
AST_POLYMORPHIC_MATCHER_P(hasBody,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(DoStmt, ForStmt,
                                                          WhileStmt,
                                                          CXXForRangeStmt,
                                                          FunctionDecl),
                          internal::Matcher<Stmt>, InnerMatcher) {
  // GetBodyMatcher dispatches per node type; nodes without a body (e.g. a
  // non-defining FunctionDecl, per the contract above) never match.
  if (const Stmt *Body = internal::GetBodyMatcher<NodeType>::get(Node))
    return InnerMatcher.matches(*Body, Finder, Builder);
  return false;
}
/// Matches a function declaration that has a given body present in the AST.
/// Note that this matcher matches all the declarations of a function whose
/// body is present in the AST.
///
/// Given
/// \code
/// void f();
/// void f() {}
/// void g();
/// \endcode
/// functionDecl(hasAnyBody(compoundStmt()))
/// matches both 'void f();'
/// and 'void f() {}'
/// with compoundStmt()
/// matching '{}'
/// but does not match 'void g();'
AST_MATCHER_P(FunctionDecl, hasAnyBody,
              internal::Matcher<Stmt>, InnerMatcher) {
  // getBody() yields the body even when another redeclaration defines it.
  const Stmt *Body = Node.getBody();
  if (Body == nullptr)
    return false;
  return InnerMatcher.matches(*Body, Finder, Builder);
}
/// Matches compound statements where at least one substatement matches
/// a given matcher. Also matches StmtExprs that have CompoundStmt as children.
///
/// Given
/// \code
/// { {}; 1+2; }
/// \endcode
/// hasAnySubstatement(compoundStmt())
/// matches '{ {}; 1+2; }'
/// with compoundStmt()
/// matching '{}'
AST_POLYMORPHIC_MATCHER_P(hasAnySubstatement,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(CompoundStmt,
                                                          StmtExpr),
                          internal::Matcher<Stmt>, InnerMatcher) {
  // For StmtExpr, CompoundStmtMatcher extracts the child CompoundStmt.
  if (const CompoundStmt *Compound = CompoundStmtMatcher<NodeType>::get(Node))
    return matchesFirstInPointerRange(InnerMatcher, Compound->body_begin(),
                                      Compound->body_end(), Finder, Builder);
  return false;
}
/// Checks that a compound statement contains a specific number of
/// child statements.
///
/// Example: Given
/// \code
/// { for (;;) {} }
/// \endcode
/// compoundStmt(statementCountIs(0))
/// matches '{}'
/// but does not match the outer compound statement.
AST_MATCHER_P(CompoundStmt, statementCountIs, unsigned, N) {
  // CompoundStmt::size() counts direct child statements only.
  return Node.size() == N;
}
/// Matches literals that are equal to the given value of type ValueT.
///
/// Given
/// \code
/// f('\0', false, 3.14, 42);
/// \endcode
/// characterLiteral(equals(0))
/// matches '\0'
/// cxxBoolLiteral(equals(false)) and cxxBoolLiteral(equals(0))
/// match false
/// floatLiteral(equals(3.14)) and floatLiteral(equals(314e-2))
/// match 3.14
/// integerLiteral(equals(42))
/// matches 42
///
/// Note that you cannot directly match a negative numeric literal because the
/// minus sign is not part of the literal: It is a unary operator whose operand
/// is the positive numeric literal. Instead, you must use a unaryOperator()
/// matcher to match the minus sign:
///
/// unaryOperator(hasOperatorName("-"),
/// hasUnaryOperand(integerLiteral(equals(13))))
///
/// Usable as: Matcher<CharacterLiteral>, Matcher<CXXBoolLiteralExpr>,
/// Matcher<FloatingLiteral>, Matcher<IntegerLiteral>
template <typename ValueT>
internal::PolymorphicMatcherWithParam1<internal::ValueEqualsMatcher, ValueT>
equals(const ValueT &Value) {
  // Generic form: wraps ValueEqualsMatcher, leaving the literal node type
  // to be deduced at the use site.
  return internal::PolymorphicMatcherWithParam1<
    internal::ValueEqualsMatcher,
    ValueT>(Value);
}
// Fixed-parameter-type overloads of \c equals for bool, unsigned, and
// double arguments; each forwards to ValueEqualsMatcher for the concrete
// literal node type.
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(equals,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(CharacterLiteral,
                                                          CXXBoolLiteralExpr,
                                                          IntegerLiteral),
                          bool, Value, 0) {
  return internal::ValueEqualsMatcher<NodeType, ParamT>(Value)
    .matchesNode(Node);
}
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(equals,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(CharacterLiteral,
                                                          CXXBoolLiteralExpr,
                                                          IntegerLiteral),
                          unsigned, Value, 1) {
  return internal::ValueEqualsMatcher<NodeType, ParamT>(Value)
    .matchesNode(Node);
}
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(equals,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(CharacterLiteral,
                                                          CXXBoolLiteralExpr,
                                                          FloatingLiteral,
                                                          IntegerLiteral),
                          double, Value, 2) {
  return internal::ValueEqualsMatcher<NodeType, ParamT>(Value)
    .matchesNode(Node);
}
/// Matches the operator Name of operator expressions (binary or
/// unary).
///
/// Example matches a || b (matcher = binaryOperator(hasOperatorName("||")))
/// \code
/// !(a || b)
/// \endcode
AST_POLYMORPHIC_MATCHER_P(hasOperatorName,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(BinaryOperator,
                                                          UnaryOperator),
                          std::string, Name) {
  // Compare against the spelling of this node's opcode (e.g. "||", "!").
  return Name == Node.getOpcodeStr(Node.getOpcode());
}
/// Matches operator expressions (binary or unary) that have any of the
/// specified names.
///
/// hasAnyOperatorName("+", "-")
/// Is equivalent to
/// anyOf(hasOperatorName("+"), hasOperatorName("-"))
// Variadic-function object: packs the given operator spellings into a
// single HasAnyOperatorNameMatcher. 'extern' — defined outside this header.
extern const internal::VariadicFunction<
    internal::PolymorphicMatcherWithParam1<
        internal::HasAnyOperatorNameMatcher, std::vector<std::string>,
        AST_POLYMORPHIC_SUPPORTED_TYPES(BinaryOperator, UnaryOperator)>,
    StringRef, internal::hasAnyOperatorNameFunc>
    hasAnyOperatorName;
/// Matches all kinds of assignment operators.
///
/// Example 1: matches a += b (matcher = binaryOperator(isAssignmentOperator()))
/// \code
/// if (a == b)
/// a += b;
/// \endcode
///
/// Example 2: matches s1 = s2
/// (matcher = cxxOperatorCallExpr(isAssignmentOperator()))
/// \code
/// struct S { S& operator=(const S&); };
/// void x() { S s1, s2; s1 = s2; }
/// \endcode
AST_POLYMORPHIC_MATCHER(isAssignmentOperator,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(BinaryOperator,
                                                        CXXOperatorCallExpr)) {
  // Covers '=' and compound assignments; both node types expose
  // isAssignmentOp().
  return Node.isAssignmentOp();
}
/// Matches comparison operators.
///
/// Example 1: matches a == b (matcher = binaryOperator(isComparisonOperator()))
/// \code
///   if (a == b)
///     a += b;
/// \endcode
///
/// Example 2: matches s1 < s2
///    (matcher = cxxOperatorCallExpr(isComparisonOperator()))
/// \code
///   struct S { bool operator<(const S& other); };
///   void x(S s1, S s2) { bool b1 = s1 < s2; }
/// \endcode
AST_POLYMORPHIC_MATCHER(isComparisonOperator,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(BinaryOperator,
                                                        CXXOperatorCallExpr)) {
  // Both node types expose isComparisonOp().
  return Node.isComparisonOp();
}
/// Matches the left hand side of binary operator expressions.
///
/// Example matches a (matcher = binaryOperator(hasLHS()))
/// \code
/// a || b
/// \endcode
AST_POLYMORPHIC_MATCHER_P(hasLHS,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(BinaryOperator,
                                                          ArraySubscriptExpr),
                          internal::Matcher<Expr>, InnerMatcher) {
  // Defensive null check before delegating to the inner matcher.
  if (const Expr *Lhs = Node.getLHS())
    return InnerMatcher.matches(*Lhs, Finder, Builder);
  return false;
}
/// Matches the right hand side of binary operator expressions.
///
/// Example matches b (matcher = binaryOperator(hasRHS()))
/// \code
/// a || b
/// \endcode
AST_POLYMORPHIC_MATCHER_P(hasRHS,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(BinaryOperator,
                                                          ArraySubscriptExpr),
                          internal::Matcher<Expr>, InnerMatcher) {
  // Defensive null check before delegating to the inner matcher.
  if (const Expr *Rhs = Node.getRHS())
    return InnerMatcher.matches(*Rhs, Finder, Builder);
  return false;
}
/// Matches if either the left hand side or the right hand side of a
/// binary operator matches.
inline internal::Matcher<BinaryOperator> hasEitherOperand(
    const internal::Matcher<Expr> &InnerMatcher) {
  // Accept the inner matcher on either side of the operator.
  const auto MatchesLhs = hasLHS(InnerMatcher);
  const auto MatchesRhs = hasRHS(InnerMatcher);
  return anyOf(MatchesLhs, MatchesRhs);
}
/// Matches if both matchers match with opposite sides of the binary operator.
///
/// Example matcher = binaryOperator(hasOperands(integerLiteral(equals(1),
/// integerLiteral(equals(2)))
/// \code
/// 1 + 2 // Match
/// 2 + 1 // Match
/// 1 + 1 // No match
/// 2 + 2 // No match
/// \endcode
inline internal::Matcher<BinaryOperator>
hasOperands(const internal::Matcher<Expr> &Matcher1,
            const internal::Matcher<Expr> &Matcher2) {
  // Require the two matchers on opposite sides, in either order.
  const auto FirstOnLeft = allOf(hasLHS(Matcher1), hasRHS(Matcher2));
  const auto FirstOnRight = allOf(hasLHS(Matcher2), hasRHS(Matcher1));
  return anyOf(FirstOnLeft, FirstOnRight);
}
/// Matches if the operand of a unary operator matches.
///
/// Example matches true (matcher = hasUnaryOperand(
/// cxxBoolLiteral(equals(true))))
/// \code
/// !true
/// \endcode
AST_MATCHER_P(UnaryOperator, hasUnaryOperand,
              internal::Matcher<Expr>, InnerMatcher) {
  // Match only when the operand expression is present.
  if (const Expr *SubExpr = Node.getSubExpr())
    return InnerMatcher.matches(*SubExpr, Finder, Builder);
  return false;
}
/// Matches if the cast's source expression
/// or opaque value's source expression matches the given matcher.
///
/// Example 1: matches "a string"
/// (matcher = castExpr(hasSourceExpression(cxxConstructExpr())))
/// \code
/// class URL { URL(string); };
/// URL url = "a string";
/// \endcode
///
/// Example 2: matches 'b' (matcher =
/// opaqueValueExpr(hasSourceExpression(implicitCastExpr(declRefExpr())))
/// \code
/// int a = b ?: 1;
/// \endcode
AST_POLYMORPHIC_MATCHER_P(hasSourceExpression,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(CastExpr,
                                                          OpaqueValueExpr),
                          internal::Matcher<Expr>, InnerMatcher) {
  // GetSourceExpressionMatcher dispatches on the concrete node type.
  const Expr *Source =
      internal::GetSourceExpressionMatcher<NodeType>::get(Node);
  if (Source == nullptr)
    return false;
  return InnerMatcher.matches(*Source, Finder, Builder);
}
/// Matches casts that have a given cast kind.
///
/// Example: matches the implicit cast around \c 0
/// (matcher = castExpr(hasCastKind(CK_NullToPointer)))
/// \code
/// int *p = 0;
/// \endcode
///
/// If the matcher is used from clang-query, CastKind parameter
/// should be passed as a quoted string. e.g., hasCastKind("CK_NullToPointer").
AST_MATCHER_P(CastExpr, hasCastKind, CastKind, Kind) {
  // Direct comparison of the CastKind enumerator.
  return Node.getCastKind() == Kind;
}
/// Matches casts whose destination type matches a given matcher.
///
/// (Note: Clang's AST refers to other conversions as "casts" too, and calls
/// actual casts "explicit" casts.)
AST_MATCHER_P(ExplicitCastExpr, hasDestinationType,
              internal::Matcher<QualType>, InnerMatcher) {
  // Use the type as written in the cast expression. (Local renamed from
  // 'NodeType' to avoid confusion with the polymorphic macros' NodeType.)
  const QualType WrittenType = Node.getTypeAsWritten();
  return InnerMatcher.matches(WrittenType, Finder, Builder);
}
/// Matches implicit casts whose destination type matches a given
/// matcher.
///
/// FIXME: Unit test this matcher
AST_MATCHER_P(ImplicitCastExpr, hasImplicitDestinationType,
              internal::Matcher<QualType>, InnerMatcher) {
  // The destination of an implicit cast is simply the expression's type.
  return InnerMatcher.matches(Node.getType(), Finder, Builder);
}
/// Matches TagDecl object that are spelled with "struct."
///
/// Example matches S, but not C, U or E.
/// \code
/// struct S {};
/// class C {};
/// union U {};
/// enum E {};
/// \endcode
AST_MATCHER(TagDecl, isStruct) {
  // True iff the tag was spelled with the "struct" keyword.
  return Node.isStruct();
}
/// Matches TagDecl objects that are spelled with "union."
///
/// Example matches U, but not C, S or E.
/// \code
///   struct S {};
///   class C {};
///   union U {};
///   enum E {};
/// \endcode
AST_MATCHER(TagDecl, isUnion) {
  return Node.isUnion();
}
/// Matches TagDecl objects that are spelled with "class."
///
/// Example matches C, but not S, U or E.
/// \code
///   struct S {};
///   class C {};
///   union U {};
///   enum E {};
/// \endcode
AST_MATCHER(TagDecl, isClass) {
  return Node.isClass();
}
/// Matches TagDecl objects that are spelled with "enum."
///
/// Example matches E, but not C, S or U.
/// \code
///   struct S {};
///   class C {};
///   union U {};
///   enum E {};
/// \endcode
AST_MATCHER(TagDecl, isEnum) {
  return Node.isEnum();
}
/// Matches the true branch expression of a conditional operator.
///
/// Example 1 (conditional ternary operator): matches a
/// \code
/// condition ? a : b
/// \endcode
///
/// Example 2 (conditional binary operator): matches opaqueValueExpr(condition)
/// \code
/// condition ?: b
/// \endcode
AST_MATCHER_P(AbstractConditionalOperator, hasTrueExpression,
              internal::Matcher<Expr>, InnerMatcher) {
  // For the binary form 'c ?: b' the true branch is the opaque value
  // wrapping the condition (see the documentation above).
  if (const Expr *TrueBranch = Node.getTrueExpr())
    return InnerMatcher.matches(*TrueBranch, Finder, Builder);
  return false;
}
/// Matches the false branch expression of a conditional operator
/// (binary or ternary).
///
/// Example matches b
/// \code
/// condition ? a : b
/// condition ?: b
/// \endcode
AST_MATCHER_P(AbstractConditionalOperator, hasFalseExpression,
              internal::Matcher<Expr>, InnerMatcher) {
  // Defensive null check before delegating to the inner matcher.
  if (const Expr *FalseBranch = Node.getFalseExpr())
    return InnerMatcher.matches(*FalseBranch, Finder, Builder);
  return false;
}
/// Matches if a declaration has a body attached.
///
/// Example matches A, va, fa
/// \code
/// class A {};
/// class B; // Doesn't match, as it has no body.
/// int va;
/// extern int vb; // Doesn't match, as it doesn't define the variable.
/// void fa() {}
/// void fb(); // Doesn't match, as it has no body.
/// @interface X
/// - (void)ma; // Doesn't match, interface is declaration.
/// @end
/// @implementation X
/// - (void)ma {}
/// @end
/// \endcode
///
/// Usable as: Matcher<TagDecl>, Matcher<VarDecl>, Matcher<FunctionDecl>,
/// Matcher<ObjCMethodDecl>
AST_POLYMORPHIC_MATCHER(isDefinition,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(TagDecl, VarDecl,
                                                        ObjCMethodDecl,
                                                        FunctionDecl)) {
  // True only for the redeclaration that actually provides the definition.
  return Node.isThisDeclarationADefinition();
}
/// Matches if a function declaration is variadic.
///
/// Example matches f, but not g or h. The function i will not match, even when
/// compiled in C mode.
/// \code
///   void f(...);
///   void g(int);
///   template <typename... Ts> void h(Ts...);
///   void i();
/// \endcode
AST_MATCHER(FunctionDecl, isVariadic) {
  // C-style variadic ellipsis only; template parameter packs do not count
  // (see the 'h' example above).
  return Node.isVariadic();
}
/// Matches the class declaration that the given method declaration
/// belongs to.
///
/// FIXME: Generalize this for other kinds of declarations.
/// FIXME: What other kind of declarations would we need to generalize
/// this to?
///
/// Example matches A() in the last line
/// (matcher = cxxConstructExpr(hasDeclaration(cxxMethodDecl(
/// ofClass(hasName("A"))))))
/// \code
/// class A {
/// public:
/// A();
/// };
/// A a = A();
/// \endcode
AST_MATCHER_P(CXXMethodDecl, ofClass,
              internal::Matcher<CXXRecordDecl>, InnerMatcher) {
  // getParent() is the record the method is declared in.
  if (const CXXRecordDecl *OwningClass = Node.getParent())
    return InnerMatcher.matches(*OwningClass, Finder, Builder);
  return false;
}
/// Matches each method overridden by the given method. This matcher may
/// produce multiple matches.
///
/// Given
/// \code
/// class A { virtual void f(); };
/// class B : public A { void f(); };
/// class C : public B { void f(); };
/// \endcode
/// cxxMethodDecl(ofClass(hasName("C")),
/// forEachOverridden(cxxMethodDecl().bind("b"))).bind("d")
/// matches once, with "b" binding "A::f" and "d" binding "C::f" (Note
/// that B::f is not overridden by C::f).
///
/// The check can produce multiple matches in case of multiple inheritance, e.g.
/// \code
/// class A1 { virtual void f(); };
/// class A2 { virtual void f(); };
/// class C : public A1, public A2 { void f(); };
/// \endcode
/// cxxMethodDecl(ofClass(hasName("C")),
/// forEachOverridden(cxxMethodDecl().bind("b"))).bind("d")
/// matches twice, once with "b" binding "A1::f" and "d" binding "C::f", and
/// once with "b" binding "A2::f" and "d" binding "C::f".
AST_MATCHER_P(CXXMethodDecl, forEachOverridden,
              internal::Matcher<CXXMethodDecl>, InnerMatcher) {
  // Collect a separate binding set for every overridden method that the
  // inner matcher accepts, so each one is reported as its own match.
  BoundNodesTreeBuilder Result;
  bool Matched = false;
  for (const auto *Overridden : Node.overridden_methods()) {
    // Match against a copy of the builder so failed candidates do not
    // pollute the accumulated result.
    BoundNodesTreeBuilder OverriddenBuilder(*Builder);
    const bool OverriddenMatched =
        InnerMatcher.matches(*Overridden, Finder, &OverriddenBuilder);
    if (OverriddenMatched) {
      Matched = true;
      Result.addMatch(OverriddenBuilder);
    }
  }
  // Replace the caller's bindings with the accumulated per-candidate sets.
  *Builder = std::move(Result);
  return Matched;
}
/// Matches declarations of virtual methods and C++ base specifers that specify
/// virtual inheritance.
///
/// Example:
/// \code
/// class A {
/// public:
/// virtual void x(); // matches x
/// };
/// \endcode
///
/// Example:
/// \code
/// class Base {};
/// class DirectlyDerived : virtual Base {}; // matches Base
/// class IndirectlyDerived : DirectlyDerived, Base {}; // matches Base
/// \endcode
///
/// Usable as: Matcher<CXXMethodDecl>, Matcher<CXXBaseSpecifier>
AST_POLYMORPHIC_MATCHER(isVirtual,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(CXXMethodDecl,
                                                        CXXBaseSpecifier)) {
  // Both methods and base specifiers expose an isVirtual() accessor.
  return Node.isVirtual();
}
/// Matches if the given method declaration has an explicit "virtual".
///
/// Given
/// \code
/// class A {
/// public:
/// virtual void x();
/// };
/// class B : public A {
/// public:
/// void x();
/// };
/// \endcode
/// matches A::x but not B::x
AST_MATCHER(CXXMethodDecl, isVirtualAsWritten) {
  // Requires the 'virtual' keyword on this declaration itself; an override
  // that is only implicitly virtual does not match (see the example above).
  return Node.isVirtualAsWritten();
}
/// Matches if the given method or class declaration is final.
///
/// Given:
/// \code
///   class A final {};
///
///   struct B {
///     virtual void f();
///   };
///
///   struct C : B {
///     void f() final;
///   };
/// \endcode
/// matches A and C::f, but not B, C, or B::f
AST_POLYMORPHIC_MATCHER(isFinal,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl,
                                                        CXXMethodDecl)) {
  // 'final' is represented in the AST as a FinalAttr on both node kinds;
  // 'template' disambiguates the dependent member call.
  return Node.template hasAttr<FinalAttr>();
}
/// Matches if the given method declaration is pure.
///
/// Given
/// \code
///   class A {
///  public:
///     virtual void x() = 0;
///   };
/// \endcode
///   matches A::x
AST_MATCHER(CXXMethodDecl, isPure) {
  // Pure virtual ('= 0').
  return Node.isPure();
}
/// Matches if the given method declaration is const.
///
/// Given
/// \code
/// struct A {
///   void foo() const;
///   void bar();
/// };
/// \endcode
///
/// cxxMethodDecl(isConst()) matches A::foo() but not A::bar()
AST_MATCHER(CXXMethodDecl, isConst) {
  return Node.isConst();
}
/// Matches if the given method declaration declares a copy assignment
/// operator.
///
/// Given
/// \code
/// struct A {
///   A &operator=(const A &);
///   A &operator=(A &&);
/// };
/// \endcode
///
/// cxxMethodDecl(isCopyAssignmentOperator()) matches the first method but not
/// the second one.
AST_MATCHER(CXXMethodDecl, isCopyAssignmentOperator) {
  return Node.isCopyAssignmentOperator();
}
/// Matches if the given method declaration declares a move assignment
/// operator.
///
/// Given
/// \code
/// struct A {
///   A &operator=(const A &);
///   A &operator=(A &&);
/// };
/// \endcode
///
/// cxxMethodDecl(isMoveAssignmentOperator()) matches the second method but not
/// the first one.
AST_MATCHER(CXXMethodDecl, isMoveAssignmentOperator) {
  return Node.isMoveAssignmentOperator();
}
/// Matches if the given method declaration overrides another method.
///
/// Given
/// \code
///   class A {
///    public:
///     virtual void x();
///   };
///   class B : public A {
///    public:
///     virtual void x();
///   };
/// \endcode
///   matches B::x
AST_MATCHER(CXXMethodDecl, isOverride) {
  // Either an overridden method is recorded for this declaration, or an
  // explicit 'override' (OverrideAttr) is present.
  return Node.size_overridden_methods() > 0 || Node.hasAttr<OverrideAttr>();
}
/// Matches method declarations that are user-provided.
///
/// Given
/// \code
///   struct S {
///     S(); // #1
///     S(const S &) = default; // #2
///     S(S &&) = delete; // #3
///   };
/// \endcode
/// cxxConstructorDecl(isUserProvided()) will match #1, but not #2 or #3.
AST_MATCHER(CXXMethodDecl, isUserProvided) {
  // Excludes defaulted and deleted declarations (see #2/#3 above).
  return Node.isUserProvided();
}
/// Matches member expressions that are called with '->' as opposed
/// to '.'.
///
/// Member calls on the implicit this pointer match as called with '->'.
///
/// Given
/// \code
/// class Y {
/// void x() { this->x(); x(); Y y; y.x(); a; this->b; Y::b; }
/// template <class T> void f() { this->f<T>(); f<T>(); }
/// int a;
/// static int b;
/// };
/// template <class T>
/// class Z {
/// void x() { this->m; }
/// };
/// \endcode
/// memberExpr(isArrow())
/// matches this->x, x, y.x, a, this->b
/// cxxDependentScopeMemberExpr(isArrow())
/// matches this->m
/// unresolvedMemberExpr(isArrow())
/// matches this->f<T>, f<T>
AST_POLYMORPHIC_MATCHER(
    isArrow, AST_POLYMORPHIC_SUPPORTED_TYPES(MemberExpr, UnresolvedMemberExpr,
                                             CXXDependentScopeMemberExpr)) {
  // Implicit member accesses on 'this' also report isArrow() (see above).
  return Node.isArrow();
}
/// Matches QualType nodes that are of integer type.
///
/// Given
/// \code
/// void a(int);
/// void b(long);
/// void c(double);
/// \endcode
/// functionDecl(hasAnyParameter(hasType(isInteger())))
/// matches "a(int)", "b(long)", but not "c(double)".
AST_MATCHER(QualType, isInteger) {
  // operator-> on QualType reaches the underlying Type.
  return Node->isIntegerType();
}
/// Matches QualType nodes that are of unsigned integer type.
///
/// Given
/// \code
///   void a(int);
///   void b(unsigned long);
///   void c(double);
/// \endcode
/// functionDecl(hasAnyParameter(hasType(isUnsignedInteger())))
///   matches "b(unsigned long)", but not "a(int)" and "c(double)".
AST_MATCHER(QualType, isUnsignedInteger) {
  return Node->isUnsignedIntegerType();
}
/// Matches QualType nodes that are of signed integer type.
///
/// Given
/// \code
///   void a(int);
///   void b(unsigned long);
///   void c(double);
/// \endcode
/// functionDecl(hasAnyParameter(hasType(isSignedInteger())))
///   matches "a(int)", but not "b(unsigned long)" and "c(double)".
AST_MATCHER(QualType, isSignedInteger) {
  return Node->isSignedIntegerType();
}
/// Matches QualType nodes that are of character type.
///
/// Given
/// \code
///   void a(char);
///   void b(wchar_t);
///   void c(double);
/// \endcode
/// functionDecl(hasAnyParameter(hasType(isAnyCharacter())))
///   matches "a(char)", "b(wchar_t)", but not "c(double)".
AST_MATCHER(QualType, isAnyCharacter) {
  return Node->isAnyCharacterType();
}
/// Matches QualType nodes that are of any pointer type; this includes
/// the Objective-C object pointer type, which is different despite being
/// syntactically similar.
///
/// Given
/// \code
///   int *i = nullptr;
///
///   @interface Foo
///   @end
///   Foo *f;
///
///   int j;
/// \endcode
/// varDecl(hasType(isAnyPointer()))
///   matches "int *i" and "Foo *f", but not "int j".
AST_MATCHER(QualType, isAnyPointer) {
  return Node->isAnyPointerType();
}
/// Matches QualType nodes that are const-qualified, i.e., that
/// include "top-level" const.
///
/// Given
/// \code
///   void a(int);
///   void b(int const);
///   void c(const int);
///   void d(const int*);
///   void e(int const) {};
/// \endcode
/// functionDecl(hasAnyParameter(hasType(isConstQualified())))
///   matches "void b(int const)", "void c(const int)" and
///   "void e(int const) {}". It does not match d as there
///   is no top-level const on the parameter type "const int *".
AST_MATCHER(QualType, isConstQualified) {
  // Top-level const only; const on a pointee (e.g. 'const int *') is not a
  // qualifier of this QualType itself (see the 'd' example above).
  return Node.isConstQualified();
}
/// Matches QualType nodes that are volatile-qualified, i.e., that
/// include "top-level" volatile.
///
/// Given
/// \code
///   void a(int);
///   void b(int volatile);
///   void c(volatile int);
///   void d(volatile int*);
///   void e(int volatile) {};
/// \endcode
/// functionDecl(hasAnyParameter(hasType(isVolatileQualified())))
///   matches "void b(int volatile)", "void c(volatile int)" and
///   "void e(int volatile) {}". It does not match d as there
///   is no top-level volatile on the parameter type "volatile int *".
AST_MATCHER(QualType, isVolatileQualified) {
  return Node.isVolatileQualified();
}
/// Matches QualType nodes that have local CV-qualifiers attached to
/// the node, not hidden within a typedef.
///
/// Given
/// \code
///   typedef const int const_int;
///   const_int i;
///   int *const j;
///   int *volatile k;
///   int m;
/// \endcode
/// \c varDecl(hasType(hasLocalQualifiers())) matches only \c j and \c k.
/// \c i is const-qualified but the qualifier is not local.
AST_MATCHER(QualType, hasLocalQualifiers) {
  // Unlike isConstQualified(), this ignores qualifiers that come in via
  // typedef sugar (see the const_int example above).
  return Node.hasLocalQualifiers();
}
/// Matches a member expression where the member is matched by a
/// given matcher.
///
/// Given
/// \code
/// struct { int first, second; } first, second;
/// int i(second.first);
/// int j(first.second);
/// \endcode
/// memberExpr(member(hasName("first")))
/// matches second.first
/// but not first.second (because the member name there is "second").
AST_MATCHER_P(MemberExpr, member,
              internal::Matcher<ValueDecl>, InnerMatcher) {
  // Run the inner matcher against the declaration the member refers to.
  return InnerMatcher.matches(*Node.getMemberDecl(), Finder, Builder);
}
/// Matches a member expression where the object expression is matched by a
/// given matcher. Implicit object expressions are included; that is, it matches
/// use of implicit `this`.
///
/// Given
/// \code
/// struct X {
/// int m;
/// int f(X x) { x.m; return m; }
/// };
/// \endcode
/// memberExpr(hasObjectExpression(hasType(cxxRecordDecl(hasName("X")))))
/// matches `x.m`, but not `m`; however,
/// memberExpr(hasObjectExpression(hasType(pointsTo(
/// cxxRecordDecl(hasName("X"))))))
/// matches `m` (aka. `this->m`), but not `x.m`.
AST_POLYMORPHIC_MATCHER_P(
    hasObjectExpression,
    AST_POLYMORPHIC_SUPPORTED_TYPES(MemberExpr, UnresolvedMemberExpr,
                                    CXXDependentScopeMemberExpr),
    internal::Matcher<Expr>, InnerMatcher) {
  // Unresolved/dependent member accesses with an implicit base ("this")
  // carry no base expression to hand to the inner matcher, so reject them.
  if (const auto *E = dyn_cast<UnresolvedMemberExpr>(&Node))
    if (E->isImplicitAccess())
      return false;
  if (const auto *E = dyn_cast<CXXDependentScopeMemberExpr>(&Node))
    if (E->isImplicitAccess())
      return false;
  return InnerMatcher.matches(*Node.getBase(), Finder, Builder);
}
/// Matches any using shadow declaration.
///
/// Given
/// \code
/// namespace X { void b(); }
/// using X::b;
/// \endcode
/// usingDecl(hasAnyUsingShadowDecl(hasName("b")))
/// matches \code using X::b \endcode
AST_MATCHER_P(UsingDecl, hasAnyUsingShadowDecl,
              internal::Matcher<UsingShadowDecl>, InnerMatcher) {
  // Succeeds on the first shadow declaration the inner matcher accepts.
  return matchesFirstInPointerRange(InnerMatcher, Node.shadow_begin(),
                                    Node.shadow_end(), Finder, Builder);
}
/// Matches a using shadow declaration where the target declaration is
/// matched by the given matcher.
///
/// Given
/// \code
/// namespace X { int a; void b(); }
/// using X::a;
/// using X::b;
/// \endcode
/// usingDecl(hasAnyUsingShadowDecl(hasTargetDecl(functionDecl())))
/// matches \code using X::b \endcode
/// but not \code using X::a \endcode
AST_MATCHER_P(UsingShadowDecl, hasTargetDecl,
              internal::Matcher<NamedDecl>, InnerMatcher) {
  // Match against the declaration the using-shadow refers to.
  return InnerMatcher.matches(*Node.getTargetDecl(), Finder, Builder);
}
/// Matches template instantiations of function, class, or static
/// member variable template instantiations.
///
/// Given
/// \code
/// template <typename T> class X {}; class A {}; X<A> x;
/// \endcode
/// or
/// \code
/// template <typename T> class X {}; class A {}; template class X<A>;
/// \endcode
/// or
/// \code
/// template <typename T> class X {}; class A {}; extern template class X<A>;
/// \endcode
/// cxxRecordDecl(hasName("::X"), isTemplateInstantiation())
/// matches the template instantiation of X<A>.
///
/// But given
/// \code
/// template <typename T> class X {}; class A {};
/// template <> class X<A> {}; X<A> x;
/// \endcode
/// cxxRecordDecl(hasName("::X"), isTemplateInstantiation())
/// does not match, as X<A> is an explicit template specialization.
///
/// Usable as: Matcher<FunctionDecl>, Matcher<VarDecl>, Matcher<CXXRecordDecl>
AST_POLYMORPHIC_MATCHER(isTemplateInstantiation,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl, VarDecl,
                                                        CXXRecordDecl)) {
  // Query the specialization kind once instead of three times. Any of the
  // implicit/explicit *instantiation* kinds qualifies; explicit
  // specializations (TSK_ExplicitSpecialization) intentionally do not match.
  const TemplateSpecializationKind TSK = Node.getTemplateSpecializationKind();
  return TSK == TSK_ImplicitInstantiation ||
         TSK == TSK_ExplicitInstantiationDefinition ||
         TSK == TSK_ExplicitInstantiationDeclaration;
}
/// Matches declarations that are template instantiations or are inside
/// template instantiations.
///
/// Given
/// \code
/// template<typename T> void A(T t) { T i; }
/// A(0);
/// A(0U);
/// \endcode
/// functionDecl(isInstantiated())
/// matches 'A(int) {...};' and 'A(unsigned) {...}'.
AST_MATCHER_FUNCTION(internal::Matcher<Decl>, isInstantiated) {
  // A decl matches when it is itself a template instantiation, or when any
  // of its ancestors is one.
  auto IsInstantiation = decl(anyOf(cxxRecordDecl(isTemplateInstantiation()),
                                    functionDecl(isTemplateInstantiation())));
  return decl(anyOf(IsInstantiation, hasAncestor(IsInstantiation)));
}
/// Matches statements inside of a template instantiation.
///
/// Given
/// \code
/// int j;
/// template<typename T> void A(T t) { T i; j += 42;}
/// A(0);
/// A(0U);
/// \endcode
/// declStmt(isInTemplateInstantiation())
/// matches 'int i;' and 'unsigned i'.
/// unless(stmt(isInTemplateInstantiation()))
/// will NOT match j += 42; as it's shared between the template definition and
/// instantiation.
AST_MATCHER_FUNCTION(internal::Matcher<Stmt>, isInTemplateInstantiation) {
  // A statement matches only when some ancestor decl is an instantiation
  // (statements themselves carry no specialization kind).
  return stmt(
      hasAncestor(decl(anyOf(cxxRecordDecl(isTemplateInstantiation()),
                             functionDecl(isTemplateInstantiation())))));
}
/// Matches explicit template specializations of function, class, or
/// static member variable template instantiations.
///
/// Given
/// \code
/// template<typename T> void A(T t) { }
/// template<> void A(int N) { }
/// \endcode
/// functionDecl(isExplicitTemplateSpecialization())
/// matches the specialization A<int>().
///
/// Usable as: Matcher<FunctionDecl>, Matcher<VarDecl>, Matcher<CXXRecordDecl>
AST_POLYMORPHIC_MATCHER(isExplicitTemplateSpecialization,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl, VarDecl,
                                                        CXXRecordDecl)) {
  // Complement of isTemplateInstantiation(): only explicit specializations.
  return (Node.getTemplateSpecializationKind() == TSK_ExplicitSpecialization);
}
/// Matches \c TypeLocs for which the given inner
/// QualType-matcher matches.
AST_MATCHER_FUNCTION_P_OVERLOAD(internal::BindableMatcher<TypeLoc>, loc,
                                internal::Matcher<QualType>, InnerMatcher, 0) {
  // Adapt a QualType matcher so it can be applied to TypeLoc nodes.
  return internal::BindableMatcher<TypeLoc>(
      new internal::TypeLocTypeMatcher(InnerMatcher));
}
/// Matches type \c bool.
///
/// Given
/// \code
/// struct S { bool func(); };
/// \endcode
/// functionDecl(returns(booleanType()))
/// matches "bool func();"
/// (Delegates to \c Type::isBooleanType().)
AST_MATCHER(Type, booleanType) {
  return Node.isBooleanType();
}
/// Matches type \c void.
///
/// Given
/// \code
/// struct S { void func(); };
/// \endcode
/// functionDecl(returns(voidType()))
/// matches "void func();"
/// (Delegates to \c Type::isVoidType().)
AST_MATCHER(Type, voidType) {
  return Node.isVoidType();
}
/// Shorthand for a variadic matcher that dyn_casts \c Type down to
/// \c NodeType; used for all the type matchers declared below.
template <typename NodeType>
using AstTypeMatcher = internal::VariadicDynCastAllOfMatcher<Type, NodeType>;
/// Matches builtin Types.
///
/// Given
/// \code
/// struct A {};
/// A a;
/// int b;
/// float c;
/// bool d;
/// \endcode
/// builtinType()
/// matches "int b", "float c" and "bool d"
extern const AstTypeMatcher<BuiltinType> builtinType;
/// Matches all kinds of arrays.
///
/// Given
/// \code
/// int a[] = { 2, 3 };
/// int b[4];
/// void f() { int c[a[0]]; }
/// \endcode
/// arrayType()
/// matches "int a[]", "int b[4]" and "int c[a[0]]";
extern const AstTypeMatcher<ArrayType> arrayType;
/// Matches C99 complex types.
///
/// Given
/// \code
/// _Complex float f;
/// \endcode
/// complexType()
/// matches "_Complex float f"
extern const AstTypeMatcher<ComplexType> complexType;
/// Matches any real floating-point type (float, double, long double).
///
/// Given
/// \code
/// int i;
/// float f;
/// \endcode
/// realFloatingPointType()
/// matches "float f" but not "int i"
/// (Delegates to \c Type::isRealFloatingType().)
AST_MATCHER(Type, realFloatingPointType) {
  return Node.isRealFloatingType();
}
/// Matches arrays and C99 complex types that have a specific element
/// type.
///
/// Given
/// \code
/// struct A {};
/// A a[7];
/// int b[7];
/// \endcode
/// arrayType(hasElementType(builtinType()))
/// matches "int b[7]"
///
/// Usable as: Matcher<ArrayType>, Matcher<ComplexType>
/// (Declaration only; the traversal is generated from the nodes' \c
/// getElement accessor.)
AST_TYPELOC_TRAVERSE_MATCHER_DECL(hasElementType, getElement,
                                  AST_POLYMORPHIC_SUPPORTED_TYPES(ArrayType,
                                                                  ComplexType));
/// Matches C arrays with a specified constant size.
///
/// Given
/// \code
/// void() {
/// int a[2];
/// int b[] = { 2, 3 };
/// int c[b[0]];
/// }
/// \endcode
/// constantArrayType()
/// matches "int a[2]"
extern const AstTypeMatcher<ConstantArrayType> constantArrayType;
/// Matches nodes that have the specified size.
///
/// Given
/// \code
/// int a[42];
/// int b[2 * 21];
/// int c[41], d[43];
/// char *s = "abcd";
/// wchar_t *ws = L"abcd";
/// char *w = "a";
/// \endcode
/// constantArrayType(hasSize(42))
/// matches "int a[42]" and "int b[2 * 21]"
/// stringLiteral(hasSize(4))
/// matches "abcd", L"abcd"
AST_POLYMORPHIC_MATCHER_P(hasSize,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(ConstantArrayType,
                                                          StringLiteral),
                          unsigned, N) {
  // Dispatch through a per-node-type trait (internal::HasSizeMatcher), since
  // "size" is obtained differently for each supported node type.
  return internal::HasSizeMatcher<NodeType>::hasSize(Node, N);
}
/// Matches C++ arrays whose size is a value-dependent expression.
///
/// Given
/// \code
/// template<typename T, int Size>
/// class array {
/// T data[Size];
/// };
/// \endcode
/// dependentSizedArrayType
/// matches "T data[Size]"
extern const AstTypeMatcher<DependentSizedArrayType> dependentSizedArrayType;
/// Matches C arrays with unspecified size.
///
/// Given
/// \code
/// int a[] = { 2, 3 };
/// int b[42];
/// void f(int c[]) { int d[a[0]]; };
/// \endcode
/// incompleteArrayType()
/// matches "int a[]" and "int c[]"
extern const AstTypeMatcher<IncompleteArrayType> incompleteArrayType;
/// Matches C arrays with a specified size that is not an
/// integer-constant-expression.
///
/// Given
/// \code
/// void f() {
/// int a[] = { 2, 3 }
/// int b[42];
/// int c[a[0]];
/// }
/// \endcode
/// variableArrayType()
/// matches "int c[a[0]]"
extern const AstTypeMatcher<VariableArrayType> variableArrayType;
/// Matches \c VariableArrayType nodes that have a specific size
/// expression.
///
/// Given
/// \code
/// void f(int b) {
/// int a[b];
/// }
/// \endcode
/// variableArrayType(hasSizeExpr(ignoringImpCasts(declRefExpr(to(
/// varDecl(hasName("b")))))))
/// matches "int a[b]"
AST_MATCHER_P(VariableArrayType, hasSizeExpr,
              internal::Matcher<Expr>, InnerMatcher) {
  // Match against the VLA's size expression.
  return InnerMatcher.matches(*Node.getSizeExpr(), Finder, Builder);
}
/// Matches atomic types.
///
/// Given
/// \code
/// _Atomic(int) i;
/// \endcode
/// atomicType()
/// matches "_Atomic(int) i"
extern const AstTypeMatcher<AtomicType> atomicType;
/// Matches atomic types with a specific value type.
///
/// Given
/// \code
/// _Atomic(int) i;
/// _Atomic(float) f;
/// \endcode
/// atomicType(hasValueType(isInteger()))
/// matches "_Atomic(int) i"
///
/// Usable as: Matcher<AtomicType>
/// (Declaration only; the traversal is generated from the node's \c getValue
/// accessor.)
AST_TYPELOC_TRAVERSE_MATCHER_DECL(hasValueType, getValue,
                                  AST_POLYMORPHIC_SUPPORTED_TYPES(AtomicType));
/// Matches types nodes representing C++11 auto types.
///
/// Given:
/// \code
/// auto n = 4;
/// int v[] = { 2, 3 }
/// for (auto i : v) { }
/// \endcode
/// autoType()
/// matches "auto n" and "auto i"
extern const AstTypeMatcher<AutoType> autoType;
/// Matches types nodes representing C++11 decltype(<expr>) types.
///
/// Given:
/// \code
/// short i = 1;
/// int j = 42;
/// decltype(i + j) result = i + j;
/// \endcode
/// decltypeType()
/// matches "decltype(i + j)"
extern const AstTypeMatcher<DecltypeType> decltypeType;
/// Matches \c AutoType nodes where the deduced type is a specific type.
///
/// Note: There is no \c TypeLoc for the deduced type and thus no
/// \c getDeducedLoc() matcher.
///
/// Given
/// \code
/// auto a = 1;
/// auto b = 2.0;
/// \endcode
/// autoType(hasDeducedType(isInteger()))
/// matches "auto a"
///
/// Usable as: Matcher<AutoType>
/// (Traverses via \c AutoType::getDeducedType; type-only — see note above.)
AST_TYPE_TRAVERSE_MATCHER(hasDeducedType, getDeducedType,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(AutoType));
/// Matches \c DecltypeType nodes to find out the underlying type.
///
/// Given
/// \code
/// decltype(1) a = 1;
/// decltype(2.0) b = 2.0;
/// \endcode
/// decltypeType(hasUnderlyingType(isInteger()))
/// matches the type of "a"
///
/// Usable as: Matcher<DecltypeType>
/// (Traverses via \c DecltypeType::getUnderlyingType.)
AST_TYPE_TRAVERSE_MATCHER(hasUnderlyingType, getUnderlyingType,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(DecltypeType));
/// Matches \c FunctionType nodes.
///
/// Given
/// \code
/// int (*f)(int);
/// void g();
/// \endcode
/// functionType()
/// matches "int (*f)(int)" and the type of "g".
extern const AstTypeMatcher<FunctionType> functionType;
/// Matches \c FunctionProtoType nodes.
///
/// Given
/// \code
/// int (*f)(int);
/// void g();
/// \endcode
/// functionProtoType()
/// matches "int (*f)(int)" and the type of "g" in C++ mode.
/// In C mode, "g" is not matched because it does not contain a prototype.
extern const AstTypeMatcher<FunctionProtoType> functionProtoType;
/// Matches \c ParenType nodes.
///
/// Given
/// \code
/// int (*ptr_to_array)[4];
/// int *array_of_ptrs[4];
/// \endcode
///
/// \c varDecl(hasType(pointsTo(parenType()))) matches \c ptr_to_array but not
/// \c array_of_ptrs.
extern const AstTypeMatcher<ParenType> parenType;
/// Matches \c ParenType nodes where the inner type is a specific type.
///
/// Given
/// \code
/// int (*ptr_to_array)[4];
/// int (*ptr_to_func)(int);
/// \endcode
///
/// \c varDecl(hasType(pointsTo(parenType(innerType(functionType()))))) matches
/// \c ptr_to_func but not \c ptr_to_array.
///
/// Usable as: Matcher<ParenType>
/// (Traverses via \c ParenType::getInnerType.)
AST_TYPE_TRAVERSE_MATCHER(innerType, getInnerType,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(ParenType));
/// Matches block pointer types, i.e. types syntactically represented as
/// "void (^)(int)".
///
/// The \c pointee is always required to be a \c FunctionType.
extern const AstTypeMatcher<BlockPointerType> blockPointerType;
/// Matches member pointer types.
/// Given
/// \code
/// struct A { int i; };
/// int A::*ptr = &A::i;
/// \endcode
/// memberPointerType()
/// matches "int A::*ptr"
extern const AstTypeMatcher<MemberPointerType> memberPointerType;
/// Matches pointer types, but does not match Objective-C object pointer
/// types.
///
/// Given
/// \code
/// int *a;
/// int &b = *a;
/// int c = 5;
///
/// @interface Foo
/// @end
/// Foo *f;
/// \endcode
/// pointerType()
/// matches "int *a", but does not match "Foo *f".
extern const AstTypeMatcher<PointerType> pointerType;
/// Matches an Objective-C object pointer type, which is different from
/// a pointer type, despite being syntactically similar.
///
/// Given
/// \code
/// int *a;
///
/// @interface Foo
/// @end
/// Foo *f;
/// \endcode
/// objcObjectPointerType()
/// matches "Foo *f", but does not match "int *a".
extern const AstTypeMatcher<ObjCObjectPointerType> objcObjectPointerType;
/// Matches both lvalue and rvalue reference types.
///
/// Given
/// \code
/// int *a;
/// int &b = *a;
/// int &&c = 1;
/// auto &d = b;
/// auto &&e = c;
/// auto &&f = 2;
/// int g = 5;
/// \endcode
///
/// \c referenceType() matches the types of \c b, \c c, \c d, \c e, and \c f.
extern const AstTypeMatcher<ReferenceType> referenceType;
/// Matches lvalue reference types.
///
/// Given:
/// \code
/// int *a;
/// int &b = *a;
/// int &&c = 1;
/// auto &d = b;
/// auto &&e = c;
/// auto &&f = 2;
/// int g = 5;
/// \endcode
///
/// \c lValueReferenceType() matches the types of \c b, \c d, and \c e. \c e is
/// matched since the type is deduced as int& by reference collapsing rules.
extern const AstTypeMatcher<LValueReferenceType> lValueReferenceType;
/// Matches rvalue reference types.
///
/// Given:
/// \code
/// int *a;
/// int &b = *a;
/// int &&c = 1;
/// auto &d = b;
/// auto &&e = c;
/// auto &&f = 2;
/// int g = 5;
/// \endcode
///
/// \c rValueReferenceType() matches the types of \c c and \c f. \c e is not
/// matched as it is deduced to int& by reference collapsing rules.
extern const AstTypeMatcher<RValueReferenceType> rValueReferenceType;
/// Narrows PointerType (and similar) matchers to those where the
/// \c pointee matches a given matcher.
///
/// Given
/// \code
/// int *a;
/// int const *b;
/// float const *f;
/// \endcode
/// pointerType(pointee(isConstQualified(), isInteger()))
/// matches "int const *b"
///
/// Usable as: Matcher<BlockPointerType>, Matcher<MemberPointerType>,
/// Matcher<PointerType>, Matcher<ReferenceType>
/// (Declaration only; the traversal is generated from the nodes' \c
/// getPointee accessor.)
AST_TYPELOC_TRAVERSE_MATCHER_DECL(
    pointee, getPointee,
    AST_POLYMORPHIC_SUPPORTED_TYPES(BlockPointerType, MemberPointerType,
                                    PointerType, ReferenceType));
/// Matches typedef types.
///
/// Given
/// \code
/// typedef int X;
/// \endcode
/// typedefType()
/// matches "typedef int X"
extern const AstTypeMatcher<TypedefType> typedefType;
/// Matches enum types.
///
/// Given
/// \code
/// enum C { Green };
/// enum class S { Red };
///
/// C c;
/// S s;
/// \endcode
///
/// \c enumType() matches the type of the variable declarations of both \c c and
/// \c s.
extern const AstTypeMatcher<EnumType> enumType;
/// Matches template specialization types.
///
/// Given
/// \code
/// template <typename T>
/// class C { };
///
/// template class C<int>; // A
/// C<char> var; // B
/// \endcode
///
/// \c templateSpecializationType() matches the type of the explicit
/// instantiation in \c A and the type of the variable declaration in \c B.
extern const AstTypeMatcher<TemplateSpecializationType>
templateSpecializationType;
/// Matches C++17 deduced template specialization types, e.g. deduced class
/// template types.
///
/// Given
/// \code
/// template <typename T>
/// class C { public: C(T); };
///
/// C c(123);
/// \endcode
/// \c deducedTemplateSpecializationType() matches the type in the declaration
/// of the variable \c c.
extern const AstTypeMatcher<DeducedTemplateSpecializationType>
deducedTemplateSpecializationType;
/// Matches types nodes representing unary type transformations.
///
/// Given:
/// \code
/// typedef __underlying_type(T) type;
/// \endcode
/// unaryTransformType()
/// matches "__underlying_type(T)"
extern const AstTypeMatcher<UnaryTransformType> unaryTransformType;
/// Matches record types (e.g. structs, classes).
///
/// Given
/// \code
/// class C {};
/// struct S {};
///
/// C c;
/// S s;
/// \endcode
///
/// \c recordType() matches the type of the variable declarations of both \c c
/// and \c s.
extern const AstTypeMatcher<RecordType> recordType;
/// Matches tag types (record and enum types).
///
/// Given
/// \code
/// enum E {};
/// class C {};
///
/// E e;
/// C c;
/// \endcode
///
/// \c tagType() matches the type of the variable declarations of both \c e
/// and \c c.
extern const AstTypeMatcher<TagType> tagType;
/// Matches types specified with an elaborated type keyword or with a
/// qualified name.
///
/// Given
/// \code
/// namespace N {
/// namespace M {
/// class D {};
/// }
/// }
/// class C {};
///
/// class C c;
/// N::M::D d;
/// \endcode
///
/// \c elaboratedType() matches the type of the variable declarations of both
/// \c c and \c d.
extern const AstTypeMatcher<ElaboratedType> elaboratedType;
/// Matches ElaboratedTypes whose qualifier, a NestedNameSpecifier,
/// matches \c InnerMatcher if the qualifier exists.
///
/// Given
/// \code
/// namespace N {
/// namespace M {
/// class D {};
/// }
/// }
/// N::M::D d;
/// \endcode
///
/// \c elaboratedType(hasQualifier(hasPrefix(specifiesNamespace(hasName("N"))))
/// matches the type of the variable declaration of \c d.
AST_MATCHER_P(ElaboratedType, hasQualifier,
              internal::Matcher<NestedNameSpecifier>, InnerMatcher) {
  // Elaborated types written without a qualifier never match.
  if (const NestedNameSpecifier *Qualifier = Node.getQualifier())
    return InnerMatcher.matches(*Qualifier, Finder, Builder);
  return false;
}
/// Matches ElaboratedTypes whose named type matches \c InnerMatcher.
///
/// Given
/// \code
/// namespace N {
/// namespace M {
/// class D {};
/// }
/// }
/// N::M::D d;
/// \endcode
///
/// \c elaboratedType(namesType(recordType(
/// hasDeclaration(namedDecl(hasName("D")))))) matches the type of the variable
/// declaration of \c d.
AST_MATCHER_P(ElaboratedType, namesType, internal::Matcher<QualType>,
              InnerMatcher) {
  // Match against the type the elaborated type refers to.
  return InnerMatcher.matches(Node.getNamedType(), Finder, Builder);
}
/// Matches types that represent the result of substituting a type for a
/// template type parameter.
///
/// Given
/// \code
/// template <typename T>
/// void F(T t) {
/// int i = 1 + t;
/// }
/// \endcode
///
/// \c substTemplateTypeParmType() matches the type of 't' but not '1'
extern const AstTypeMatcher<SubstTemplateTypeParmType>
substTemplateTypeParmType;
/// Matches template type parameter substitutions that have a replacement
/// type that matches the provided matcher.
///
/// Given
/// \code
/// template <typename T>
/// double F(T t);
/// int i;
/// double j = F(i);
/// \endcode
///
/// \c substTemplateTypeParmType(hasReplacementType(type())) matches int
/// (Traverses via \c SubstTemplateTypeParmType::getReplacementType.)
AST_TYPE_TRAVERSE_MATCHER(
    hasReplacementType, getReplacementType,
    AST_POLYMORPHIC_SUPPORTED_TYPES(SubstTemplateTypeParmType));
/// Matches template type parameter types.
///
/// Example matches T, but not int.
/// (matcher = templateTypeParmType())
/// \code
/// template <typename T> void f(int i);
/// \endcode
extern const AstTypeMatcher<TemplateTypeParmType> templateTypeParmType;
/// Matches injected class name types.
///
/// Example matches S s, but not S<T> s.
/// (matcher = parmVarDecl(hasType(injectedClassNameType())))
/// \code
/// template <typename T> struct S {
/// void f(S s);
/// void g(S<T> s);
/// };
/// \endcode
extern const AstTypeMatcher<InjectedClassNameType> injectedClassNameType;
/// Matches decayed type
/// Example matches i[] in declaration of f.
/// (matcher = valueDecl(hasType(decayedType(hasDecayedType(pointerType())))))
/// Example matches i[1].
/// (matcher = expr(hasType(decayedType(hasDecayedType(pointerType())))))
/// \code
/// void f(int i[]) {
/// i[1] = 0;
/// }
/// \endcode
extern const AstTypeMatcher<DecayedType> decayedType;
/// Matches the decayed type, whose decayed type matches \c InnerMatcher
AST_MATCHER_P(DecayedType, hasDecayedType, internal::Matcher<QualType>,
              InnerType) {
  // NOTE: the bound parameter here is named InnerType, not InnerMatcher.
  return InnerType.matches(Node.getDecayedType(), Finder, Builder);
}
/// Matches declarations whose declaration context, interpreted as a
/// Decl, matches \c InnerMatcher.
///
/// Given
/// \code
/// namespace N {
/// namespace M {
/// class D {};
/// }
/// }
/// \endcode
///
/// \c cxxRecordDecl(hasDeclContext(namedDecl(hasName("M")))) matches the
/// declaration of \c class \c D.
AST_MATCHER_P(Decl, hasDeclContext, internal::Matcher<Decl>, InnerMatcher) {
  const DeclContext *DC = Node.getDeclContext();
  if (!DC) return false;
  // Reinterpret the DeclContext as the Decl it also is, then match on that.
  return InnerMatcher.matches(*Decl::castFromDeclContext(DC), Finder, Builder);
}
/// Matches nested name specifiers.
///
/// Given
/// \code
/// namespace ns {
/// struct A { static void f(); };
/// void A::f() {}
/// void g() { A::f(); }
/// }
/// ns::A a;
/// \endcode
/// nestedNameSpecifier()
/// matches "ns::" and both "A::"
extern const internal::VariadicAllOfMatcher<NestedNameSpecifier>
nestedNameSpecifier;
/// Same as \c nestedNameSpecifier but matches \c NestedNameSpecifierLoc.
extern const internal::VariadicAllOfMatcher<NestedNameSpecifierLoc>
nestedNameSpecifierLoc;
/// Matches \c NestedNameSpecifierLocs for which the given inner
/// NestedNameSpecifier-matcher matches.
AST_MATCHER_FUNCTION_P_OVERLOAD(
    internal::BindableMatcher<NestedNameSpecifierLoc>, loc,
    internal::Matcher<NestedNameSpecifier>, InnerMatcher, 1) {
  // Adapt a NestedNameSpecifier matcher to NestedNameSpecifierLoc nodes.
  return internal::BindableMatcher<NestedNameSpecifierLoc>(
      new internal::LocMatcher<NestedNameSpecifierLoc, NestedNameSpecifier>(
          InnerMatcher));
}
/// Matches nested name specifiers that specify a type matching the
/// given \c QualType matcher without qualifiers.
///
/// Given
/// \code
/// struct A { struct B { struct C {}; }; };
/// A::B::C c;
/// \endcode
/// nestedNameSpecifier(specifiesType(
/// hasDeclaration(cxxRecordDecl(hasName("A")))
/// ))
/// matches "A::"
AST_MATCHER_P(NestedNameSpecifier, specifiesType,
              internal::Matcher<QualType>, InnerMatcher) {
  // Specifiers that do not name a type never match.
  if (!Node.getAsType())
    return false;
  // Wrap the bare Type in an unqualified QualType for the inner matcher.
  return InnerMatcher.matches(QualType(Node.getAsType(), 0), Finder, Builder);
}
/// Matches nested name specifier locs that specify a type matching the
/// given \c TypeLoc.
///
/// Given
/// \code
/// struct A { struct B { struct C {}; }; };
/// A::B::C c;
/// \endcode
/// nestedNameSpecifierLoc(specifiesTypeLoc(loc(type(
/// hasDeclaration(cxxRecordDecl(hasName("A")))))))
/// matches "A::"
AST_MATCHER_P(NestedNameSpecifierLoc, specifiesTypeLoc,
              internal::Matcher<TypeLoc>, InnerMatcher) {
  // Guard against invalid locs and non-type specifiers before matching.
  return Node && Node.getNestedNameSpecifier()->getAsType() &&
         InnerMatcher.matches(Node.getTypeLoc(), Finder, Builder);
}
/// Matches on the prefix of a \c NestedNameSpecifier.
///
/// Given
/// \code
/// struct A { struct B { struct C {}; }; };
/// A::B::C c;
/// \endcode
/// nestedNameSpecifier(hasPrefix(specifiesType(asString("struct A")))) and
/// matches "A::"
AST_MATCHER_P_OVERLOAD(NestedNameSpecifier, hasPrefix,
                       internal::Matcher<NestedNameSpecifier>, InnerMatcher,
                       0) {
  // A specifier without a prefix cannot match.
  const NestedNameSpecifier *NextNode = Node.getPrefix();
  if (!NextNode)
    return false;
  return InnerMatcher.matches(*NextNode, Finder, Builder);
}
/// Matches on the prefix of a \c NestedNameSpecifierLoc.
///
/// Given
/// \code
/// struct A { struct B { struct C {}; }; };
/// A::B::C c;
/// \endcode
/// nestedNameSpecifierLoc(hasPrefix(loc(specifiesType(asString("struct A")))))
/// matches "A::"
AST_MATCHER_P_OVERLOAD(NestedNameSpecifierLoc, hasPrefix,
                       internal::Matcher<NestedNameSpecifierLoc>, InnerMatcher,
                       1) {
  // A loc without a prefix (tested via its boolean conversion) cannot match.
  NestedNameSpecifierLoc NextNode = Node.getPrefix();
  if (!NextNode)
    return false;
  return InnerMatcher.matches(NextNode, Finder, Builder);
}
/// Matches nested name specifiers that specify a namespace matching the
/// given namespace matcher.
///
/// Given
/// \code
/// namespace ns { struct A {}; }
/// ns::A a;
/// \endcode
/// nestedNameSpecifier(specifiesNamespace(hasName("ns")))
/// matches "ns::"
AST_MATCHER_P(NestedNameSpecifier, specifiesNamespace,
              internal::Matcher<NamespaceDecl>, InnerMatcher) {
  // Specifiers that do not name a namespace never match.
  if (!Node.getAsNamespace())
    return false;
  return InnerMatcher.matches(*Node.getAsNamespace(), Finder, Builder);
}
/// Overloads for the \c equalsNode matcher.
/// FIXME: Implement for other node types.
/// @{
/// Matches if a node equals another node.
///
/// \c Decl has pointer identity in the AST.
AST_MATCHER_P_OVERLOAD(Decl, equalsNode, const Decl*, Other, 0) {
  return &Node == Other; // pointer identity
}
/// Matches if a node equals another node.
///
/// \c Stmt has pointer identity in the AST.
AST_MATCHER_P_OVERLOAD(Stmt, equalsNode, const Stmt*, Other, 1) {
  return &Node == Other; // pointer identity
}
/// Matches if a node equals another node.
///
/// \c Type has pointer identity in the AST.
AST_MATCHER_P_OVERLOAD(Type, equalsNode, const Type*, Other, 2) {
  return &Node == Other; // pointer identity
}
/// @}
/// Matches each case or default statement belonging to the given switch
/// statement. This matcher may produce multiple matches.
///
/// Given
/// \code
/// switch (1) { case 1: case 2: default: switch (2) { case 3: case 4: ; } }
/// \endcode
/// switchStmt(forEachSwitchCase(caseStmt().bind("c"))).bind("s")
/// matches four times, with "c" binding each of "case 1:", "case 2:",
/// "case 3:" and "case 4:", and "s" respectively binding "switch (1)",
/// "switch (1)", "switch (2)" and "switch (2)".
AST_MATCHER_P(SwitchStmt, forEachSwitchCase, internal::Matcher<SwitchCase>,
              InnerMatcher) {
  BoundNodesTreeBuilder Result;
  // FIXME: getSwitchCaseList() does not necessarily guarantee a stable
  // iteration order. We should use the more general iterating matchers once
  // they are capable of expressing this matcher (for example, it should ignore
  // case statements belonging to nested switch statements).
  bool Matched = false;
  for (const SwitchCase *SC = Node.getSwitchCaseList(); SC;
       SC = SC->getNextSwitchCase()) {
    // Each candidate gets its own copy of the current bindings so a failing
    // case cannot pollute the accumulated result set.
    BoundNodesTreeBuilder CaseBuilder(*Builder);
    bool CaseMatched = InnerMatcher.matches(*SC, Finder, &CaseBuilder);
    if (CaseMatched) {
      Matched = true;
      Result.addMatch(CaseBuilder);
    }
  }
  // Replace the caller's bindings with one match per successful case.
  *Builder = std::move(Result);
  return Matched;
}
/// Matches each constructor initializer in a constructor definition.
///
/// Given
/// \code
/// class A { A() : i(42), j(42) {} int i; int j; };
/// \endcode
/// cxxConstructorDecl(forEachConstructorInitializer(
/// forField(decl().bind("x"))
/// ))
/// will trigger two matches, binding for 'i' and 'j' respectively.
AST_MATCHER_P(CXXConstructorDecl, forEachConstructorInitializer,
              internal::Matcher<CXXCtorInitializer>, InnerMatcher) {
  BoundNodesTreeBuilder Matches;
  bool FoundAny = false;
  for (const CXXCtorInitializer *Init : Node.inits()) {
    // Give each initializer a private copy of the current bindings so that a
    // failed candidate leaves the accumulated matches untouched.
    BoundNodesTreeBuilder Candidate(*Builder);
    if (InnerMatcher.matches(*Init, Finder, &Candidate)) {
      FoundAny = true;
      Matches.addMatch(Candidate);
    }
  }
  // Publish one bound-nodes entry per matching initializer.
  *Builder = std::move(Matches);
  return FoundAny;
}
/// Matches constructor declarations that are copy constructors.
///
/// Given
/// \code
/// struct S {
/// S(); // #1
/// S(const S &); // #2
/// S(S &&); // #3
/// };
/// \endcode
/// cxxConstructorDecl(isCopyConstructor()) will match #2, but not #1 or #3.
/// (Delegates to \c CXXConstructorDecl::isCopyConstructor().)
AST_MATCHER(CXXConstructorDecl, isCopyConstructor) {
  return Node.isCopyConstructor();
}
/// Matches constructor declarations that are move constructors.
///
/// Given
/// \code
/// struct S {
/// S(); // #1
/// S(const S &); // #2
/// S(S &&); // #3
/// };
/// \endcode
/// cxxConstructorDecl(isMoveConstructor()) will match #3, but not #1 or #2.
/// (Delegates to \c CXXConstructorDecl::isMoveConstructor().)
AST_MATCHER(CXXConstructorDecl, isMoveConstructor) {
  return Node.isMoveConstructor();
}
/// Matches constructor declarations that are default constructors.
///
/// Given
/// \code
/// struct S {
/// S(); // #1
/// S(const S &); // #2
/// S(S &&); // #3
/// };
/// \endcode
/// cxxConstructorDecl(isDefaultConstructor()) will match #1, but not #2 or #3.
/// (Delegates to \c CXXConstructorDecl::isDefaultConstructor().)
AST_MATCHER(CXXConstructorDecl, isDefaultConstructor) {
  return Node.isDefaultConstructor();
}
/// Matches constructors that delegate to another constructor.
///
/// Given
/// \code
/// struct S {
/// S(); // #1
/// S(int) {} // #2
/// S(S &&) : S() {} // #3
/// };
/// S::S() : S(0) {} // #4
/// \endcode
/// cxxConstructorDecl(isDelegatingConstructor()) will match #3 and #4, but not
/// #1 or #2.
/// (Delegates to \c CXXConstructorDecl::isDelegatingConstructor().)
AST_MATCHER(CXXConstructorDecl, isDelegatingConstructor) {
  return Node.isDelegatingConstructor();
}
/// Matches constructor, conversion function, and deduction guide declarations
/// that have an explicit specifier if this explicit specifier is resolved to
/// true.
///
/// Given
/// \code
/// template<bool b>
/// struct S {
/// S(int); // #1
/// explicit S(double); // #2
/// operator int(); // #3
/// explicit operator bool(); // #4
/// explicit(false) S(bool) // # 7
/// explicit(true) S(char) // # 8
/// explicit(b) S(S) // # 9
/// };
/// S(int) -> S<true> // #5
/// explicit S(double) -> S<false> // #6
/// \endcode
/// cxxConstructorDecl(isExplicit()) will match #2 and #8, but not #1, #7 or #9.
/// cxxConversionDecl(isExplicit()) will match #4, but not #3.
/// cxxDeductionGuideDecl(isExplicit()) will match #6, but not #5.
AST_POLYMORPHIC_MATCHER(isExplicit, AST_POLYMORPHIC_SUPPORTED_TYPES(
                                        CXXConstructorDecl, CXXConversionDecl,
                                        CXXDeductionGuideDecl)) {
  // True only when the explicit specifier resolves to true (see doc above).
  return Node.isExplicit();
}
/// Matches the expression in an explicit specifier if present in the given
/// declaration.
///
/// Given
/// \code
/// template<bool b>
/// struct S {
/// S(int); // #1
/// explicit S(double); // #2
/// operator int(); // #3
/// explicit operator bool(); // #4
/// explicit(false) S(bool) // # 7
/// explicit(true) S(char) // # 8
/// explicit(b) S(S) // # 9
/// };
/// S(int) -> S<true> // #5
/// explicit S(double) -> S<false> // #6
/// \endcode
/// cxxConstructorDecl(hasExplicitSpecifier(constantExpr())) will match #7, #8 and #9, but not #1 or #2.
/// cxxConversionDecl(hasExplicitSpecifier(constantExpr())) will not match #3 or #4.
/// cxxDeductionGuideDecl(hasExplicitSpecifier(constantExpr())) will not match #5 or #6.
AST_MATCHER_P(FunctionDecl, hasExplicitSpecifier, internal::Matcher<Expr>,
              InnerMatcher) {
  // Declarations whose explicit specifier carries no expression (including
  // plain 'explicit' and no specifier at all) never match.
  const ExplicitSpecifier Spec = ExplicitSpecifier::getFromDecl(&Node);
  if (const Expr *SpecExpr = Spec.getExpr())
    return InnerMatcher.matches(*SpecExpr, Finder, Builder);
  return false;
}
/// Matches function and namespace declarations that are marked with
/// the inline keyword.
///
/// Given
/// \code
/// inline void f();
/// void g();
/// namespace n {
/// inline namespace m {}
/// }
/// \endcode
/// functionDecl(isInline()) will match ::f().
/// namespaceDecl(isInline()) will match n::m.
AST_POLYMORPHIC_MATCHER(isInline,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(NamespaceDecl,
                                                        FunctionDecl)) {
  // This is required because the spelling of the function used to determine
  // whether inline is specified or not differs between the polymorphic types.
  if (const auto *FD = dyn_cast<FunctionDecl>(&Node))
    return FD->isInlineSpecified();
  else if (const auto *NSD = dyn_cast<NamespaceDecl>(&Node))
    return NSD->isInline();
  // The supported-types list above restricts Node to the two handled cases.
  llvm_unreachable("Not a valid polymorphic type");
}
/// Matches anonymous namespace declarations.
///
/// Given
/// \code
/// namespace n {
/// namespace {} // #1
/// }
/// \endcode
/// namespaceDecl(isAnonymous()) will match #1 but not ::n.
AST_MATCHER(NamespaceDecl, isAnonymous) {
  // Forward to the node's own anonymous-namespace query.
  const bool Anonymous = Node.isAnonymousNamespace();
  return Anonymous;
}
/// Matches declarations in the namespace `std`, but not in nested namespaces.
///
/// Given
/// \code
/// class vector {};
/// namespace foo {
/// class vector {};
/// namespace std {
/// class vector {};
/// }
/// }
/// namespace std {
/// inline namespace __1 {
/// class vector {}; // #1
/// namespace experimental {
/// class vector {};
/// }
/// }
/// }
/// \endcode
/// cxxRecordDecl(hasName("vector"), isInStdNamespace()) will match only #1.
AST_MATCHER(Decl, isInStdNamespace) {
  // Delegates entirely to Decl's own query.
  return Node.isInStdNamespace();
}
/// If the given case statement does not use the GNU case range
/// extension, matches the constant given in the statement.
///
/// Given
/// \code
/// switch (1) { case 1: case 1+1: case 3 ... 4: ; }
/// \endcode
/// caseStmt(hasCaseConstant(integerLiteral()))
/// matches "case 1:"
AST_MATCHER_P(CaseStmt, hasCaseConstant, internal::Matcher<Expr>,
              InnerMatcher) {
  // A non-null RHS means this is a GNU `case lo ... hi:` range, which is
  // deliberately not matched; otherwise match against the single constant.
  return !Node.getRHS() && InnerMatcher.matches(*Node.getLHS(), Finder, Builder);
}
/// Matches declaration that has a given attribute.
///
/// Given
/// \code
/// __attribute__((device)) void f() { ... }
/// \endcode
/// decl(hasAttr(clang::attr::CUDADevice)) matches the function declaration of
/// f. If the matcher is used from clang-query, attr::Kind parameter should be
/// passed as a quoted string. e.g., hasAttr("attr::CUDADevice").
AST_MATCHER_P(Decl, hasAttr, attr::Kind, AttrKind) {
  // True iff any attribute attached to the declaration has the wanted kind.
  return llvm::any_of(Node.attrs(), [AttrKind](const Attr *A) {
    return A->getKind() == AttrKind;
  });
}
/// Matches the return value expression of a return statement
///
/// Given
/// \code
/// return a + b;
/// \endcode
/// hasReturnValue(binaryOperator())
/// matches 'return a + b'
/// with binaryOperator()
/// matching 'a + b'
AST_MATCHER_P(ReturnStmt, hasReturnValue, internal::Matcher<Expr>,
              InnerMatcher) {
  // A bare `return;` has no value expression and therefore never matches.
  const Expr *RetValue = Node.getRetValue();
  return RetValue != nullptr &&
         InnerMatcher.matches(*RetValue, Finder, Builder);
}
/// Matches CUDA kernel call expression.
///
/// Example matches,
/// \code
/// kernel<<<i,j>>>();
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CUDAKernelCallExpr>
cudaKernelCallExpr;
/// Matches expressions that resolve to a null pointer constant, such as
/// GNU's __null, C++11's nullptr, or C's NULL macro.
///
/// Given:
/// \code
/// void *v1 = NULL;
/// void *v2 = nullptr;
/// void *v3 = __null; // GNU extension
/// char *cp = (char *)0;
/// int *ip = 0;
/// int i = 0;
/// \endcode
/// expr(nullPointerConstant())
/// matches the initializer for v1, v2, v3, cp, and ip. Does not match the
/// initializer for i.
AST_MATCHER(Expr, nullPointerConstant) {
  // Value-dependent expressions are treated as null constants here.
  ASTContext &Ctx = Finder->getASTContext();
  return Node.isNullPointerConstant(Ctx, Expr::NPC_ValueDependentIsNull);
}
/// Matches declaration of the function the statement belongs to
///
/// Given:
/// \code
/// F& operator=(const F& o) {
/// std::copy_if(o.begin(), o.end(), begin(), [](V v) { return v > 0; });
/// return *this;
/// }
/// \endcode
/// returnStmt(forFunction(hasName("operator=")))
/// matches 'return *this'
/// but does not match 'return v > 0'
AST_MATCHER_P(Stmt, forFunction, internal::Matcher<FunctionDecl>,
              InnerMatcher) {
  // Walk the parent chain of the statement (depth-first) until a
  // FunctionDecl or a LambdaExpr's call operator is found; traversal stops
  // at that boundary instead of continuing into its parents.
  const auto &Immediate = Finder->getASTContext().getParents(Node);
  llvm::SmallVector<DynTypedNode, 8> Worklist(Immediate.begin(),
                                              Immediate.end());
  while (!Worklist.empty()) {
    // Fix: the original bound a reference to Stack.back() and then called
    // pop_back(), leaving the reference dangling; copy the node out instead.
    DynTypedNode CurNode = Worklist.pop_back_val();
    if (const auto *FuncDeclNode = CurNode.get<FunctionDecl>()) {
      if (InnerMatcher.matches(*FuncDeclNode, Finder, Builder))
        return true;
    } else if (const auto *LambdaExprNode = CurNode.get<LambdaExpr>()) {
      if (InnerMatcher.matches(*LambdaExprNode->getCallOperator(), Finder,
                               Builder))
        return true;
    } else {
      for (const auto &Parent : Finder->getASTContext().getParents(CurNode))
        Worklist.push_back(Parent);
    }
  }
  return false;
}
/// Matches a declaration that has external formal linkage.
///
/// Example matches only z (matcher = varDecl(hasExternalFormalLinkage()))
/// \code
/// void f() {
/// int x;
/// static int y;
/// }
/// int z;
/// \endcode
///
/// Example matches f() because it has external formal linkage despite being
/// unique to the translation unit as though it has internal likage
/// (matcher = functionDecl(hasExternalFormalLinkage()))
///
/// \code
/// namespace {
/// void f() {}
/// }
/// \endcode
AST_MATCHER(NamedDecl, hasExternalFormalLinkage) {
  // Forward to the declaration's own linkage query.
  const bool External = Node.hasExternalFormalLinkage();
  return External;
}
/// Matches a declaration that has default arguments.
///
/// Example matches y (matcher = parmVarDecl(hasDefaultArgument()))
/// \code
/// void x(int val) {}
/// void y(int val = 0) {}
/// \endcode
///
/// Deprecated. Use hasInitializer() instead to be able to
/// match on the contents of the default argument. For example:
///
/// \code
/// void x(int val = 7) {}
/// void y(int val = 42) {}
/// \endcode
/// parmVarDecl(hasInitializer(integerLiteral(equals(42))))
/// matches the parameter of y
///
/// A matcher such as
/// parmVarDecl(hasInitializer(anything()))
/// is equivalent to parmVarDecl(hasDefaultArgument()).
AST_MATCHER(ParmVarDecl, hasDefaultArgument) {
  // Forward to the parameter's own default-argument query.
  const bool HasDefault = Node.hasDefaultArg();
  return HasDefault;
}
/// Matches array new expressions.
///
/// Given:
/// \code
/// MyClass *p1 = new MyClass[10];
/// \endcode
/// cxxNewExpr(isArray())
/// matches the expression 'new MyClass[10]'.
AST_MATCHER(CXXNewExpr, isArray) {
  // Forward to the new-expression's own array query.
  const bool ArrayNew = Node.isArray();
  return ArrayNew;
}
/// Matches placement new expression arguments.
///
/// Given:
/// \code
/// MyClass *p1 = new (Storage, 16) MyClass();
/// \endcode
/// cxxNewExpr(hasPlacementArg(1, integerLiteral(equals(16))))
/// matches the expression 'new (Storage, 16) MyClass()'.
AST_MATCHER_P2(CXXNewExpr, hasPlacementArg, unsigned, Index,
               internal::Matcher<Expr>, InnerMatcher) {
  // Out-of-range indices simply fail to match.
  if (Index >= Node.getNumPlacementArgs())
    return false;
  return InnerMatcher.matches(*Node.getPlacementArg(Index), Finder, Builder);
}
/// Matches any placement new expression arguments.
///
/// Given:
/// \code
/// MyClass *p1 = new (Storage) MyClass();
/// \endcode
/// cxxNewExpr(hasAnyPlacementArg(anything()))
/// matches the expression 'new (Storage, 16) MyClass()'.
AST_MATCHER_P(CXXNewExpr, hasAnyPlacementArg, internal::Matcher<Expr>,
              InnerMatcher) {
  // Succeed on the first placement argument the inner matcher accepts.
  for (const Expr *Arg : Node.placement_arguments())
    if (InnerMatcher.matches(*Arg, Finder, Builder))
      return true;
  return false;
}
/// Matches array new expressions with a given array size.
///
/// Given:
/// \code
/// MyClass *p1 = new MyClass[10];
/// \endcode
/// cxxNewExpr(hasArraySize(integerLiteral(equals(10))))
/// matches the expression 'new MyClass[10]'.
AST_MATCHER_P(CXXNewExpr, hasArraySize, internal::Matcher<Expr>, InnerMatcher) {
  // Only array-new expressions can carry a size.
  if (!Node.isArray())
    return false;
  // getArraySize() returns an optional: an array-new with an omitted bound
  // (e.g. `new int[]{1,2}`) has no size expression. The original dereferenced
  // the optional unconditionally, which is undefined in that case.
  const auto ArraySize = Node.getArraySize();
  return ArraySize && *ArraySize &&
         InnerMatcher.matches(**ArraySize, Finder, Builder);
}
/// Matches a class declaration that is defined.
///
/// Example matches x (matcher = cxxRecordDecl(hasDefinition()))
/// \code
/// class x {};
/// class y;
/// \endcode
AST_MATCHER(CXXRecordDecl, hasDefinition) {
  // Forward to the record's own definition query.
  const bool Defined = Node.hasDefinition();
  return Defined;
}
/// Matches C++11 scoped enum declaration.
///
/// Example matches Y (matcher = enumDecl(isScoped()))
/// \code
/// enum X {};
/// enum class Y {};
/// \endcode
AST_MATCHER(EnumDecl, isScoped) {
  // Forward to the enum's own scoped-enum query.
  const bool Scoped = Node.isScoped();
  return Scoped;
}
/// Matches a function declared with a trailing return type.
///
/// Example matches Y (matcher = functionDecl(hasTrailingReturn()))
/// \code
/// int X() {}
/// auto Y() -> int {}
/// \endcode
AST_MATCHER(FunctionDecl, hasTrailingReturn) {
  // Functions without a prototype cannot have a trailing return type.
  const auto *Proto = Node.getType()->getAs<FunctionProtoType>();
  return Proto != nullptr && Proto->hasTrailingReturn();
}
/// Matches expressions that match InnerMatcher that are possibly wrapped in an
/// elidable constructor and other corresponding bookkeeping nodes.
///
/// In C++17, elidable copy constructors are no longer being generated in the
/// AST as it is not permitted by the standard. They are, however, part of the
/// AST in C++14 and earlier. So, a matcher must abstract over these differences
/// to work in all language modes. This matcher skips elidable constructor-call
/// AST nodes, `ExprWithCleanups` nodes wrapping elidable constructor-calls and
/// various implicit nodes inside the constructor calls, all of which will not
/// appear in the C++17 AST.
///
/// Given
///
/// \code
/// struct H {};
/// H G();
/// void f() {
/// H D = G();
/// }
/// \endcode
///
/// ``varDecl(hasInitializer(ignoringElidableConstructorCall(callExpr())))``
/// matches ``H D = G()`` in C++11 through C++17 (and beyond).
AST_MATCHER_P(Expr, ignoringElidableConstructorCall,
              ast_matchers::internal::Matcher<Expr>, InnerMatcher) {
  // E tracks the node that we are examining.
  const Expr *E = &Node;
  // If present, remove an outer `ExprWithCleanups` corresponding to the
  // underlying `CXXConstructExpr`. This check won't cover all cases of added
  // `ExprWithCleanups` corresponding to `CXXConstructExpr` nodes (because the
  // EWC is placed on the outermost node of the expression, which this may not
  // be), but, it still improves the coverage of this matcher.
  if (const auto *CleanupsExpr = dyn_cast<ExprWithCleanups>(&Node))
    E = CleanupsExpr->getSubExpr();
  // An elidable copy/move construction wraps its source in a
  // MaterializeTemporaryExpr; match against the materialized sub-expression
  // so the matcher behaves identically in pre-C++17 and C++17 ASTs.
  if (const auto *CtorExpr = dyn_cast<CXXConstructExpr>(E)) {
    if (CtorExpr->isElidable()) {
      if (const auto *MaterializeTemp =
              dyn_cast<MaterializeTemporaryExpr>(CtorExpr->getArg(0))) {
        return InnerMatcher.matches(*MaterializeTemp->getSubExpr(), Finder,
                                    Builder);
      }
    }
  }
  // Not an elidable constructor call: match the original node as-is.
  return InnerMatcher.matches(Node, Finder, Builder);
}
//----------------------------------------------------------------------------//
// OpenMP handling.
//----------------------------------------------------------------------------//
/// Matches any ``#pragma omp`` executable directive.
///
/// Given
///
/// \code
/// #pragma omp parallel
/// #pragma omp parallel default(none)
/// #pragma omp taskyield
/// \endcode
///
/// ``ompExecutableDirective()`` matches ``omp parallel``,
/// ``omp parallel default(none)`` and ``omp taskyield``.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, OMPExecutableDirective>
ompExecutableDirective;
/// Matches standalone OpenMP directives,
/// i.e., directives that can't have a structured block.
///
/// Given
///
/// \code
/// #pragma omp parallel
/// {}
/// #pragma omp taskyield
/// \endcode
///
/// ``ompExecutableDirective(isStandaloneDirective()))`` matches
/// ``omp taskyield``.
AST_MATCHER(OMPExecutableDirective, isStandaloneDirective) {
  // Forward to the directive's own standalone query.
  const bool Standalone = Node.isStandaloneDirective();
  return Standalone;
}
/// Matches the structured-block of the OpenMP executable directive
///
/// Prerequisite: the executable directive must not be standalone directive.
/// If it is, it will never match.
///
/// Given
///
/// \code
/// #pragma omp parallel
/// ;
/// #pragma omp parallel
/// {}
/// \endcode
///
/// ``ompExecutableDirective(hasStructuredBlock(nullStmt()))`` will match ``;``
AST_MATCHER_P(OMPExecutableDirective, hasStructuredBlock,
              internal::Matcher<Stmt>, InnerMatcher) {
  // Standalone directives (e.g. `omp taskyield`) carry no structured block,
  // so they can never match.
  return !Node.isStandaloneDirective() &&
         InnerMatcher.matches(*Node.getStructuredBlock(), Finder, Builder);
}
/// Matches any clause in an OpenMP directive.
///
/// Given
///
/// \code
/// #pragma omp parallel
/// #pragma omp parallel default(none)
/// \endcode
///
/// ``ompExecutableDirective(hasAnyClause(anything()))`` matches
/// ``omp parallel default(none)``.
AST_MATCHER_P(OMPExecutableDirective, hasAnyClause,
              internal::Matcher<OMPClause>, InnerMatcher) {
  // Match the first clause the inner matcher accepts, if any.
  auto AllClauses = Node.clauses();
  return matchesFirstInPointerRange(InnerMatcher, AllClauses.begin(),
                                    AllClauses.end(), Finder, Builder);
}
/// Matches OpenMP ``default`` clause.
///
/// Given
///
/// \code
/// #pragma omp parallel default(none)
/// #pragma omp parallel default(shared)
/// #pragma omp parallel default(firstprivate)
/// #pragma omp parallel
/// \endcode
///
/// ``ompDefaultClause()`` matches ``default(none)``, ``default(shared)``, and
/// ``default(firstprivate)``
extern const internal::VariadicDynCastAllOfMatcher<OMPClause, OMPDefaultClause>
ompDefaultClause;
/// Matches if the OpenMP ``default`` clause has ``none`` kind specified.
///
/// Given
///
/// \code
/// #pragma omp parallel
/// #pragma omp parallel default(none)
/// #pragma omp parallel default(shared)
/// #pragma omp parallel default(firstprivate)
/// \endcode
///
/// ``ompDefaultClause(isNoneKind())`` matches only ``default(none)``.
AST_MATCHER(OMPDefaultClause, isNoneKind) {
  // Compare the clause's kind against `none`.
  return llvm::omp::OMP_DEFAULT_none == Node.getDefaultKind();
}
/// Matches if the OpenMP ``default`` clause has ``shared`` kind specified.
///
/// Given
///
/// \code
/// #pragma omp parallel
/// #pragma omp parallel default(none)
/// #pragma omp parallel default(shared)
/// #pragma omp parallel default(firstprivate)
/// \endcode
///
/// ``ompDefaultClause(isSharedKind())`` matches only ``default(shared)``.
AST_MATCHER(OMPDefaultClause, isSharedKind) {
  // Compare the clause's kind against `shared`.
  return llvm::omp::OMP_DEFAULT_shared == Node.getDefaultKind();
}
/// Matches if the OpenMP ``default`` clause has ``firstprivate`` kind
/// specified.
///
/// Given
///
/// \code
/// #pragma omp parallel
/// #pragma omp parallel default(none)
/// #pragma omp parallel default(shared)
/// #pragma omp parallel default(firstprivate)
/// \endcode
///
/// ``ompDefaultClause(isFirstPrivateKind())`` matches only
/// ``default(firstprivate)``.
AST_MATCHER(OMPDefaultClause, isFirstPrivateKind) {
  // Compare the clause's kind against `firstprivate`.
  return llvm::omp::OMP_DEFAULT_firstprivate == Node.getDefaultKind();
}
/// Matches if the OpenMP directive is allowed to contain the specified OpenMP
/// clause kind.
///
/// Given
///
/// \code
/// #pragma omp parallel
/// #pragma omp parallel for
/// #pragma omp for
/// \endcode
///
/// `ompExecutableDirective(isAllowedToContainClause(OMPC_default))`` matches
/// ``omp parallel`` and ``omp parallel for``.
///
/// If the matcher is use from clang-query, ``OpenMPClauseKind`` parameter
/// should be passed as a quoted string. e.g.,
/// ``isAllowedToContainClauseKind("OMPC_default").``
AST_MATCHER_P(OMPExecutableDirective, isAllowedToContainClauseKind,
              OpenMPClauseKind, CKind) {
  // Legality depends on the OpenMP version selected by the language options.
  const unsigned OpenMPVersion = Finder->getASTContext().getLangOpts().OpenMP;
  return llvm::omp::isAllowedClauseForDirective(Node.getDirectiveKind(), CKind,
                                                OpenMPVersion);
}
//----------------------------------------------------------------------------//
// End OpenMP handling.
//----------------------------------------------------------------------------//
} // namespace ast_matchers
} // namespace clang
#endif // LLVM_CLANG_ASTMATCHERS_ASTMATCHERS_H
|
core_single_cpu.c | /* -*- mode: C; tab-width: 2; indent-tabs-mode: nil; -*- */
/*
* This code has been contributed by the DARPA HPCS program. Contact
* David Koester <dkoester@mitre.org> or Bob Lucas <rflucas@isi.edu>
* if you have questions.
*
* GUPS (Giga UPdates per Second) is a measurement that profiles the memory
* architecture of a system and is a measure of performance similar to MFLOPS.
* The HPCS HPCchallenge RandomAccess benchmark is intended to exercise the
* GUPS capability of a system, much like the LINPACK benchmark is intended to
* exercise the MFLOPS capability of a computer. In each case, we would
* expect these benchmarks to achieve close to the "peak" capability of the
* memory system. The extent of the similarities between RandomAccess and
* LINPACK are limited to both benchmarks attempting to calculate a peak system
* capability.
*
* GUPS is calculated by identifying the number of memory locations that can be
* randomly updated in one second, divided by 1 billion (1e9). The term "randomly"
* means that there is little relationship between one address to be updated and
* the next, except that they occur in the space of one half the total system
* memory. An update is a read-modify-write operation on a table of 64-bit words.
* An address is generated, the value at that address read from memory, modified
* by an integer operation (add, and, or, xor) with a literal value, and that
* new value is written back to memory.
*
* We are interested in knowing the GUPS performance of both entire systems and
* system subcomponents --- e.g., the GUPS rating of a distributed memory
* multiprocessor the GUPS rating of an SMP node, and the GUPS rating of a
* single processor. While there is typically a scaling of FLOPS with processor
* count, a similar phenomenon may not always occur for GUPS.
*
* For additional information on the GUPS metric, the HPCchallenge RandomAccess
* Benchmark,and the rules to run RandomAccess or modify it to optimize
* performance -- see http://icl.cs.utk.edu/hpcc/
*
*/
/*
* This file contains the computational core of the single cpu version
* of GUPS. The inner loop should easily be vectorized by compilers
* with such support.
*
* This core is used by both the single_cpu and star_single_cpu tests.
*/
#include <hpcc.h>
#include "RandomAccess.h"
/* Number of updates to table (suggested: 4x number of table entries) */
#define NUPDATE (4 * TableSize)
/* Perform NUPDATE pseudo-random XOR updates to Table.  128 independent
 * LFSR streams are advanced in lock step so the inner loop can be
 * vectorized or parallelized; the aggregate sequence of updates is
 * equivalent to the scalar loop shown in the comment below. */
static void
RandomAccessUpdate(u64Int TableSize, u64Int *Table) {
  u64Int i;
  u64Int ran[128];              /* Current random numbers, one per stream */
  int j;

  /* Perform updates to main table.  The scalar equivalent is:
   *
   *     u64Int ran;
   *     ran = 1;
   *     for (i=0; i<NUPDATE; i++) {
   *       ran = (ran << 1) ^ (((s64Int) ran < 0) ? POLY : 0);
   *       table[ran & (TableSize-1)] ^= ran;
   *     }
   */
  /* Seed each of the 128 streams at its own starting point in the
   * global random sequence. */
  for (j=0; j<128; j++)
    ran[j] = HPCC_starts ((NUPDATE/128) * j);

  for (i=0; i<NUPDATE/128; i++) {
/* #pragma ivdep */
#ifdef _OPENMP
/* NOTE: concurrent streams may race on the same Table entry; the
 * verification step in HPCC_RandomAccess tolerates up to 1% errors. */
#pragma omp parallel for
#endif
    for (j=0; j<128; j++) {
      /* LFSR step: shift left and conditionally XOR with the polynomial
       * when the top bit was set (tested via signed comparison). */
      ran[j] = (ran[j] << 1) ^ ((s64Int) ran[j] < 0 ? POLY : 0);
      /* TableSize is a power of 2, so masking selects a random index. */
      Table[ran[j] & (TableSize-1)] ^= ran[j];
    }
  }
}
/* Run the single-CPU RandomAccess (GUPS) benchmark.
 *
 * params  - benchmark configuration (memory budget, output file name);
 *           params->RandomAccess_N is set to the table size used.
 * doIO    - when non-zero, append results to params->outFname.
 * GUPs    - out: measured billions of updates per second (-1 on zero time).
 * failure - out: 0 if verification found <= 1% erroneous entries, else 1.
 *
 * Returns 0 on success, 1 if the output file or the table allocation fails. */
int
HPCC_RandomAccess(HPCC_Params *params, int doIO, double *GUPs, int *failure) {
  u64Int i;
  u64Int temp;
  double cputime;               /* CPU time to update table */
  double realtime;              /* Real time to update table */
  double totalMem;
  u64Int *Table;
  u64Int logTableSize, TableSize;
  FILE *outFile = NULL;

  if (doIO) {
    outFile = fopen( params->outFname, "a" );
    if (! outFile) {
      outFile = stderr;
      fprintf( outFile, "Cannot open output file.\n" );
      return 1;
    }
  }

  /* calculate local memory per node for the update table */
  totalMem = params->HPLMaxProcMem;
  totalMem /= sizeof(u64Int);

  /* calculate the size of update array (must be a power of 2):
   * largest power of 2 no bigger than half the available words. */
  for (totalMem *= 0.5, logTableSize = 0, TableSize = 1;
       totalMem >= 1.0;
       totalMem *= 0.5, logTableSize++, TableSize <<= 1)
    ; /* EMPTY */

  Table = HPCC_XMALLOC( u64Int, TableSize );
  if (! Table) {
    if (doIO) {
      fprintf( outFile, "Failed to allocate memory for the update table (" FSTR64 ").\n", TableSize);
      fclose( outFile );
    }
    return 1;
  }
  params->RandomAccess_N = (s64Int)TableSize;

  /* Print parameters for run */
  if (doIO) {
    fprintf( outFile, "Main table size = 2^" FSTR64 " = " FSTR64 " words\n", logTableSize,TableSize);
    fprintf( outFile, "Number of updates = " FSTR64 "\n", NUPDATE);
  }

  /* Initialize main table: Table[i] = i, which the verification pass
   * below relies on. */
  for (i=0; i<TableSize; i++) Table[i] = i;

  /* Begin timing here */
  cputime = -CPUSEC();
  realtime = -RTSEC();

  RandomAccessUpdate( TableSize, Table );

  /* End timed section */
  cputime += CPUSEC();
  realtime += RTSEC();

  /* make sure no division by zero */
  *GUPs = (realtime > 0.0 ? 1.0 / realtime : -1.0);
  *GUPs *= 1e-9*NUPDATE;

  /* Print timing results */
  if (doIO) {
  fprintf( outFile, "CPU time used = %.6f seconds\n", cputime);
  fprintf( outFile, "Real time used = %.6f seconds\n", realtime);
  fprintf( outFile, "%.9f Billion(10^9) Updates per second [GUP/s]\n", *GUPs );
  }

  /* Verification of results (in serial or "safe" mode; optional):
   * re-run the scalar update sequence, which XORs every earlier update
   * back out; a correct table is restored to Table[i] == i. */
  temp = 0x1;
  for (i=0; i<NUPDATE; i++) {
    temp = (temp << 1) ^ (((s64Int) temp < 0) ? POLY : 0);
    Table[temp & (TableSize-1)] ^= temp;
  }

  /* Count entries the (possibly racy) parallel update got wrong. */
  temp = 0;
  for (i=0; i<TableSize; i++)
    if (Table[i] != i)
      temp++;

  if (doIO) {
  fprintf( outFile, "Found " FSTR64 " errors in " FSTR64 " locations (%s).\n",
           temp, TableSize, (temp <= 0.01*TableSize) ? "passed" : "failed");
  }
  /* Up to 1% erroneous locations is accepted (races are tolerated). */
  if (temp <= 0.01*TableSize) *failure = 0;
  else *failure = 1;

  HPCC_free( Table );

  if (doIO) {
    fflush( outFile );
    fclose( outFile );
  }

  return 0;
}
|
wallfade.c | #include <GL/gl.h> // for glColor4f, glTexCoord2f, glVertex2i
#include <GL/glx.h> // for glXChooseVisual, glXCreateContext
#include <X11/X.h> // for None, Window
#include <X11/Xatom.h> // for XA_ATOM
#include <X11/Xlib.h> // for Screen, (anonymous), XOpenDisplay
#include <X11/Xutil.h> // for XVisualInfo
#include <dirent.h> // for DIR, opendir, closedir, readdir
#include <getopt.h> // for optarg, getopt
#include <glob.h> // for glob_t, glob, globfree, GLOB_BRACE
#include <limits.h> // for PATH_MAX
#include <signal.h> // for signal, SIGINT, SIGKILL, SIGQUIT
#include <stdbool.h> // for bool
#include <stdint.h> // for uint32_t
#include <stdio.h> // for fprintf, NULL, printf, stderr
#include <stdlib.h> // for exit, free, malloc, rand, realpath
#include <string.h> // for __s1_len, __s2_len, strcmp, strlen
#include <sys/time.h> // for CLOCK_MONOTONIC
#include <tgmath.h> // for fmaxf, fminf
#include <time.h> // for timespec, clock_gettime, time
#include <unistd.h> // for usleep
#include <ctype.h> // for isdigit
#include <libgen.h>
#include <X11/extensions/Xrandr.h> // for XRRMonitorInfo, XRRFreeMonitors
#include <X11/extensions/Xinerama.h>
#include <iniparser.h>
#include <pwd.h>
#include <sys/shm.h>
#include <sys/stat.h>
#include "magick.h"
#define MEM_SIZE 4096
#define MAX_MONITORS 10
#define DEFAULT_IDLE_TIME 3
#define DEFAULT_FADE_TIME 1
#define S_(x) #x
#define S(x) S_(x)
#define MESSAGE(y,x) !strcmp(y, x)
#define MSG_NONE '\0'
#define MSG_PARENT '\1'
#define MSG_DONE '\2'
/* A single filesystem path; one per monitor for the wallpaper search paths. */
struct Path {
    char path[PATH_MAX];
};

/* Per-monitor drawing state: geometry plus the two GL texture ids used for
 * the cross-fade (front = currently shown image, back = incoming image). */
struct Plane {
    int width;
    int height;
    int x;
    int y;
    uint32_t front;              /* GL texture id of the visible image */
    uint32_t back;               /* GL texture id of the incoming image */
    char front_path[PATH_MAX];   /* file the front texture was loaded from */
    char back_path[PATH_MAX];    /* file the back texture was loaded from */
};

/* OpenGL state owned by the program (just the GLX context). */
struct OpenGL {
    GLXContext ctx;
};

/* Global program state; single instance `settings` below. */
struct _settings {
    Display *dpy;                /* X display connection */
    Screen *scr;                 /* default screen */
    XVisualInfo *vi;             /* GLX visual */
    Window root;                 /* root window */
    Window desktop;              /* detected desktop window */
    Window win;                  /* our drawing window */
    int base;
    int parent;
    float seconds;               /* frame delta time -- presumably fed from
                                    getDeltaTime(); TODO confirm in update() */
    float timer;                 /* countdown between fades -- TODO confirm */
    uint32_t screen;             /* X screen number */
    int nmon;                    /* number of monitors */
    int *nfiles;                 /* per-monitor count of candidate files */
    float fade;                  /* fade rate; drawPlanes advances the fade by
                                    fade * seconds each frame */
    int idle;                    /* idle time (see DEFAULT_IDLE_TIME) */
    int smoothfunction;          /* easing: 1=linear, 2=smoothstep,
                                    3=smootherstep (see smooth()) */
    bool running;                /* main-loop flag; cleared by gotsig() */
    bool fading;                 /* a cross-fade is in progress */
    bool center;
    char lower[PATH_MAX];        /* class name of a window to wait for and
                                    lower at startup (see init()) */
    char default_path[PATH_MAX];
    char *shmem;                 /* shared-memory message buffer, MEM_SIZE
                                    bytes (see messageRespond()) */
    struct Plane *planes;        /* per-monitor planes (nmon entries) */
    struct Path *paths;          /* per-monitor search paths */
    struct OpenGL opengl;
} settings;

/* Worker thread handle. */
pthread_t thread;
Window findByClass(const char *classname);
Window findDesktop();
float getDeltaTime();
int getMonitorsXRR();
int getMonitorsXinerama();
void initOpengl();
int init();
void gotsig(int signum);
void shutdown();
void drawplane(struct Plane *plane, uint32_t texture, float alpha);
void drawplanes();
void update();
int checkfile(char *file);
char **getfiles(int monitor);
void cleanFiles(char **files, int total_files);
void ThrowWandException(MagickWand *wand);
MagickWand *doMagick(const char *current, int width, int height);
void loadTexture(const char *current, uint32_t *id, int width, int height);
void randomImage(uint32_t *side, struct Plane *plane, const char *not,
int monitor);
void randomImages(int monitor);
int parsePaths(char *paths, int (*outputPtr)(const char *, ...));
int handler(Display *dpy, XErrorEvent *e);
int getProcIdByName(const char *proc_name);
char *createSharedMemory(size_t size, int parent);
int messageRespond(const char *format, ...);
void loadConfig();
void printConfig();
/* X error handler: log the error code and keep running instead of letting
 * Xlib abort the program. */
int handler(Display *dpy, XErrorEvent *e)
{
    (void)dpy; /* unused; signature fixed by XSetErrorHandler */
    const int code = e->error_code;
    fprintf(stderr, "X Error code: %d\n", code);
    return 0;
}
/* Find a direct child of the root window whose name starts with classname.
 * Returns the window id, or 0 when no such window exists.
 *
 * Fixes two defects in the original: it returned children[i] AFTER
 * XFree(children) (use-after-free), and it leaked the children list when
 * no window matched. */
Window findByClass(const char *classname)
{
    unsigned int n;
    Window troot, parent;
    Window *children = NULL;
    XWindowAttributes attrs;
    Window found = 0;

    XQueryTree(settings.dpy, settings.root, &troot, &parent, &children, &n);

    for (unsigned int i = 0; i < n; i++) {
        char *name = NULL;
        int status = XFetchName(settings.dpy, children[i], &name);
        status |= XGetWindowAttributes(settings.dpy, children[i], &attrs);

        if ((status != 0) && (NULL != name) &&
            !strncmp(name, classname, strlen(classname))) {
            /* Save the id before the list is freed below. */
            found = children[i];
        }
        if (name) {
            XFree(name);
        }
        if (found) {
            break;
        }
    }

    /* The children list returned by XQueryTree is owned by the caller. */
    if (children) {
        XFree(children);
    }
    return found;
}
/* Locate the screen-sized, mapped window named "Desktop" among the root's
 * children; falls back to the root window itself. On success the id is
 * also stored in settings.win.
 *
 * Fixes the original's use-after-free (it returned children[i] after
 * XFree(children)) and the leak of the children list when nothing matched. */
Window findDesktop()
{
    unsigned int n;
    Window troot, parent;
    Window *children = NULL;
    XWindowAttributes attrs;
    Window found = 0;

    XQueryTree(settings.dpy, settings.root, &troot, &parent, &children, &n);

    for (unsigned int i = 0; i < n; i++) {
        char *name = NULL;
        int status = XFetchName(settings.dpy, children[i], &name);
        status |= XGetWindowAttributes(settings.dpy, children[i], &attrs);

        if ((status != 0) && (NULL != name) &&
            (attrs.map_state != 0) &&
            (attrs.width == settings.scr->width) &&
            (attrs.height == settings.scr->height) &&
            (!strcmp(name, "Desktop"))) {
            /* Save the id before the list is freed below. */
            found = children[i];
        }
        if (name) {
            XFree(name);
        }
        if (found) {
            break;
        }
    }

    if (children) {
        XFree(children);
    }

    if (found) {
        settings.win = found;
        return found;
    }
    return settings.root;
}
/* Seconds elapsed since the previous call (millisecond resolution).
 * The first call measures against a zero-initialized timestamp and thus
 * returns a very large value. */
float getDeltaTime()
{
    static struct timespec prev;
    struct timespec now;

    clock_gettime(CLOCK_MONOTONIC, &now);

    long long now_ms = now.tv_sec * 1000 + now.tv_nsec / 1000000;
    long long prev_ms = prev.tv_sec * 1000 + prev.tv_nsec / 1000000;
    prev = now;

    return (float)(now_ms - prev_ms) * 0.001f;
}
/* Query monitor geometry via the Xinerama extension and allocate the
 * per-monitor planes/nfiles arrays. Returns 1 on success, 0 when Xinerama
 * is inactive or reports no screens.
 *
 * Fix: the screen-info array returned by XineramaQueryScreens was never
 * released; it must be freed with XFree (the XRR path already frees its
 * monitor list). */
int getMonitorsXinerama()
{
    if (!XineramaIsActive(settings.dpy)) {
        return 0;
    }

    XineramaScreenInfo *monitors = XineramaQueryScreens(
        settings.dpy,
        &settings.nmon
    );

    if (monitors == 0) {
        return 0;
    }

    settings.planes = malloc(settings.nmon * sizeof(struct Plane));
    settings.nfiles = malloc(settings.nmon * sizeof(int));

    for (int i = 0; i < settings.nmon; i++) {
        settings.planes[i].width = monitors[i].width;
        settings.planes[i].height = monitors[i].height;
        settings.planes[i].x = monitors[i].x_org;
        settings.planes[i].y = monitors[i].y_org;
        settings.planes[i].front = 0;
        settings.planes[i].back = 0;

        printf(
            "monitor: %d %dx%d+%d+%d\n",
            i,
            monitors[i].width,
            monitors[i].height,
            monitors[i].x_org,
            monitors[i].y_org
        );
    }

    XFree(monitors);
    return 1;
}
int getMonitorsXRR()
{
XRRMonitorInfo *monitors = XRRGetMonitors(
settings.dpy,
settings.root,
0,
&settings.nmon
);
if (monitors == 0) {
return 0;
}
settings.planes = malloc(settings.nmon * sizeof(struct Plane));
settings.nfiles = malloc(settings.nmon * sizeof(int));
for (int i = 0; i < settings.nmon; i++) {
settings.planes[i].width = monitors[i].width;
settings.planes[i].height = monitors[i].height;
settings.planes[i].x = monitors[i].x;
settings.planes[i].y = monitors[i].y;
settings.planes[i].front = 0;
settings.planes[i].back = 0;
printf(
"monitor: %d %dx%d+%d+%d\n",
i,
monitors[i].width,
monitors[i].height,
monitors[i].x,
monitors[i].y
);
}
XRRFreeMonitors(monitors);
return 1;
}
/* Create the GLX context on settings.win and configure a 2D orthographic
 * projection covering the entire screen. Exits the process if the context
 * cannot be created. */
void initOpengl()
{
    settings.opengl.ctx = glXCreateContext(
        settings.dpy,
        settings.vi,
        NULL,
        GL_TRUE
    );

    if (!settings.opengl.ctx) {
        fprintf(stderr, "Error: glXCreateContext failed\n");
        exit(-1);
    }

    glXMakeCurrent(settings.dpy, settings.win, settings.opengl.ctx);
    printf("GL_RENDERER = %s\n", (char *) glGetString(GL_RENDERER));

    //glEnable(GL_DEPTH_TEST);
    glEnable(GL_BLEND);
    glEnable(GL_TEXTURE_2D);
    /* Standard alpha blending; drawPlane relies on it for the cross-fade. */
    glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);

    glMatrixMode(GL_PROJECTION);
    glLoadIdentity();
    /* Top/bottom swapped so Y grows downward, matching X window coords. */
    glOrtho(0, settings.scr->width, settings.scr->height, 0, -100.0f, 100.0f);
    glViewport(0, 0, settings.scr->width, settings.scr->height);
    glClearColor(0, 0, 0, 1);
}
/* Initialize Magick, connect to X, create the desktop-layer window, set
 * its EWMH properties, detect monitors and set up OpenGL.
 * Returns 1 on success, 0 on failure (exits if no monitors are found).
 *
 * Fixes: XChangeProperty was passed nelements=4 for the 5-element state
 * array, silently dropping _NET_WM_STATE_STICKY; XSync's second argument
 * is a Bool discard flag, not a window id. */
int init(int argc, char **argv)
{
#ifdef GraphicsMagick
    InitializeMagick(*argv);
#else
    MagickWandGenesis();
#endif

    settings.dpy = XOpenDisplay(NULL);
    if (settings.dpy == NULL) {
        fprintf(stderr, "Cannot connect to X server\n");
        return 0;
    }

    XSetErrorHandler(handler);

    settings.scr = DefaultScreenOfDisplay(settings.dpy);
    if (settings.scr == NULL) {
        fprintf(stderr, "No screen found\n");
        return 0;
    }
    printf("ScreenSize: %dx%d\n", settings.scr->width, settings.scr->height);

    GLint vi_att[] = { GLX_RGBA, GLX_DEPTH_SIZE, 24, GLX_DOUBLEBUFFER, None };
    settings.vi = glXChooseVisual(settings.dpy, settings.screen, vi_att);
    if (settings.vi == NULL) {
        fprintf(stderr, "No appropriate visual found\n");
        return 0;
    }

    settings.root = RootWindow(settings.dpy, settings.screen);

    /* Optionally wait for another client (by class name) to appear, then
     * lower it below our wallpaper window. */
    if (strlen(settings.lower)) {
        printf("Waiting for %s...\n", settings.lower);
        Window wid = 0;
        do {
            wid = findByClass(settings.lower);
            if (wid != 0) {
                break;
            }
            usleep(100000);
        } while (1);
        printf("Found %s (%lu)\n", settings.lower, wid);
        XLowerWindow(settings.dpy, wid);
    }

    XSetWindowAttributes attr = {0};
    attr.override_redirect = 1;

    /* Parent the GL window to the desktop window when one exists so the
     * wallpaper stays behind all normal clients. */
    Window desktop = findDesktop();
    settings.win = XCreateWindow(
        settings.dpy,
        desktop,
        0,
        0,
        settings.scr->width,
        settings.scr->height,
        0,
        CopyFromParent,
        InputOutput,
        CopyFromParent,
        CWOverrideRedirect,
        &attr
    );

    XSizeHints xsh;
    xsh.flags = PSize | PPosition;
    xsh.width = settings.scr->width;
    xsh.height = settings.scr->height;

    XWMHints xwmh;
    xwmh.flags = InputHint;
    xwmh.input = 0; /* never take keyboard focus */

    XSetWMProperties(
        settings.dpy,
        settings.win,
        NULL,
        NULL,
        argv,
        argc,
        &xsh,
        &xwmh,
        NULL
    );

    XClassHint hints;
    hints.res_name = "wallfade";
    hints.res_class = "Wallfade";
    XSetClassHint(
        settings.dpy,
        settings.win,
        &hints
    );

    /* Advertise all five window states (the original passed nelements=4
     * and never applied _NET_WM_STATE_STICKY). */
    Atom state[5];
    state[0] = XInternAtom(settings.dpy, "_NET_WM_STATE_BELOW", 0);
    state[1] = XInternAtom(settings.dpy, "_NET_WM_STATE_FULLSCREEN", 0);
    state[2] = XInternAtom(settings.dpy, "_NET_WM_STATE_SKIP_PAGER", 0);
    state[3] = XInternAtom(settings.dpy, "_NET_WM_STATE_SKIP_TASKBAR", 0);
    state[4] = XInternAtom(settings.dpy, "_NET_WM_STATE_STICKY", 0);

    Atom type = XInternAtom(settings.dpy, "_NET_WM_WINDOW_TYPE_DESKTOP", 0);

    XChangeProperty(
        settings.dpy,
        settings.win,
        XInternAtom(settings.dpy, "_NET_WM_STATE", 0),
        XA_ATOM,
        32,
        PropModeReplace,
        (unsigned char *) state,
        5
    );

    XChangeProperty(
        settings.dpy,
        settings.win,
        XInternAtom(settings.dpy, "_NET_WM_WINDOW_TYPE", 1),
        XA_ATOM,
        32,
        PropModeReplace,
        (unsigned char *) &type,
        1
    );

    XMapWindow(settings.dpy, settings.win);
    XLowerWindow(settings.dpy, settings.win);
    /* Flush the request queue without discarding queued events. */
    XSync(settings.dpy, False);

    if (!getMonitorsXRR()) {
        if (!getMonitorsXinerama()) {
            fprintf(stderr, "Unable to find monitors\n");
            exit(-1);
        }
    }

    initOpengl();
    return 1;
}
/* Signal handler: ask the main loop to exit on its next iteration. */
void gotsig(int signum)
{
    (void)signum; /* same action regardless of which signal fired */
    settings.running = false;
}
/* Release all global resources: GL textures, per-monitor arrays, shared
 * memory, the GLX context and the Magick runtime.
 *
 * Fixes: the original freed settings.planes and then dereferenced
 * settings.planes[i] in the texture-deletion loop (use-after-free), and
 * called shmdt(&settings.shmem) with the address of the pointer instead
 * of the mapped address itself; settings.nfiles was also leaked. */
void shutdown()
{
    /* Delete GL textures BEFORE freeing the array that stores their ids. */
    if (settings.planes) {
        for (int i = 0; i < settings.nmon; i++) {
            if (settings.planes[i].front != 0) {
                glDeleteTextures(1, &settings.planes[i].front);
            }
            if (settings.planes[i].back != 0) {
                glDeleteTextures(1, &settings.planes[i].back);
            }
        }
        free(settings.planes);
        settings.planes = NULL;
    }

    if (settings.paths) {
        free(settings.paths);
        settings.paths = NULL;
    }

    if (settings.nfiles) {
        free(settings.nfiles);
        settings.nfiles = NULL;
    }

    /* shmdt() takes the address returned by shmat(). */
    if (settings.shmem) {
        shmdt(settings.shmem);
    }

    glXDestroyContext(settings.dpy, settings.opengl.ctx);

#ifdef GraphicsMagick
    DestroyMagick();
#else
    MagickWandTerminus();
#endif
}
/* Draw one monitor's plane as a textured screen-space quad with the given
 * alpha (blending is configured in initOpengl). */
void drawPlane(struct Plane *plane, uint32_t texture, float alpha)
{
    /* Texture coordinate and screen corner for each quad vertex, in the
     * same winding order as the original immediate-mode calls. */
    const float tex[4][2] = { {0, 0}, {0, 1}, {1, 1}, {1, 0} };
    const int corner[4][2] = {
        { plane->x, plane->y },
        { plane->x, plane->y + plane->height },
        { plane->x + plane->width, plane->y + plane->height },
        { plane->x + plane->width, plane->y },
    };

    glBindTexture(GL_TEXTURE_2D, texture);
    glBegin(GL_QUADS);
    for (int v = 0; v < 4; v++) {
        glTexCoord2f(tex[v][0], tex[v][1]);
        glColor4f(1, 1, 1, alpha);
        glVertex2i(corner[v][0], corner[v][1]);
    }
    glEnd();
}
/* Apply the configured easing curve to val over [min, max].
 * settings.smoothfunction: 1 = linear (val returned unchanged),
 * 3 = smootherstep, anything else = smoothstep. */
float smooth(float min, float max, float val)
{
    if (settings.smoothfunction == 1) {
        return val;
    }

    /* Normalize to [0, 1] and clamp. */
    const float t = fmaxf(0, fminf(1, (val - min) / (max - min)));

    if (settings.smoothfunction == 3) {
        /* smootherstep */
        return t * t * (t * (t * 6 - 15) + 10);
    }
    /* smoothstep (also the default for unknown values) */
    return t * t * (3 - 2 * t);
}
/* Advance the cross-fade state and draw every monitor's planes.
 * While fading, the back image is drawn opaque under the front image,
 * whose alpha decreases until the fade completes; then the two planes are
 * swapped and a fresh back image is picked.
 *
 * Fix: the path copy used sprintf("%.*s") with a precision equal to the
 * destination size, which can leave the buffer without a NUL terminator;
 * snprintf guarantees termination. */
void drawPlanes()
{
    float alpha = 0.0f;

    if (settings.fading) {
        /* Fade progress persists across frames. */
        static float linear = 0.0f;
        linear += settings.fade * settings.seconds;
        alpha = smooth(0.0f, 1.0f, linear);

        if (linear > 1.0f) {
            /* Fade finished: promote back -> front and choose the next
             * back image (different from the new front). */
            settings.fading = false;
            for (int i = 0; i < settings.nmon; i++) {
                uint32_t tmp = settings.planes[i].front;
                settings.planes[i].front = settings.planes[i].back;
                snprintf(
                    settings.planes[i].front_path,
                    sizeof(settings.planes[i].front_path),
                    "%s",
                    settings.planes[i].back_path
                );
                settings.planes[i].back = tmp;
                randomImage(
                    &settings.planes[i].back,
                    &settings.planes[i],
                    settings.planes[i].front_path,
                    i
                );
            }
            linear = 0.0f;
            alpha = 0.0f;
        }
    }

    for (int i = 0; i < settings.nmon; i++) {
        if (settings.nfiles[i]) {
            if (settings.nfiles[i] > 1) {
                drawPlane(
                    &settings.planes[i],
                    settings.planes[i].back,
                    1.0f
                );
                drawPlane(
                    &settings.planes[i],
                    settings.planes[i].front,
                    1.0f - alpha
                );
            } else {
                /* Only one image known: draw it and retry loading a second
                 * one for the back plane, throttled to twice a second. */
                drawPlane(
                    &settings.planes[i],
                    settings.planes[i].front,
                    1.0f
                );
                randomImage(
                    &settings.planes[i].back,
                    &settings.planes[i],
                    settings.planes[i].front_path,
                    i
                );
                usleep(500000);
            }
        } else {
            /* No files found yet for this monitor; rescan, throttled. */
            randomImages(i);
            usleep(500000);
        }
    }
}
/*
 * Format a reply, publish it to the client via shared memory, and echo
 * it to stdout. The first shmem byte is the MSG_PARENT tag the client
 * polls for; the reply text follows it.
 *
 * Returns 0 (printf-compatible signature so it can be passed where an
 * int (*)(const char *, ...) is expected, e.g. parsePaths()).
 */
int messageRespond(const char *format, ...)
{
    /* Wait until the client has consumed any previous reply. */
    while (settings.shmem[0] == MSG_PARENT) {
        usleep(10000);
    }
    char output[MEM_SIZE] = {0};
    va_list args;
    va_start(args, format);
    /* vsnprintf (was vsprintf): never overrun the local buffer. */
    vsnprintf(output, sizeof(output), format, args);
    va_end(args);
    /* Bounded write: tag byte + text + NUL must fit in MEM_SIZE (the old
     * "%.*s" precision of MEM_SIZE allowed a 2-byte overrun). */
    snprintf(
        settings.shmem,
        MEM_SIZE,
        "%c%.*s",
        MSG_PARENT,
        MEM_SIZE - 2,
        output
    );
    /* Print literally — output may contain '%' (e.g. from file paths),
     * so it must never be used as a format string itself. */
    printf("%s", output);
    return 0;
}
/*
 * Poll the shared-memory channel for a client command and dispatch it.
 *
 * shmem[0] doubles as the channel state: MSG_NONE (idle), MSG_PARENT
 * (reply pending), MSG_DONE (handled), or the first character of a newly
 * written command string. Commands are space-separated tokens; replies
 * are sent back through messageRespond(). MESSAGE() is a project macro
 * that presumably compares a token against a command name — confirm.
 *
 * NOTE(review): every `break` below exits the token loop before the
 * free(command) at the bottom, leaking that strdup — confirm intent.
 * NOTE(review): several replies are built from file paths and passed to
 * messageRespond() as the *format* argument; a '%' in a path would be
 * interpreted as a conversion — confirm.
 */
void checkMessages()
{
    if (
        settings.shmem[0] != MSG_NONE &&
        settings.shmem[0] != MSG_PARENT &&
        settings.shmem[0] != MSG_DONE
    ) {
        /* Work on a private copy: strtok mutates its input. */
        char *tmpstr = strdup(settings.shmem);
        char separator[3] = " \0";
        char *token = strtok(tmpstr, separator);
        while (token != 0) {
            char *command = strdup(token);
            if (MESSAGE(command, "help")) {
                /* Build the whole help text in one buffer, one reply. */
                char output[MEM_SIZE] = {0};
                int len = 0;
                len += sprintf(output + len, "wallfade messages:\n");
                len += sprintf(output + len, "\tcurrent : display current wallpapers\n");
                len += sprintf(output + len,
                    "\tnext : force wallfade to change wallpapers\n");
                len += sprintf(output + len, "\tfade : set fade time\n");
                len += sprintf(output + len, "\tidle : set idle time\n");
                len += sprintf(output + len, "\tsmooth : change smoothfunction\n");
                len += sprintf(output + len, "\tpaths : change paths\n");
                len += sprintf(output + len, "\tconfig : print current config\n");
                messageRespond(output);
                break;
            } else if (MESSAGE(command, "current")) {
                /* One line per monitor with its current front image. */
                char output[MEM_SIZE] = {0};
                for (int i = 0; i < settings.nmon; i++) {
                    char line[256] = {0};
                    sprintf(
                        line,
                        "Monitor %d: %.*s\n",
                        i,
                        100,
                        settings.planes[i].front_path
                    );
                    int len = strlen(output);
                    sprintf(output + len, "%.*s", MEM_SIZE - len, line);
                }
                messageRespond(output);
            } else if (MESSAGE(command, "paths")) {
                /* With an argument: replace all paths. Without: list them. */
                token = strtok(0, separator);
                if (token != 0) {
                    for (int i = 0; i < settings.nmon; i++) {
                        memset(settings.paths[i].path, 0, PATH_MAX);
                    }
                    parsePaths(token, messageRespond);
                } else {
                    char output[MEM_SIZE] = {0};
                    for (int i = 0; i < settings.nmon; i++) {
                        char line[256] = {0};
                        if (strlen(settings.paths[i].path) > 0) {
                            sprintf(
                                line,
                                "Monitor %d: %.*s\n",
                                i,
                                100,
                                settings.paths[i].path
                            );
                        } else {
                            sprintf(
                                line,
                                "Monitor %d: %.*s\n",
                                i,
                                100,
                                settings.default_path
                            );
                        }
                        int len = strlen(output);
                        sprintf(output + len, "%.*s", MEM_SIZE - len, line);
                    }
                    messageRespond(output);
                }
                break;
            } else if (MESSAGE(command, "next")) {
                /* Force an immediate cross-fade. */
                settings.fading = true;
                settings.timer = 0;
                messageRespond("forcing next wallpapers\n");
            } else if (MESSAGE(command, "fade")) {
                /* With a numeric argument: set; otherwise report current. */
                token = strtok(0, separator);
                if (token != 0 && isdigit(token[0])) {
                    settings.fade = 1.0f / strtof(token, 0);
                    messageRespond("setting %s to %s\n", command, token);
                } else {
                    messageRespond(
                        "%s is set to %.2f\n",
                        command,
                        1.0f / settings.fade
                    );
                    break;
                }
            } else if (MESSAGE(command, "idle")) {
                token = strtok(0, separator);
                if (token != 0 && isdigit(token[0])) {
                    settings.idle = strtol(token, 0, 10);
                    messageRespond("setting %s to %s\n", command, token);
                } else {
                    messageRespond(
                        "%s is set to %d\n",
                        command,
                        settings.idle
                    );
                    break;
                }
            } else if (MESSAGE(command, "smooth")) {
                token = strtok(0, separator);
                if (token != 0 && isdigit(token[0])) {
                    settings.smoothfunction = strtol(token, 0, 10);
                    messageRespond("setting %s to %s\n", command, token);
                } else {
                    messageRespond(
                        "%s is set to %d\n",
                        command,
                        settings.smoothfunction
                    );
                    break;
                }
            } else if (MESSAGE(command, "config")) {
                printConfig();
            } else {
                messageRespond("Unknown command \"%s\"\n", token);
                break;
            }
            if (command) {
                free(command);
            }
            token = strtok(0, separator);
        }
        if (tmpstr) {
            free(tmpstr);
        }
        /* Mark the command handled so the client stops polling. */
        settings.shmem[0] = MSG_DONE;
    }
}
/*
 * One iteration of the main loop: draw all planes, swap buffers, advance
 * the idle/fade timers, and poll the shared-memory message channel.
 */
void update()
{
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
    glMatrixMode(GL_MODELVIEW);
    glLoadIdentity();
    drawPlanes();
    glXSwapBuffers(settings.dpy, settings.win);
    XFlush(settings.dpy);
    settings.seconds = getDeltaTime();
    /* Idle timeout reached: start a cross-fade to the next wallpapers. */
    if (settings.timer >= settings.idle) {
        settings.fading = true;
        settings.timer = 0;
    }
    /* The idle timer only advances while not fading. */
    if (!settings.fading) {
        settings.timer += settings.seconds;
        usleep(50000);
    }
    /* NOTE(review): combined with the conditional sleep above this waits
     * 100ms per frame when idle but 50ms while fading — confirm that the
     * extra idle sleep is intentional. */
    usleep(50000);
    checkMessages();
}
/*
 * Glob the wallpaper pattern configured for `monitor` and return a
 * heap-allocated array of resolved absolute paths.
 *
 * On success *total_files receives the number of glob matches and
 * settings.nfiles[monitor] the number of paths realpath() resolved.
 * Slots whose realpath() failed are left NULL so cleanFiles() can free
 * the array safely (previously they were left uninitialized and later
 * freed — undefined behavior). Returns NULL when the glob itself fails.
 */
char **getFiles(int monitor, int *total_files)
{
    char **files = 0;
    glob_t globbuf;
    int err = glob(
        settings.paths[monitor].path,
        GLOB_BRACE | GLOB_TILDE,
        NULL,
        &globbuf
    );
    settings.nfiles[monitor] = 0;
    if (err == 0) {
        /* calloc: unresolved slots stay NULL and are safe to free. */
        files = calloc(globbuf.gl_pathc + 1, sizeof(char *));
        *total_files = globbuf.gl_pathc;
        size_t i;
        int nfiles = 0;
#pragma omp parallel for private(i) reduction(+:nfiles)
        for (i = 0; i < globbuf.gl_pathc; i++) {
            char *file = realpath(globbuf.gl_pathv[i], NULL);
            if (file == NULL) {
                fprintf(
                    stderr,
                    "Unable to resolve realpath for %s\n",
                    globbuf.gl_pathv[i]
                );
            } else {
                files[i] = malloc(PATH_MAX);
                /* snprintf always NUL-terminates within PATH_MAX. */
                snprintf(files[i], PATH_MAX, "%s", file);
                nfiles++;
                free(file);
            }
        }
        /* NOTE(review): nfiles counts successes while the array keeps its
         * glob-order gaps, so callers indexing by `random() % nfiles` can
         * land on a NULL slot when a realpath failed — callers must skip
         * NULL entries. */
        settings.nfiles[monitor] = nfiles;
        globfree(&globbuf);
    }
    return files;
}
void cleanFiles(char **files, int total_files)
{
if (total_files) {
for (int i = 0; i < total_files; i++) {
free(files[i]);
}
free(files);
}
}
/*
 * Print the wand's pending ImageMagick exception to stderr, release the
 * description string, and terminate the program. Never returns.
 */
void ThrowWandException(MagickWand *wand)
{
    char *description;
    ExceptionType severity;
    /* MagickGetException returns a malloc'd description we must release. */
    description = MagickGetException(wand, &severity);
    fprintf(stderr, "Wand Error: %s\n", description);
    MagickRelinquishMemory(description);
    exit(-1);
}
/*
 * Load `current` and prepare it for a width x height texture: crop it to
 * the screen's aspect ratio (centered when settings.center is set,
 * top-left anchored otherwise) and resize it to the exact target size.
 *
 * Returns the wand holding the processed image; the caller owns it and
 * must DestroyMagickWand() it. Any Magick failure aborts the program via
 * ThrowWandException().
 */
MagickWand *doMagick(const char *current, int width, int height)
{
    MagickWand *wand = NewMagickWand();
    int status = MagickReadImage(wand, current);
    if (status == MagickFalse) {
        ThrowWandException(wand);
    }
    status = MagickSetImageGravity(wand, CenterGravity);
    if (status == MagickFalse) {
        ThrowWandException(wand);
    }
    int orig_height = MagickGetImageHeight(wand);
    int orig_width = MagickGetImageWidth(wand);
    int newheight = orig_height;
    int newwidth = orig_width;
    /* Shrink whichever dimension overshoots so the crop box matches the
     * screen's aspect ratio. */
    double screen_aspect = (double)width / (double)height;
    double image_aspect = (double)orig_width / (double)orig_height;
    if (screen_aspect < image_aspect) {
        newwidth = (int)((double)orig_height * screen_aspect);
    } else {
        newheight = (int)((double)orig_width / screen_aspect);
    }
    if (settings.center) {
        status = MagickCropImage(
            wand,
            newwidth,
            newheight,
            (orig_width - newwidth) / 2,
            (orig_height - newheight) / 2
        );
    } else {
        status = MagickCropImage(wand, newwidth, newheight, 0, 0);
    }
    if (status == MagickFalse) {
        ThrowWandException(wand);
    }
    /* BUG FIX: the resize result was previously discarded, so the check
     * below re-tested the stale crop status. */
#if ImageMagick_MajorVersion < 7 || GraphicsMagick
    status = MagickResizeImage(wand, width, height, GaussianFilter, 1.0);
#else
    status = MagickResizeImage(wand, width, height, GaussianFilter);
#endif
    if (status == MagickFalse) {
        ThrowWandException(wand);
    }
    return wand;
}
/*
 * Load image `current` scaled to width x height and upload it as RGB
 * texels into the GL texture named *id. On first use (*id == 0) a new
 * texture object is created; afterwards the existing one is updated in
 * place with glTexSubImage2D.
 */
void loadTexture(const char *current, uint32_t *id, int width, int height)
{
    MagickWand *wand = doMagick(current, width, height);
    /* 3 bytes per pixel: 8-bit R, G, B.
     * NOTE(review): the malloc result is not checked before use. */
    unsigned char *data = malloc((width * height) * 3);
#ifdef GraphicsMagick
    int status = MagickGetImagePixels(
        wand,
        0,
        0,
        width,
        height,
        "RGB",
        CharPixel,
        data
    );
#else
    int status = MagickExportImagePixels(
        wand,
        0,
        0,
        width,
        height,
        "RGB",
        CharPixel,
        data
    );
#endif
    if (status == MagickFalse) {
        ThrowWandException(wand);
    }
    if (*id != 0) {
        /* Texture already exists: overwrite its contents in place. */
        glBindTexture(GL_TEXTURE_2D, *id);
        glTexSubImage2D(
            GL_TEXTURE_2D,
            0,
            0,
            0,
            width,
            height,
            GL_RGB,
            GL_UNSIGNED_BYTE,
            data
        );
    } else {
        /* First upload: create and define the texture object. */
        glGenTextures(1, id);
        glBindTexture(GL_TEXTURE_2D, *id);
        glTexImage2D(
            GL_TEXTURE_2D,
            0,
            GL_RGB,
            width,
            height,
            0,
            GL_RGB,
            GL_UNSIGNED_BYTE,
            data
        );
    }
    DestroyMagickWand(wand);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
    glBindTexture(GL_TEXTURE_2D, 0);
    MagickRelinquishMemory(data);
}
/*
 * Pick a random wallpaper file for `monitor`, remember it in
 * plane->back_path, and upload it into the GL texture named by *side.
 *
 * exclude: path to avoid (typically the current front image) so the same
 *          wallpaper is not chosen twice in a row; ignored when only one
 *          file exists. (Parameter renamed from `not`; C callers are
 *          unaffected by parameter names.)
 */
void randomImage(uint32_t *side, struct Plane *plane, const char *exclude,
    int monitor)
{
    int total_files = 0;
    char **files = getFiles(monitor, &total_files);
    if (files != NULL && settings.nfiles[monitor] > 0) {
        int bkrand = 0;
        int attempts = 0;
        /* Re-roll while we land on an unresolved (NULL) slot or on the
         * excluded path; the cap guards against pathological file sets. */
        do {
            bkrand = random() % settings.nfiles[monitor];
        } while (
            ++attempts < 1000 &&
            (files[bkrand] == NULL ||
             (strcmp(files[bkrand], exclude) == 0 &&
              settings.nfiles[monitor] != 1))
        );
        if (files[bkrand] != NULL) {
            /* snprintf: the old "%.*s" used sizeof as the precision,
             * allowing the terminating NUL to land one byte past the end. */
            snprintf(
                plane->back_path,
                sizeof(plane->back_path),
                "%s",
                files[bkrand]
            );
            loadTexture(
                plane->back_path,
                side,
                plane->width,
                plane->height
            );
        }
    }
    cleanFiles(files, total_files);
}
void randomImages(int monitor)
{
randomImage(
&settings.planes[monitor].front,
&settings.planes[monitor],
"",
monitor
);
if (settings.nfiles[monitor] > 1) {
randomImage(
&settings.planes[monitor].back,
&settings.planes[monitor],
settings.planes[monitor].front_path,
monitor
);
}
}
/*
 * Parse a comma-separated path spec ("0:path,1:path,path") into the
 * per-monitor wallpaper paths. A "monitor:path" pair targets one
 * monitor; a bare path becomes the default for unassigned monitors.
 * Each final path gets a trailing '/' (if missing) and the glob pattern
 * "*.{jpg,png}" appended.
 *
 * outputPtr: printf-like sink for progress/reply messages (printf for
 *            the daemon, messageRespond for remote "paths" commands).
 * Returns 1 on success; 0 (and clears settings.running) when a monitor
 * has no path and no default exists.
 *
 * All copies now use snprintf: the old code used "%.*s" with a precision
 * of sizeof(buffer), which let the terminating NUL overflow by one byte.
 */
int parsePaths(char *paths, int (*outputPtr)(const char *, ...))
{
    if (strlen(paths)) {
        char *p = 0;
        while ((p = strsep(&paths, ",\0")) != NULL) {
            char *m = 0;
            if ((m = strsep(&p, ":")) != NULL && p != NULL) {
                /* "monitor:path" form */
                int monitor = atoi(m);
                if (monitor >= 0 && monitor < settings.nmon) {
                    snprintf(
                        settings.paths[monitor].path,
                        sizeof(settings.paths[monitor].path),
                        "%s",
                        p
                    );
                    /* First explicit path doubles as the default. */
                    if (strlen(settings.default_path) == 0) {
                        snprintf(
                            settings.default_path,
                            sizeof(settings.default_path),
                            "%s",
                            p
                        );
                    }
                } else {
                    fprintf(stderr, "Monitor %d not found.\n", monitor);
                }
            } else if (m != NULL) {
                /* Bare path: only sets the default. */
                snprintf(
                    settings.default_path,
                    sizeof(settings.default_path),
                    "%s",
                    m
                );
            }
        }
    }
    if (strlen(settings.default_path)) {
        outputPtr("Default path: %s\n", settings.default_path);
    }
    for (int i = 0; i < settings.nmon; i++) {
        int len = strlen(settings.paths[i].path);
        if (len == 0) {
            if (strlen(settings.default_path)) {
                snprintf(
                    settings.paths[i].path,
                    sizeof(settings.paths[i].path),
                    "%s",
                    settings.default_path
                );
                len = strlen(settings.paths[i].path);
            } else {
                fprintf(
                    stderr,
                    "Tried to set default path but none was specified!\n"
                );
                settings.running = false;
                return 0;
            }
        }
        outputPtr("Monitor %d path: %s\n", i, settings.paths[i].path);
        /* Ensure a '/' separator before appending the glob pattern. */
        if (settings.paths[i].path[len - 1] != '/') {
            snprintf(
                settings.paths[i].path + len,
                sizeof(settings.paths[i].path) - len,
                "/"
            );
            len = strlen(settings.paths[i].path);
        }
        snprintf(
            settings.paths[i].path + len,
            sizeof(settings.paths[i].path) - len,
            "%s",
            "*.{jpg,png}"
        );
    }
    return 1;
}
/*
 * Print command-line usage for the executable named `filename`.
 */
void help(const char *filename)
{
    static const char *lines[] = {
        " -f, fade : fade time (default 1.0s)\n",
        " -i, idle : idle time (default 3s)\n",
        " -s, smooth : smoothing function to use when fading.\n",
        " 1: linear\n",
        " 2: smoothstep (default)\n",
        " 3: smootherstep\n",
        "\n",
        " -p, paths : wallpapers paths.\n",
        " use monitor:path, if no monitor is\n",
        " specified the path will be used as\n",
        " the default path\n",
        "\n",
        " example: -p 0:path,1:path,path\n",
        "\n",
        " -l, lower : finds and lowers window by classname (e.g. Conky)\n",
        " -c, center : center wallpapers\n",
        " -m, message : send message to running process (-m help)\n",
        " -h, help : help\n",
        "\n"
    };
    printf("Usage: %s [options]\n", filename);
    for (size_t i = 0; i < sizeof(lines) / sizeof(lines[0]); i++) {
        printf("%s", lines[i]);
    }
}
/*
 * Scan /proc for another process whose stat comm field equals proc_name.
 * The calling process itself is skipped.
 *
 * Returns the matching pid, or -1 when none is found (or /proc cannot be
 * opened).
 *
 * Fixes: the old code `break`-ed out before fclose() on a match (leaking
 * the FILE) and called closedir() even when opendir() had failed.
 */
int getProcIdByName(const char *proc_name)
{
    int pid = -1;
    int current = getpid();
    DIR *proc = opendir("/proc");
    if (proc != NULL) {
        struct dirent *ent;
        /* Loop condition ends the scan as soon as pid is found. */
        while (pid < 0 && (ent = readdir(proc))) {
            int id = atoi(ent->d_name);
            /* Only numeric entries are processes; skip ourselves. */
            if (id > 0 && id != current) {
                char path[PATH_MAX] = {0};
                snprintf(path, sizeof(path), "/proc/%s/stat", ent->d_name);
                FILE *f = fopen(path, "r");
                if (f == NULL) {
                    continue;
                }
                char name[PATH_MAX];
                /* stat format: "<pid> (<comm>) ..." — extract comm. */
                int ret = fscanf(f, "%*d (%" S(PATH_MAX) "[^)]", name);
                if (ret > 0 && !strcmp(name, proc_name)) {
                    pid = id;
                }
                /* Always close, match or not (old code leaked on match). */
                fclose(f);
            }
        }
        /* Only close a directory we actually opened. */
        closedir(proc);
    }
    return pid;
}
/*
 * Resolve the current user's home directory: $HOME when set, otherwise
 * the passwd entry for the current uid. Exits the program when neither
 * source is available.
 */
const char *getHomeDir()
{
    const char *home = getenv("HOME");
    if (home == 0) {
        struct passwd *pw = getpwuid(getuid());
        if (pw == 0) {
            fprintf(stderr, "Unable to find home-directory\n");
            exit(EXIT_FAILURE);
        }
        home = pw->pw_dir;
    }
    return home;
}
/*
 * Return true when `name` exists in the filesystem (any file type),
 * i.e. stat() on it succeeds.
 */
bool fileExists(const char *name)
{
    struct stat info;
    int rc = stat(name, &info);
    return rc == 0;
}
/*
 * Locate and parse wallfade.ini, filling the global settings with the
 * file's values or built-in defaults.
 *
 * Search order: $XDG_CONFIG_HOME/wallfade.ini, ~/.wallfade.ini, then
 * ./wallfade.ini as a last (debug) resort.
 */
void loadConfig()
{
    dictionary *ini = 0;
    char filename[PATH_MAX] = {0};
    char file[255] = {"/wallfade.ini"};
    const char *confdir = getenv("XDG_CONFIG_HOME");
    if (confdir == 0) {
        /* No XDG config dir: use a hidden file in $HOME instead. */
        sprintf(file, "/.wallfade.ini");
        confdir = getHomeDir();
    } else {
        sprintf(filename, "%s%s", confdir, file);
        if (!fileExists(filename)) {
            sprintf(file, "/.wallfade.ini");
            confdir = getHomeDir();
        }
    }
    sprintf(filename, "%s%s", confdir, file);
    if (!fileExists(filename)) {
        sprintf(filename, "./wallfade.ini"); // Useful when debugging
    }
    if (fileExists(filename)) {
        ini = iniparser_load(filename);
    }
    /* NOTE(review): when no config exists, ini stays NULL and is passed
     * to the iniparser getters and iniparser_freedict below — presumably
     * iniparser tolerates a NULL dictionary; confirm with its docs. */
    settings.smoothfunction = iniparser_getint(ini, "settings:smooth", 2);
    settings.idle = iniparser_getint(ini, "settings:idle", DEFAULT_IDLE_TIME);
    settings.fade = iniparser_getdouble(ini, "settings:fade", DEFAULT_FADE_TIME);
    /* fade is stored as its inverse so the per-frame step is fade * dt. */
    settings.fade = 1.0f / settings.fade;
    settings.center = iniparser_getboolean(ini, "settings:center", false);
    /* NOTE(review): strcpy from ini values into fixed-size buffers is
     * unbounded — an oversized config value would overflow; confirm. */
    strcpy(settings.lower, iniparser_getstring(ini, "settings:lower", "\0"));
    strcpy(
        settings.default_path,
        iniparser_getstring(ini, "paths:default", "\0")
    );
    settings.paths = malloc(MAX_MONITORS * sizeof(struct Path));
    for (int i = 0; i < MAX_MONITORS; i += 1) {
        char monitor[256] = {0};
        sprintf(monitor, "paths:monitor%d", i);
        strcpy(settings.paths[i].path, iniparser_getstring(ini, monitor, "\0"));
        /* First non-empty monitor path doubles as the default path. */
        if (strlen(settings.default_path) == 0) {
            strcpy(settings.default_path, settings.paths[i].path);
        }
    }
    iniparser_freedict(ini);
}
/*
 * Report the active configuration back over the message channel in
 * wallfade.ini syntax ([SETTINGS]/[PATHS] sections), stripping the glob
 * suffix from paths with dirname().
 *
 * Fix: dirname() may modify its argument in place OR return a pointer to
 * static storage (POSIX allows both); the old code freed dirname()'s
 * return value, which is undefined behavior on the latter platforms.
 * We now always free the strdup'd copy we own.
 */
void printConfig()
{
    messageRespond("[SETTINGS]\n");
    messageRespond("smooth = %i\n", settings.smoothfunction);
    messageRespond("idle = %i\n", settings.idle);
    messageRespond("fade = %f\n", 1.0f / settings.fade);
    messageRespond("center = %s\n", settings.center ? "TRUE" : "FALSE");
    if (settings.lower[0] != 0) {
        messageRespond("lower = %s\n", settings.lower);
    }
    messageRespond("\n[PATHS]\n");
    if (settings.default_path[0] != 0) {
        char *copy = strdup(settings.default_path);
        messageRespond("default = \"%s\"\n", dirname(copy));
        free(copy);
    }
    for (int i = 0; i < MAX_MONITORS; i++) {
        if (settings.paths[i].path[0] != 0) {
            char *copy = strdup(settings.paths[i].path);
            messageRespond("monitor%i = \"%s\"\n", i, dirname(copy));
            free(copy);
        }
    }
}
/*
 * Create (or attach to) the System V shared-memory segment used as the
 * daemon/client message channel. The segment key is the daemon's pid so
 * both sides agree on it without extra negotiation.
 *
 * NOTE(review): neither shmget() failure (-1) nor shmat() failure
 * ((void *)-1) is checked; callers dereference the result directly —
 * confirm this is acceptable or add checks at the call sites.
 */
char *createSharedMemory(size_t size, int parent)
{
    int shmid = shmget(parent, size, IPC_CREAT | S_IRUSR | S_IWUSR);
    return shmat(shmid, 0, 0);
}
/*
 * Entry point. Runs in one of two modes:
 *  - daemon: no other wallfade process exists — seed the RNG, install
 *    signal handlers, create the shared-memory channel, load the config,
 *    then parse options and enter the render loop;
 *  - client: another wallfade is running — "-m" relays a command to it
 *    over its shared-memory segment and echoes the replies.
 */
int main(int argc, char *argv[])
{
    int c;
    settings.parent = getProcIdByName("wallfade");
    if (settings.parent == -1) {
        /* Daemon-mode setup. */
        struct timespec ts;
        if (timespec_get(&ts, TIME_UTC) == 0) {
            fprintf(stderr, "Unable to get time for random!\n");
            return EXIT_FAILURE;
        }
        srandom(ts.tv_nsec ^ ts.tv_sec);
        signal(SIGINT, gotsig);
        signal(SIGKILL, gotsig); /* SIGKILL cannot actually be caught */
        signal(SIGTERM, gotsig);
        signal(SIGQUIT, gotsig);
        /* Channel keyed by our own pid; clients find it via our pid. */
        settings.shmem = createSharedMemory(MEM_SIZE, getpid());
        memset(settings.shmem, 0, MEM_SIZE);
        memset(settings.default_path, 0, PATH_MAX);
        memset(settings.lower, 0, PATH_MAX);
        settings.timer = 0;
        settings.running = true;
        settings.fading = false;
        settings.planes = NULL;
        loadConfig();
    }
    static const struct option longOpts[] = {
        { "lower", required_argument, 0, 'l' },
        { "center", required_argument, 0, 'c' },
        { "paths", required_argument, 0, 'p' },
        { "smooth", required_argument, 0, 's' },
        { "fade", required_argument, 0, 'f' },
        { "idle", required_argument, 0, 'i' },
        { "message", required_argument, 0, 'm' },
        { "help", no_argument, 0, 'h' },
        { 0, 0, 0, 0 }
    };
    int longIndex = 0;
    char paths[PATH_MAX * MAX_MONITORS] = {0};
    while ((c = getopt_long(
        argc,
        argv,
        "o:p:f:i:hcs:l:m:",
        longOpts,
        &longIndex
    )) != -1) {
        switch (c) {
        case 'f':
            /* fade stored as its inverse: per-frame step is fade * dt */
            settings.fade = 1.0f / strtof(optarg, NULL);
            break;
        case 'i':
            settings.idle = strtol(optarg, NULL, 10);
            break;
        case 'l':
            snprintf(settings.lower, PATH_MAX, "%s", optarg);
            break;
        case 'c':
            settings.center = true;
            break;
        case 's':
            settings.smoothfunction = strtol(optarg, NULL, 10);
            break;
        case 'p':
            snprintf(paths, sizeof(paths), "%s", optarg);
            break;
        case 'm':
            /* Client mode: send the message, echo replies until done. */
            if (settings.parent != -1) {
                settings.shmem = createSharedMemory(
                    MEM_SIZE,
                    settings.parent
                );
                while (settings.shmem[0] != MSG_NONE) {
                    usleep(10000);
                }
                /* snprintf: the old "%.*s" precision of MEM_SIZE could
                 * write the NUL one byte past the segment. */
                snprintf(settings.shmem, MEM_SIZE, "%s", optarg);
                while (settings.shmem[0] != MSG_DONE) {
                    if (settings.shmem[0] == MSG_PARENT) {
                        printf("%s", &settings.shmem[1]);
                        settings.shmem[0] = MSG_NONE;
                    }
                    usleep(10000);
                }
                printf("%s", &settings.shmem[1]);
                settings.shmem[0] = MSG_NONE;
                /* BUG FIX: shmdt() takes the mapped address itself; the
                 * old code passed the address of the pointer variable. */
                shmdt(settings.shmem);
            } else {
                fprintf(stderr, "No wallfade process found!\n");
                return EXIT_FAILURE;
            }
            return EXIT_SUCCESS;
        case 'h':
            help(argv[0]);
            return EXIT_SUCCESS;
        default:
            break;
        }
    }
    /* Daemon already running and no "-m": refuse to start a second. */
    if (settings.parent != -1) {
        fprintf(stderr, "Another wallfade processes is already running!\n");
        return EXIT_FAILURE;
    }
    if (strlen(paths) == 0 && strlen(settings.default_path) == 0) {
        fprintf(stderr, "No paths specified!\n");
        help(argv[0]);
        return EXIT_FAILURE;
    } else {
        if (init(argc, argv)) {
            if (parsePaths(paths, printf)) {
                /* Initial wallpapers for every detected monitor. */
                for (int i = 0; i < settings.nmon; i++) {
                    randomImages(i);
                }
                while (settings.running) {
                    update();
                }
            }
            shutdown();
        }
    }
    return EXIT_SUCCESS;
}
|
3d25pt.lbpar.c | #include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
/*
* Order-2, 3D 25 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
#ifndef min
#define min(x,y) ((x) < (y)? (x) : (y))
#endif
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/*
 * Compute *result = *x - *y for struct timeval values.
 *
 * `y` is normalized in place while borrowing/carrying between the
 * seconds and microseconds fields, so the caller's y is modified.
 * Returns 1 when the difference is negative, 0 otherwise.
 */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
    /* Borrow whole seconds into y until x->tv_usec >= y->tv_usec. */
    if (x->tv_usec < y->tv_usec) {
        int borrow = (y->tv_usec - x->tv_usec) / 1000000 + 1;
        y->tv_usec -= 1000000 * borrow;
        y->tv_sec += borrow;
    }
    /* Carry any excess microseconds in the difference into seconds. */
    if (x->tv_usec - y->tv_usec > 1000000) {
        int carry = (x->tv_usec - y->tv_usec) / 1000000;
        y->tv_usec += 1000000 * carry;
        y->tv_sec -= carry;
    }
    /* After normalization the microsecond difference is non-negative. */
    result->tv_sec = x->tv_sec - y->tv_sec;
    result->tv_usec = x->tv_usec - y->tv_usec;
    return x->tv_sec < y->tv_sec;
}
/*
 * Order-2 3D 25-point stencil driver (PLUTO/CLooG generated tiling).
 * Allocates a double-buffered grid A[2][Nz][Ny][Nx] plus a coefficient
 * field roc2, runs TESTS timed sweeps of the tiled time loop, and
 * reports per-test and minimum wall times.
 */
int main(int argc, char *argv[])
{
    int t, i, j, k, test;
    int Nx, Ny, Nz, Nt;
    /* NOTE(review): Nx/Ny/Nz (and Nt below) remain uninitialized when
     * too few command-line arguments are supplied, yet are used
     * unconditionally afterwards — confirm callers always pass them. */
    if (argc > 3) {
        /* +8 = 4-cell halo on each side for the radius-4 stencil. */
        Nx = atoi(argv[1])+8;
        Ny = atoi(argv[2])+8;
        Nz = atoi(argv[3])+8;
    }
    if (argc > 4)
        Nt = atoi(argv[4]);
    /* A[0]/A[1] are the two time-alternating buffers. */
    double ****A = (double ****) malloc(sizeof(double***)*2);
    /* NOTE(review): this first allocation is leaked — roc2 is
     * immediately reassigned a few lines below. */
    double ***roc2 = (double ***) malloc(sizeof(double**));
    A[0] = (double ***) malloc(sizeof(double**)*Nz);
    A[1] = (double ***) malloc(sizeof(double**)*Nz);
    roc2 = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
        A[0][i] = (double**) malloc(sizeof(double*)*Ny);
        A[1][i] = (double**) malloc(sizeof(double*)*Ny);
        roc2[i] = (double**) malloc(sizeof(double*)*Ny);
        for(j=0;j<Ny;j++){
            A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
            A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
            roc2[i][j] = (double*) malloc(sizeof(double)*Nx);
        }
    }
    // tile size information, including extra element to decide the list length
    int *tile_size = (int*) malloc(sizeof(int));
    tile_size[0] = -1;
    // The list is modified here before source-to-source transformations
    tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
    /* Tile sizes for the (t, z, y, x) dimensions; -1 terminates. */
    tile_size[0] = 4;
    tile_size[1] = 4;
    tile_size[2] = 32;
    tile_size[3] = 1024;
    tile_size[4] = -1;
    // for timekeeping
    int ts_return = -1;
    struct timeval start, end, result;
    double tdiff = 0.0, min_tdiff=1.e100;
    const int BASE = 1024;
    // initialize variables
    //
    srand(42);
    /* NOTE(review): initialization starts at index 1, leaving the 0
     * faces of A[0] and roc2 uninitialized — confirm the stencil bounds
     * never read them. */
    for (i = 1; i < Nz; i++) {
        for (j = 1; j < Ny; j++) {
            for (k = 1; k < Nx; k++) {
                A[0][i][j][k] = 1.0 * (rand() % BASE);
                roc2[i][j][k] = 2.0 * (rand() % BASE);
            }
        }
    }
#ifdef LIKWID_PERFMON
    LIKWID_MARKER_INIT;
#pragma omp parallel
    {
        LIKWID_MARKER_THREADINIT;
#pragma omp barrier
        LIKWID_MARKER_START("calc");
    }
#endif
    int num_threads = 1;
#if defined(_OPENMP)
    num_threads = omp_get_max_threads();
#endif
    /* 4th-order central-difference Laplacian coefficients (r = 0..4). */
    const double coef0 = -0.28472;
    const double coef1 = 0.16000;
    const double coef2 = -0.02000;
    const double coef3 = 0.00254;
    const double coef4 = -0.00018;
    for(test=0; test<TESTS; test++){
        gettimeofday(&start, 0);
        // serial execution - Addition: 6 && Multiplication: 2
        /* Copyright (C) 1991-2014 Free Software Foundation, Inc.
           This file is part of the GNU C Library.
           The GNU C Library is free software; you can redistribute it and/or
           modify it under the terms of the GNU Lesser General Public
           License as published by the Free Software Foundation; either
           version 2.1 of the License, or (at your option) any later version.
           The GNU C Library is distributed in the hope that it will be useful,
           but WITHOUT ANY WARRANTY; without even the implied warranty of
           MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
           Lesser General Public License for more details.
           You should have received a copy of the GNU Lesser General Public
           License along with the GNU C Library; if not, see
           <http://www.gnu.org/licenses/>. */
        /* This header is separate from features.h so that the compiler can
           include it implicitly at the start of every compilation. It must
           not itself include <features.h> or any other header that includes
           <features.h> because the implicit include comes before any feature
           test macros that may be defined in a source file before it first
           explicitly includes a system header. GCC knows the name of this
           header in order to preinclude it. */
        /* glibc's intent is to support the IEC 559 math functionality, real
           and complex. If the GCC (4.9 and later) predefined macros
           specifying compiler intent are available, use them to determine
           whether the overall intent is to support these features; otherwise,
           presume an older compiler has intent to support these features and
           define these macros by default. */
        /* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) /
           Unicode 6.0. */
        /* We do not support C11 <threads.h>. */
        int t1, t2, t3, t4, t5, t6, t7, t8;
        int lb, ub, lbp, ubp, lb2, ub2;
        register int lbv, ubv;
        /* Start of CLooG code */
        /* Diamond/hybrid time tiling: t1 walks time-tile wavefronts, t2 is
           the parallel (z-tile) dimension, t3/t4 tile y/x, and t5..t8 are
           the intra-tile time and space loops. */
        if ((Nt >= 1) && (Nx >= 9) && (Ny >= 9) && (Nz >= 9)) {
            for (t1=-1;t1<=2*Nt-2;t1++) {
                lbp=ceild(t1+2,2);
                ubp=min(floord(4*Nt+Nz-9,4),floord(2*t1+Nz-4,4));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
                for (t2=lbp;t2<=ubp;t2++) {
                    for (t3=max(ceild(t1-12,16),ceild(4*t2-Nz-19,32));t3<=min(min(floord(4*Nt+Ny-9,32),floord(2*t1+Ny-3,32)),floord(4*t2+Ny-9,32));t3++) {
                        for (t4=max(max(ceild(t1-508,512),ceild(4*t2-Nz-1011,1024)),ceild(32*t3-Ny-1011,1024));t4<=min(min(min(floord(4*Nt+Nx-9,1024),floord(2*t1+Nx-3,1024)),floord(4*t2+Nx-9,1024)),floord(32*t3+Nx+19,1024));t4++) {
                            for (t5=max(max(max(ceild(t1,2),ceild(4*t2-Nz+5,4)),ceild(32*t3-Ny+5,4)),ceild(1024*t4-Nx+5,4));t5<=floord(t1+1,2);t5++) {
                                for (t6=max(4*t2,-4*t1+4*t2+8*t5-3);t6<=min(min(4*t2+3,-4*t1+4*t2+8*t5),4*t5+Nz-5);t6++) {
                                    for (t7=max(32*t3,4*t5+4);t7<=min(32*t3+31,4*t5+Ny-5);t7++) {
                                        lbv=max(1024*t4,4*t5+4);
                                        ubv=min(1024*t4+1023,4*t5+Nx-5);
#pragma ivdep
#pragma vector always
                                        for (t8=lbv;t8<=ubv;t8++) {
                                            A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] = (((2.0 * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) - A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (roc2[ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (((((coef0 * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (coef1 * (((((A[ t5 % 2][ (-4*t5+t6) - 1][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 1][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 1][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 1][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 1]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 1]))) + (coef2 * (((((A[ t5 % 2][ (-4*t5+t6) - 2][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 2][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 2][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 2][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 2]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 2]))) + (coef3 * (((((A[ t5 % 2][ (-4*t5+t6) - 3][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 3][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 3][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 3][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 3]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 3]))) + (coef4 * (((((A[ t5 % 2][ (-4*t5+t6) - 4][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 4][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 4][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 4][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 4]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 4])))));;
                                        }
                                    }
                                }
                            }
                        }
                    }
                }
            }
        }
        /* End of CLooG code */
        gettimeofday(&end, 0);
        ts_return = timeval_subtract(&result, &end, &start);
        tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
        min_tdiff = MIN(min_tdiff, tdiff);
        printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
    }
    PRINT_RESULTS(4, "constant")
#ifdef LIKWID_PERFMON
#pragma omp parallel
    {
        LIKWID_MARKER_STOP("calc");
    }
    LIKWID_MARKER_CLOSE;
#endif
    // Free allocated arrays
    for(i=0; i<Nz; i++){
        for(j=0;j<Ny;j++){
            free(A[0][i][j]);
            free(A[1][i][j]);
            free(roc2[i][j]);
        }
        free(A[0][i]);
        free(A[1][i]);
        free(roc2[i]);
    }
    free(A[0]);
    free(A[1]);
    free(roc2);
    return 0;
}
|
GB_unaryop__identity_int64_int8.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__identity_int64_int8
// op(A') function: GB_tran__identity_int64_int8
// C type: int64_t
// A type: int8_t
// cast: int64_t cij = (int64_t) aij
// unaryop: cij = aij
#define GB_ATYPE \
int8_t
#define GB_CTYPE \
int64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int8_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CASTING(z, x) \
int64_t z = (int64_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_INT64 || GxB_NO_INT8)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Elementwise apply for the identity operator: Cx [p] = (int64_t) Ax [p]
// for p in [0, anz). The loop is embarrassingly parallel and split
// statically across `nthreads` OpenMP threads. Returns GrB_NO_VALUE when
// this specialization is compiled out via GB_DISABLE.
GrB_Info GB_unop__identity_int64_int8
(
    int64_t *restrict Cx,
    const int8_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        // GB_CAST_OP (defined above): Cx [p] = (int64_t) Ax [p]
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// The kernel body is shared via textual inclusion of
// GB_unaryop_transpose.c; the GB_* macros defined earlier in this file
// specialize it for the int8_t -> int64_t identity operator.
GrB_Info GB_tran__identity_int64_int8
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t **Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
#endif
}
#endif
|
seekwriter.h | /*****************************************************************************
* This file is provided under the Creative Commons Attribution 3.0 license.
*
* You are free to share, copy, distribute, transmit, or adapt this work
* PROVIDED THAT you attribute the work to the authors listed below.
* For more information, please see the following web page:
* http://creativecommons.org/licenses/by/3.0/
*
* This file is a part of SEEK (Search-based exploration of expression compendium)
* which is authored and maintained by: Qian Zhu (qzhu@princeton.edu)
*
* If you use this file, please cite the following publication:
* Qian Zhu, Aaron K Wong, Arjun Krishnan, Miriam R Aure, Alicja Tadych,
* Ran Zhang, David C Corney, Casey S Greene, Lars A Bongo,
* Vessela N Kristensen, Moses Charikar, Kai Li & Olga G Troyanskaya
* "Targeted exploration and analysis of large cross-platform human
* transcriptomic compendia" Nat Methods (2015)
*
* This file is a component of the Sleipnir library for functional genomics,
* authored by:
* Curtis Huttenhower (chuttenh@princeton.edu)
* Mark Schroeder
* Maria D. Chikina
* Olga G. Troyanskaya (ogt@princeton.edu, primary contact)
*
* If you use this library for development, or use any other Sleipnir executable
* tools, please also cite the following publication:
* Curtis Huttenhower, Mark Schroeder, Maria D. Chikina, and
* Olga G. Troyanskaya.
* "The Sleipnir library for computational functional genomics"
*****************************************************************************/
#ifndef SEEKWRITER_H
#define SEEKWRITER_H
#include "seekbasic.h"
#include "seekmap.h"
#include "seekevaluate.h"
#include "sparsematrix.h"
#include "datapair.h"
#include "seekreader.h"
#include "strassen.h"
namespace Sleipnir {
class CSeekWriter {
public:
//should be either unsigned short or utype
/**
 * Read only the presence header of a SEEK sparse matrix file: a count
 * (tType) followed by that many present-gene IDs (tType each), adding
 * every ID to `m`.
 *
 * `m` must already be initialized to size vecstrGenes.size().
 * Returns false when the file cannot be opened or is truncated
 * (previously the fread return values were ignored, so a truncated file
 * silently produced a partial map).
 */
template<class tType>
static bool ReadSeekSparseMatrixHeader(const char *fileName,
    CSeekIntIntMap &m) {
    FILE *f = fopen(fileName, "rb");
    if (f == nullptr) {
        cerr << "File not found" << endl;
        return false;
    }
    tType val, numPresent;
    if (fread((char *) (&numPresent), 1, sizeof(numPresent), f)
            != sizeof(numPresent)) {
        cerr << "Truncated sparse matrix header" << endl;
        fclose(f);
        return false;
    }
    for (size_t j = 0; j < numPresent; j++) {
        // each value stored is the ID of a present gene
        if (fread((char *) (&val), 1, sizeof(val), f) != sizeof(val)) {
            cerr << "Truncated sparse matrix header" << endl;
            fclose(f);
            return false;
        }
        m.Add((utype) val);
    }
    fclose(f);
    return true;
}
//compatibility
/**
 * Read a SEEK sparse rank matrix from disk into `mat`, converting stored
 * ranks to rank-biased-precision (RBP) scores, then symmetrically
 * normalize each entry by the square roots of its row sums.
 *
 * File layout: present-gene count + IDs, then a gene count followed by
 * (id, numEntries, {id2, rank} ...) records. Scores are mirrored so mat
 * ends up a full symmetric matrix.
 *
 * NOTE(review): the fread return values (`ret`) are never checked, so a
 * truncated file is silently accepted — confirm acceptable.
 */
template<class tType>
static bool ReadSeekSparseMatrix(const char *fileName,
    CSparseFlatMatrix<float> &mat, CSeekIntIntMap &m, const int maxRank,
    const float rbp_p, const vector <string> &vecstrGenes) {
    FILE *f = fopen(fileName, "rb");
    if (f == nullptr) {
        cerr << "File not found" << endl;
        return false;
    }
    size_t i, j;
    tType numGenes, numPresent, val;
    int ret;
    mat.Initialize(vecstrGenes.size());
    // presence header: count followed by the IDs of present genes
    ret = fread((char *) (&numPresent), 1, sizeof(numPresent), f);
    for (j = 0; j < numPresent; j++) {
        ret = fread((char *) (&val), 1, sizeof(val), f); //val stores the present gene
        m.Add((utype) val);
        mat.InitializeRow(val, maxRank * 2); //initial capacity
    }
    ret = fread((char *) (&numGenes), 1, sizeof(numGenes), f);
    // precompute RBP weights: score(rank) = (1 - p) * p^rank
    vector<float> rbp_score;
    rbp_score.resize(maxRank);
    for (i = 0; i < maxRank; i++)
        rbp_score[i] = (1.0 - rbp_p) * pow(rbp_p, (float) i);
    for (i = 0; i < numGenes; i++) {
        tType id, id2; //gene ID
        // NOTE(review): this `val` shadows the tType `val` declared above
        unsigned short numEntries, val; //rank
        ret = fread((char *) (&id), 1, sizeof(id), f);
        ret = fread((char *) (&numEntries), 1, sizeof(numEntries), f);
        for (j = 0; j < numEntries; j++) {
            ret = fread((char *) (&id2), 1, sizeof(id2), f);
            ret = fread((char *) (&val), 1, sizeof(val), f);
            tType first = id;
            tType second = id2;
            //mat is a full matrix: store the score in both triangles
            mat.Add(first, second, rbp_score[val]);
            mat.Add(second, first, rbp_score[val]);
        }
    }
    fclose(f);
    mat.Organize();
    size_t ii, jj;
    const vector <utype> &allRGenes = m.GetAllReverse();
    fprintf(stderr, "Begin calculating row sum\n");
    vector<float> vecSum;
    CSeekTools::InitVector(vecSum, vecstrGenes.size(), (float) 0);
    for (ii = 0; ii < m.GetNumSet(); ii++) {
        i = (size_t) allRGenes[ii];
        const vector <CPair<float>> &row = mat.GetRow(i);
        for (jj = 0; jj < row.size(); jj++)
            vecSum[i] += row[jj].v;
    }
    vector<float> vecSqrtSum;
    CSeekTools::InitVector(vecSqrtSum, vecstrGenes.size(), (float) 0);
    for (ii = 0; ii < m.GetNumSet(); ii++) {
        i = (size_t) allRGenes[ii];
        if (vecSum[i] == 0) continue;
        vecSqrtSum[i] = sqrtf(vecSum[i]);
    }
    fprintf(stderr, "Begin normalization using row sum\n");
    // symmetric normalization: v_ij /= sqrt(rowsum_i) * sqrt(rowsum_j)
    float rv;
#pragma omp parallel for \
shared(m, mat, allRGenes, vecSqrtSum) \
private(ii, i, j, rv) schedule(dynamic)
    for (ii = 0; ii < m.GetNumSet(); ii++) {
        i = (size_t) allRGenes[ii];
        vector<CPair<float> >::iterator row_it;
        for (row_it = mat.RowBegin(i); row_it != mat.RowEnd(i); row_it++) {
            j = (size_t) row_it->i;
            rv = row_it->v;
            // rows/cols with zero sum are left untouched (avoid div by 0)
            if (vecSqrtSum[i] == 0 || vecSqrtSum[j] == 0) continue;
            row_it->v = rv / vecSqrtSum[i] / vecSqrtSum[j];
        }
    }
    return true;
}
//compatibility
template<class tType>
static bool RemoveDominant(CSparseFlatMatrix<float> &mat,
    CSeekIntIntMap &m, const vector <string> &vecstrGenes) {
    // For each gene i: find the "dominant" gene among i's top-ranked
    // neighbors (the neighbor with the largest total weight to the other
    // top neighbors), subtract each gene's similarity to that dominant
    // gene from row i (clamped at 0), re-symmetrize by taking the max of
    // (i,j)/(j,i), and re-normalize `mat` by sqrt of row sums.
    size_t i, j;
    const size_t numGenes = vecstrGenes.size();

    // Dense scratch copies of the sparse matrix (O(N^2) memory).
    vector <vector<float>> tmp_mat(numGenes), tmp2(numGenes);
    for (i = 0; i < numGenes; i++) {
        tmp_mat[i].assign(numGenes, 0.0f);
        tmp2[i].assign(numGenes, 0.0f);
    }

    size_t ii, jj;
    const vector <utype> &allRGenes = m.GetAllReverse();
    for (ii = 0; ii < m.GetNumSet(); ii++) {
        i = (size_t) allRGenes[ii];
        vector<CPair<float> >::iterator row_it;
        for (row_it = mat.RowBegin(i); row_it != mat.RowEnd(i); row_it++) {
            j = (size_t) row_it->i;
            tmp_mat[i][j] = row_it->v;
        }
    }

    // Never partition past the end of vp: the original used a fixed 100
    // even when fewer genes are present, which is undefined behavior in
    // nth_element/sort.
    const size_t TOP = std::min((size_t) 100, (size_t) m.GetNumSet());
    fprintf(stderr, "Started removing dominant...\n");
    for (ii = 0; ii < m.GetNumSet(); ii++) {
        i = (size_t) allRGenes[ii];
        vector <CPair<float>> vp(m.GetNumSet());
        for (jj = 0; jj < m.GetNumSet(); jj++) {
            j = (size_t) allRGenes[jj];
            vp[jj].i = (utype) j;
            vp[jj].v = tmp_mat[i][j];
        }
        nth_element(vp.begin(), vp.begin() + TOP, vp.end(), CDescendingValue<float>());
        sort(vp.begin(), vp.begin() + TOP, CDescendingValue<float>());
        // Among the TOP neighbors of i, pick the one most connected to
        // the other top neighbors.
        size_t k, l;
        size_t max_ind = 0;
        float max_val = -1;
        for (k = 0; k < TOP; k++) {
            size_t this_g = vp[k].i;
            float v = 0;
            for (l = 0; l < TOP; l++) {
                size_t other_g = vp[l].i;
                if (this_g == other_g) continue;
                v += tmp_mat[this_g][other_g];
            }
            if (v > max_val) {
                max_val = v;
                max_ind = k;
            }
        }
        // BUGFIX: subtract the similarity to the dominant *gene*
        // (vp[max_ind].i). The original indexed tmp_mat with max_ind
        // itself, which is a position within vp (0..TOP-1), not a gene ID.
        const size_t dominant_g = (size_t) vp[max_ind].i;
        for (jj = 0; jj < m.GetNumSet(); jj++) {
            j = (size_t) allRGenes[jj];
            tmp2[i][j] = tmp_mat[i][j] - tmp_mat[j][dominant_g];
            if (tmp2[i][j] < 0)
                tmp2[i][j] = 0;
        }
    }

    fprintf(stderr, "Started re-normalizing matrix...\n");
    // Symmetrize: keep the larger of the two directed values.
    for (ii = 0; ii < m.GetNumSet(); ii++) {
        i = (size_t) allRGenes[ii];
        for (jj = ii + 1; jj < m.GetNumSet(); jj++) {
            j = (size_t) allRGenes[jj];
            const float mx = max(tmp2[i][j], tmp2[j][i]); // renamed: `m` shadowed the parameter
            tmp2[i][j] = mx;
            tmp2[j][i] = mx;
        }
    }

    vector<float> vecSum;
    CSeekTools::InitVector(vecSum, numGenes, (float) 0);
    for (ii = 0; ii < m.GetNumSet(); ii++) {
        i = (size_t) allRGenes[ii];
        for (jj = 0; jj < m.GetNumSet(); jj++) {
            j = (size_t) allRGenes[jj];
            vecSum[i] += tmp2[i][j];
        }
    }
    vector<float> vecSqrtSum;
    CSeekTools::InitVector(vecSqrtSum, numGenes, (float) 0);
    for (ii = 0; ii < m.GetNumSet(); ii++) {
        i = (size_t) allRGenes[ii];
        if (vecSum[i] == 0) continue;
        vecSqrtSum[i] = sqrtf(vecSum[i]);
    }
    // Write the normalized values back into the sparse matrix.
    for (ii = 0; ii < m.GetNumSet(); ii++) {
        i = (size_t) allRGenes[ii];
        vector<CPair<float> >::iterator row_it;
        for (row_it = mat.RowBegin(i); row_it != mat.RowEnd(i); row_it++) {
            j = (size_t) row_it->i;
            if (vecSqrtSum[i] == 0 || vecSqrtSum[j] == 0) continue;
            row_it->v = tmp2[i][j] / vecSqrtSum[i] / vecSqrtSum[j];
        }
    }
    return true;
}
//compatibility
template<class tType>
static bool WriteSparseMatrix(CDataPair &Dat,
    vector <map<tType, unsigned short>> &umat,
    const vector <string> &vecstrGenes, const char *fileName) {
    // Serializes a sparse rank matrix. On-disk layout:
    //   tType numPresent
    //   tType presentGene[numPresent]          (genes found in Dat)
    //   tType numGenes                         (rows with >= 1 entry)
    //   per row: tType geneId, unsigned short numEntries,
    //            numEntries x { tType otherGene, unsigned short rank }
    // Returns false on open failure, write failure, or a row too large
    // for the 16-bit entry count.
    FILE *f = fopen(fileName, "wb");
    if (f == nullptr) {
        fprintf(stderr, "File not found %s\n", fileName);
        return false;
    }
    // Write exactly one binary value; false on failure. (The original
    // ignored fwrite's return value, silently producing truncated files
    // e.g. on a full disk.)
    auto write1 = [f](const void *src, size_t sz) { return fwrite(src, sz, 1, f) == 1; };

    size_t i;
    vector <tType> veciGenes(vecstrGenes.size());
    for (i = 0; i < vecstrGenes.size(); i++)
        veciGenes[i] = (tType) Dat.GetGeneIndex(vecstrGenes[i]);
    CSeekIntIntMap mm(vecstrGenes.size());
    for (i = 0; i < vecstrGenes.size(); i++)
        if (veciGenes[i] != (tType) -1)
            mm.Add((utype) i);
    auto numPresent = (tType) mm.GetNumSet();
    //1 tType
    if (!write1(&numPresent, sizeof(numPresent))) { fclose(f); return false; }
    const vector <utype> &allR = mm.GetAllReverse();
    //numPresent tType
    for (i = 0; i < numPresent; i++) {
        auto pr = (tType) allR[i];
        if (!write1(&pr, sizeof(pr))) { fclose(f); return false; }
    }
    tType numGenes = 0;
    for (i = 0; i < vecstrGenes.size(); i++)
        if (!umat[i].empty())
            numGenes++;
    //1 tType
    if (!write1(&numGenes, sizeof(numGenes))) { fclose(f); return false; }
    for (i = 0; i < vecstrGenes.size(); i++) {
        if (umat[i].empty())
            continue;
        // The on-disk count field is 16 bit; refuse to silently truncate.
        if (umat[i].size() > 65535) {
            fprintf(stderr, "Row %zu has too many entries (%zu)\n", i, umat[i].size());
            fclose(f);
            return false;
        }
        auto numEntries = (unsigned short) umat[i].size(); //should be 1000
        //1 tType
        auto gi = (tType) i;
        if (!write1(&gi, sizeof(gi))) { fclose(f); return false; }
        //1 unsigned short
        if (!write1(&numEntries, sizeof(numEntries))) { fclose(f); return false; }
        typename map<tType, unsigned short>::iterator it;
        for (it = umat[i].begin(); it != umat[i].end(); it++) {
            tType first = it->first;
            unsigned short second = it->second;
            //1 tType + 1 unsigned short per entry
            if (!write1(&first, sizeof(first)) || !write1(&second, sizeof(second))) {
                fclose(f);
                return false;
            }
        }
    }
    fclose(f);
    return true;
}
//compatiblity
template<class tType>
static bool GetSparseRankMatrix(CDataPair &Dat,
    vector <map<tType, unsigned short>> &umat, int maxRank,
    const vector <string> &vecstrGenes) {
    // Builds an upper-triangular sparse rank matrix from a DAB: for each
    // gene, keep its maxRank highest-scoring neighbors;
    // umat[first][second] stores the best (smallest) rank observed for
    // the unordered pair.
    size_t i, j;
    vector <tType> veciGenes(vecstrGenes.size());
    for (i = 0; i < vecstrGenes.size(); ++i)
        veciGenes[i] = (tType) Dat.GetGeneIndex(vecstrGenes[i]);
    umat.assign(vecstrGenes.size(), map<tType, unsigned short>());
    fprintf(stderr, "Start reading DAB...\n");
    tType s, t;
    for (i = 0; i < vecstrGenes.size(); i++) {
        if ((s = veciGenes[i]) == (tType) -1) continue;
        if (i % 1000 == 0) fprintf(stderr, "Start reading gene %zu...\n", i);
        float *v = Dat.GetFullRow(s);
        vector <AResultFloat> vv(vecstrGenes.size());
        for (j = 0; j < vecstrGenes.size(); j++) {
            vv[j].i = (utype) j;
            vv[j].f = -9999;  // sentinel: absent / NaN scores sort last
            if ((t = veciGenes[j]) == (tType) -1) continue;
            if (CMeta::IsNaN(v[t])) continue;
            vv[j].f = v[t];
        }
        // Clamp the partition point so nth_element/sort never run past
        // the end (the original was undefined behavior for
        // maxRank > #genes, and for a negative maxRank).
        const size_t rankLim =
            (maxRank < 0) ? 0 : std::min((size_t) maxRank, vv.size());
        nth_element(vv.begin(), vv.begin() + rankLim, vv.end());
        sort(vv.begin(), vv.begin() + rankLim);
        // Only the first rankLim entries matter; the original looped over
        // every gene and filtered with `j < maxRank`.
        for (j = 0; j < rankLim; j++) {
            // Store each pair with the smaller gene ID first.
            auto first = (tType) i;
            auto second = (tType) vv[j].i;
            if ((tType) i >= (tType) vv[j].i) {
                first = (tType) vv[j].i;
                second = (tType) i;
            }
            typename map<tType, unsigned short>::iterator it;
            if ((it = umat[first].find(second)) == umat[first].end())
                umat[first][second] = (unsigned short) j;
            else
                umat[first][second] = std::min(it->second, (unsigned short) j);
        }
        delete[] v;
    }
    fprintf(stderr, "Finished reading DAB\n");
    return true;
}
//To be used after NormalizeDAB
template<class tType>
static bool ConvertToSparseMatrix(CDataPair &Dat,
    vector <map<tType, unsigned short>> &umat,
    const vector <string> &vecstrGenes, const float cutoff_val) {
    // Thresholds a normalized DAB into a sparse upper-triangular matrix:
    // every gene pair (gi, gj) with gi < gj whose value exceeds
    // cutoff_val is stored as floor(value * 100) in an unsigned short.
    const size_t numGenes = vecstrGenes.size();
    vector <tType> veciGenes(numGenes);
    size_t gi, gj;
    for (gi = 0; gi < numGenes; ++gi)
        veciGenes[gi] = (tType) Dat.GetGeneIndex(vecstrGenes[gi]);
    umat.resize(numGenes);
    for (gi = 0; gi < numGenes; gi++)
        umat[gi] = map<tType, unsigned short>();
    for (gi = 0; gi < numGenes; gi++) {
        tType s = veciGenes[gi];
        if (s == (tType) -1)
            continue;  // gene absent from this dataset
        if (gi % 1000 == 0) fprintf(stderr, "Start reading gene %zu...\n", gi);
        for (gj = gi + 1; gj < numGenes; gj++) {
            tType t = veciGenes[gj];
            if (t == (tType) -1)
                continue;
            float r = Dat.Get(s, t);
            if (CMeta::IsNaN(r))
                continue;
            if (r > cutoff_val)
                umat[gi][gj] = (unsigned short) (r * 100.0);
        }
    }
    fprintf(stderr, "Finished reading DAB\n");
    return true;
}
//to be used for sparse matrix created from cutting-off z-scores
template<class tType>
static bool ReadSeekSparseMatrix(const char *fileName,
    CSparseFlatMatrix<float> &mat, CSeekIntIntMap &m,
    const vector <string> &vecstrGenes, const int initialCapacity,
    const float exponent) {
    // Loads a sparse z-score matrix (values stored on disk as z * 100 in
    // an unsigned short), optionally raises each value to `exponent`
    // (must be >= 1.0), and mirrors every edge so `mat` is a full
    // symmetric matrix. Returns false on open failure, bad exponent, or
    // a truncated file.
    if (exponent < 1.0) {
        fprintf(stderr, "Exponent must be >=1.0\n");
        return false;
    }
    FILE *f = fopen(fileName, "rb");
    if (f == nullptr) {
        cerr << "File not found" << endl;
        return false;
    }
    // Read exactly one binary value; false on short read. (The original
    // ignored fread's return value, so a truncated file silently
    // produced garbage data.)
    auto read1 = [f](void *dst, size_t sz) { return fread(dst, sz, 1, f) == 1; };

    size_t i, j;
    tType numGenes, numPresent, val;
    mat.Initialize(vecstrGenes.size());
    if (!read1(&numPresent, sizeof(numPresent))) { fclose(f); return false; }
    for (j = 0; j < numPresent; j++) {
        if (!read1(&val, sizeof(val))) { fclose(f); return false; } //val = gene ID
        m.Add((utype) val);
        mat.InitializeRow(val, initialCapacity); //initial capacity
    }
    if (!read1(&numGenes, sizeof(numGenes))) { fclose(f); return false; }
    for (i = 0; i < numGenes; i++) {
        tType id, id2; //gene ID
        unsigned short numEntries, sval; //z-score * 100.0
        if (!read1(&id, sizeof(id)) || !read1(&numEntries, sizeof(numEntries))) {
            fclose(f);
            return false;
        }
        for (j = 0; j < numEntries; j++) {
            if (!read1(&id2, sizeof(id2)) || !read1(&sval, sizeof(sval))) {
                fclose(f);
                return false;
            }
            float fval = (float) sval / 100.0;
            if (exponent > 1.0)
                fval = pow(fval, exponent);
            mat.Add(id, id2, fval);
            mat.Add(id2, id, fval);
        }
    }
    fclose(f);
    mat.Organize();
    return true;
}
//===============================================================
//not currently used
static bool ReadSparseMatrix(const char *fileName,
vector <map<utype, float>> &mat,
CSeekIntIntMap &m, int maxRank, float rbp_p,
const vector <string> &vecstrGenes);
//not currently used
static bool ProductNorm(const vector <map<utype, float>> &mat1,
const vector <map<utype, float>> &mat2, const CSeekIntIntMap &m1,
const CSeekIntIntMap &m2, vector <map<utype, float>> &re);
//================================================================
static bool SumSparseMatrix(CSparseFlatMatrix<float> &mat1,
CSparseFlatHalfMatrix<float> &res, const CSeekIntIntMap &mi, float w);
static bool SumSparseMatrix(CSparseFlatMatrix<float> &mat1,
CHalfMatrix<float> &res, const CSeekIntIntMap &mi, float w);
static bool NormalizeDAB(CDataPair &Dat, const vector <string> &vecstrGenes,
//bool cutoff, float cutoff_val,
bool expTransform, bool divideNorm, bool subtractNorm);
static bool GetGeneAverage(CDataPair &Dat,
const vector <string> &vecstrGenes,
vector<float> &vecResult, bool logit = false, float top_percent = 1.0);
static bool GetGenePresence(CDataPair &Dat,
const vector <string> &vecstrGenes,
vector<char> &vecResult);
static bool GetDatasetSinfo(CDataPair &Dat, float &mean,
float &stdev);
static void TopologicalOverlap(CDataPair &Dat,
const vector <string> &vecstrGenes);
};
}
#endif
|
fitness.h | void calFit(Chromo *population, int N, int inicio, int fin)
{
int errores;
int k, i, j;
#pragma omp for
for (k = inicio; k < fin; k++)
{
errores = 0;
for (i = 0; i < N; i++)
{
for (j = 0; j < N; j++)
{
if (i != j)
{
if ((population[k].config[i] - i) == (population[k].config[j] - j))
{
errores++;
}
if ((population[k].config[i] + i) == (population[k].config[j] + j))
{
errores++;
}
}
}
}
population[k].fitness = errores;
}
}
void copyBest(Chromo *Best, Chromo local, int N)
{
    /* Copy chromosome `local` into *Best. Intended to run inside an
     * OpenMP parallel region: the orphaned `for` shares the N config
     * copies across the team's threads. */
    int i;
#pragma omp for
    for (i = 0; i < N; i++)
    {
        Best->config[i] = local.config[i];
    }
    /* NOTE(review): every thread in the team executes this store after
     * the worksharing loop's implicit barrier. All threads write the same
     * value, but it is formally a data race — consider guarding with
     * `#pragma omp single`. TODO: confirm all callers pass the same
     * `local` on every thread. */
    Best->fitness = local.fitness;
}
|
cuda_array_bc_nogp.h | //
// Array type with variable padding in x- and y-direction.
// Knows about boundary conditions, routines operating on this datatype are to compute
// them on the fly
//
// Memory Layout
//
// rows: 0...My-1 ... My-1 + pad_y
// cols: 0...Nx-1 ... Nx-1 + pad_x
//
// 0 My-1 ... My - 1 + pad_y
// Nx - 1 |--------- ... ------| |
// |--------- ... ------| |
// ...
// 0 |--------- ... ------| |
//
// idx = n * (My + pad_y) + m
//
// columns (m, y-direction) are consecutive in memory
//
//
// Mapping of CUDA threads on the array:
//
// Columns: 0..My - 1 + pad_y -> col = blockIdx.x * blockDim.x + threadIdx.x
// Rows: 0..Nx - 1 + pad_x -> row = blockIdx.y * blockDim.y + threadIdx.y
//
// dimBlock = (blocksize_row, blocksize_col)
// dimGrid = (My + pad_y) / blocksize_row, (My + pad_y) / blocksize_col
//
// Ghost points are to be computed on the fly, not stored in memory
// They can be accessed by the address object
#ifndef cuda_array_bc_H_
#define cuda_array_bc_H_
#include <iostream>
#include <iomanip>
#include <fstream>
#include <map>
#include <functional>
#include <sstream>
#include "2dads_types.h"
#include "bounds.h"
#include "address.h"
#include "error.h"
#include "allocators.h"
#if defined(__clang__) && defined(__CUDA__) && defined(__CUDA_ARCH__)
#warning cuda_array_bc_nogp: compiling for device
#include "cuda_types.h"
#include <cuda.h>
#include <cuda_runtime_api.h>
#define LAMBDACALLER __device__
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line)
{
    // Translate a failed CUDA API status into a gpu_error exception that
    // carries the CUDA error string together with the call site.
    if (code == cudaSuccess)
        return;
    std::stringstream err_str;
    err_str << "GPUassert: " << cudaGetErrorString(code) << "\t file: " << file << ", line: " << line << "\n";
    throw gpu_error(err_str.str());
}
// Verify last kernel launch
#define gpuStatus() { gpuVerifyLaunch(__FILE__, __LINE__); }
inline void gpuVerifyLaunch(const char* file, int line)
{
cudaThreadSynchronize();
cudaError_t error = cudaGetLastError();
if(error != cudaSuccess)
{
std::stringstream err_str;
err_str << "GPUassert: " << cudaGetErrorString(error) << "\t file: " << file << ", line: " << line << "\n";
throw gpu_error(err_str.str());
}
}
#endif
#if !defined(__CUDA__) && !defined(__CUDA_ARCH__)
#warning cuda_array_bc_nogp: compiling for host
// Minimal host-side stand-in for CUDA's dim3 so grid/block geometry
// members still compile when building without a CUDA toolchain.
// NOTE(review): unlike CUDA's dim3 (which default-constructs to
// (1, 1, 1)), these members are left uninitialized — confirm callers
// always assign them before use.
struct dim3{
    int x;
    int y;
    int z;
};
#define LAMBDACALLER
#endif //__CUDACC__
// Device-side CUDA kernels used by cuda_array_bc_nogp. All kernels are
// compiled only for device passes (guarded by __CUDA_ARCH__).
namespace device
{
#if defined(__clang__) && defined(__CUDA__) && defined(__CUDA_ARCH__)
    // Return true if row and column are within geom (include padded columns
    // if is_transformed is true).
    // Return false if row or column is outside the geometry.
    __device__ inline bool good_idx(const size_t row, const size_t col, const twodads::slab_layout_t geom, const bool is_transformed)
    {
        return((row < geom.get_nx()) && (col < (is_transformed ? geom.get_my() + geom.get_pad_y() : geom.get_my())));
    }

    // Fill the device-resident table of per-time-level pointers:
    // tlev_ptr[t] = data + t * nelem_per_t. Launched with <<<1, 1>>>.
    template <typename T>
    __global__
    void kernel_set_tlev_ptr(T* data, T** tlev_ptr, const size_t tlevs, const twodads::slab_layout_t geom)
    {
        for(size_t t = 0; t < tlevs; t++)
        {
            tlev_ptr[t] = &data[t * geom.get_nelem_per_t()];
        }
    }

    // Apply the lambda op_func (with type given by template parameter O)
    // op_func(T, size_t, size_t, slab_layout_t) — one element per thread.
    template <typename T, typename O>
    __global__
    void kernel_apply_single(T* array_d_t, O device_func, const twodads::slab_layout_t geom, const bool is_transformed)
    {
        const size_t col{cuda :: thread_idx :: get_col()};
        const size_t row{cuda :: thread_idx :: get_row()};
        const size_t index{row * (geom.get_my() + geom.get_pad_y()) + col};
        if (good_idx(row, col, geom, is_transformed))
            array_d_t[index] = device_func(array_d_t[index], row, col, geom);
    }

    // Same as kernel_apply_single, but each thread handles ELEMS
    // consecutive columns (manual loop unrolling).
    template<typename T, typename O, size_t ELEMS>
    __global__
    void kernel_apply_unroll(T* array_d_t, O device_func, const twodads::slab_layout_t geom, const bool is_transformed)
    {
        const size_t row{cuda :: thread_idx :: get_row()};
        const size_t col_0{cuda :: thread_idx :: get_col() * ELEMS};
        const size_t index_0{row * (geom.get_my() + geom.get_pad_y()) + col_0};
        for(size_t n = 0; n < ELEMS; n++)
        {
            if (good_idx(row, col_0 + n, geom, is_transformed))
                array_d_t[index_0 + n] = device_func(array_d_t[index_0 + n], row, col_0 + n, geom);
        }
    }

    // Perform element-wise arithmetic operation lhs[idx] = op(lhs[idx], rhs[idx])
    template<typename T, typename O>
    __global__
    void kernel_elementwise(T* lhs, T* rhs, O device_func, const twodads::slab_layout_t geom, const bool is_transformed)
    {
        const size_t col{cuda :: thread_idx :: get_col()};
        const size_t row{cuda :: thread_idx :: get_row()};
        const size_t index{row * (geom.get_my() + geom.get_pad_y()) + col};
        if(good_idx(row, col, geom, is_transformed))
        {
            lhs[index] = device_func(lhs[index], rhs[index]);
        }
    }

    // Unrolled variant of kernel_elementwise: ELEMS columns per thread.
    template<typename T, typename O, size_t ELEMS>
    __global__
    void kernel_elementwise_unroll(T* lhs, T* rhs, O device_func, const twodads::slab_layout_t geom, const bool is_transformed)
    {
        const size_t col_0{cuda :: thread_idx :: get_col() * ELEMS};
        const size_t row{cuda :: thread_idx :: get_row()};
        const size_t index_0{row * (geom.get_my() + geom.get_pad_y()) + col_0};
        for(size_t n = 0; n < ELEMS; n++)
        {
            if(good_idx(row, col_0 + n, geom, is_transformed))
            {
                lhs[index_0 + n] = device_func(lhs[index_0 + n], rhs[index_0 + n]);
            }
        }
    }

    // For accessing elements in GPU kernels and interpolating ghost points.
    // NOTE(review): the body is commented out, so the address object is
    // never actually constructed on the device — confirm device-side
    // ghost-point interpolation is unused before relying on it.
    template <typename T>
    __global__
    void kernel_init_address(address_t<T>** my_address,
                             const twodads::slab_layout_t geom,
                             const twodads::bvals_t<T> bvals)
    {
        //*my_address = new address_t<T>(geom, bvals);
    }

    // Counterpart to kernel_init_address; also currently a no-op.
    template <typename T>
    __global__
    void kernel_free_address(address_t<T>** my_address)
    {
        //delete *my_address;
    }

    // Rotate the time-level pointer table: the oldest level's buffer is
    // recycled as the new level 0, all others shift up by one.
    template <typename T>
    __global__
    void kernel_advance_tptr(T** tlev_ptr, const size_t tlevs)
    {
        T* tmp{tlev_ptr[tlevs - 1]};
        for(size_t t = tlevs - 1; t > 0; t--)
        {
            tlev_ptr[t] = tlev_ptr[t - 1];
        }
        tlev_ptr[0] = tmp;
    }
#endif // __CUDA_ARCH__
}
template <typename T, template <typename> class allocator> class cuda_array_bc_nogp;
// Tag-dispatched backend implementations: each function has a device
// overload (selected by allocator_device<T>) and a host overload
// (selected by allocator_host<T>).
namespace detail
{
    // Initialize data_tlev_ptr:
    // data_tlev_ptr[0] = data[0]
    // data_tlev_ptr[1] = data[0] + nelem
    // ...
#if defined(__clang__) && defined(__CUDA__) && defined(__CUDA_ARCH__)
    template <typename T>
    inline void impl_set_data_tlev_ptr(T* data, T** data_tlev_ptr, const size_t tlevs, const twodads::slab_layout_t& geom, allocator_device<T>)
    {
        device :: kernel_set_tlev_ptr<<<1, 1>>>(data, data_tlev_ptr, tlevs, geom);
        gpuErrchk(cudaPeekAtLastError());
    }

    // Initialize ghost point interpolator.
    // The next four functions are a bit messy: the device implementation
    // uses address_2ptr, an address_t<T>**, while the host implementation
    // uses an address_t<T>*.
    template <typename T>
    inline address_t<T>* impl_init_address(address_t<T>** &address_2ptr, address_t<T>* &address_ptr, const twodads::slab_layout_t& geom, const twodads::bvals_t<T>& bvals, allocator_device<T>)
    {
        // NOTE(review): kernel_init_address is currently a no-op and
        // address_ptr is returned without being assigned here — confirm
        // the device-side ghost-point path is unused.
        gpuErrchk(cudaMalloc(&address_2ptr, sizeof(address_t<T>**)));
        device :: kernel_init_address<<<1, 1>>>(address_2ptr, geom, bvals);
        gpuErrchk(cudaPeekAtLastError());
        return(address_ptr);
    }

    template <typename T>
    inline void impl_delete_address(address_t<T>** &address_2ptr, address_t<T>* &address_ptr, allocator_device<T>)
    {
        device :: kernel_free_address<<<1, 1>>>(address_2ptr);
        gpuErrchk(cudaPeekAtLastError());
    }

    // Get data_tlev_ptr for a given time level. Returns a device pointer.
    template <typename T>
    inline T* impl_get_data_tlev_ptr(T** data_tlev_ptr, const size_t tidx, const size_t tlevs, allocator_device<T>)
    {
        // BUGFIX: the original copied the entire pointer table into a
        // new'ed host array that was never freed, leaking host memory on
        // every call. Copy back only the requested entry instead.
        T* result{nullptr};
        gpuErrchk(cudaMemcpy(&result, data_tlev_ptr + tidx, sizeof(T*), cudaMemcpyDeviceToHost));
        return(result);
    }

    // Launch the unrolled apply kernel on the device.
    template <typename T, typename F>
    inline void impl_apply(T* data_ptr, F myfunc, const twodads::slab_layout_t& geom, const bool transformed, const dim3& grid_unroll, const dim3& block, allocator_device<T>)
    {
        device :: kernel_apply_unroll<T, F, cuda::elem_per_thread><<<grid_unroll, block>>>(data_ptr, myfunc, geom, transformed);
        gpuErrchk(cudaPeekAtLastError());
    }

    // Launch the unrolled element-wise kernel on the device.
    template <typename T, typename F>
    inline void impl_elementwise(T* x, T* rhs, F myfunc, const twodads::slab_layout_t& geom, const bool transformed, const dim3& grid_unroll, const dim3& block, allocator_device<T>)
    {
        device :: kernel_elementwise_unroll<T, F, cuda::elem_per_thread><<<grid_unroll, block>>>(x, rhs, myfunc, geom, transformed);
        gpuErrchk(cudaPeekAtLastError());
    }

    // Rotate the time-level pointer table on the device.
    template <typename T>
    inline void impl_advance(T** tlev_ptr, const size_t tlevs, allocator_device<T>)
    {
        device :: kernel_advance_tptr<<<1, 1>>>(tlev_ptr, tlevs);
        gpuErrchk(cudaPeekAtLastError());
    }
#endif //__CUDACC__

    // Host: time-level pointers are plain offsets into the data block.
    template <typename T>
    inline void impl_set_data_tlev_ptr(T* data, T** data_tlev_ptr, const size_t tlevs, const twodads::slab_layout_t& sl, allocator_host<T>)
    {
        for(size_t t = 0; t < tlevs; t++)
        {
            data_tlev_ptr[t] = data + t * sl.get_nelem_per_t();
        }
    }

    // Host: construct the ghost-point interpolator directly.
    template <typename T>
    inline void impl_init_address(address_t<T>** &address_2ptr, address_t<T>* &address_ptr, const twodads::slab_layout_t& geom, const twodads::bvals_t<T>& bvals, allocator_host<T>)
    {
        address_ptr = new address_t<T>(geom, bvals);
    }

    // Host: release the interpolator (delete on nullptr is harmless).
    template <typename T>
    inline void impl_delete_address(address_t<T>** &address_2ptr, address_t<T>* &address_ptr, allocator_host<T>)
    {
        delete address_ptr;
        address_ptr = nullptr;
    }

    template <typename T>
    inline T* impl_get_data_tlev_ptr(T** data_tlev_ptr, const size_t tidx, const size_t tlevs, allocator_host<T>)
    {
        return(data_tlev_ptr[tidx]);
    }

    // Host: apply host_func(value, n, m, geom) to every element of one
    // time level. The inner loop is manually unrolled by 4; the remainder
    // (nelem_m % 4 elements) is handled sequentially.
    template <typename T, typename F>
    void impl_apply(T* data_ptr, F host_func, const twodads::slab_layout_t& geom, const bool is_transformed, const dim3& grid, const dim3& block, allocator_host<T>)
    {
        size_t index{0};
        size_t m{0};
        // Loop over the padded elements if the array is transformed.
        const size_t nelem_m{is_transformed ? geom.get_my() + geom.get_pad_y() : geom.get_my()};
        // Indices always use the padded row length.
        const size_t my_plus_pad{geom.get_my() + geom.get_pad_y()};
#pragma omp parallel for private(index, m)
        for(size_t n = 0; n < geom.get_nx(); n++)
        {
            for(m = 0; m < nelem_m - (nelem_m % 4); m += 4)
            {
                index = n * my_plus_pad + m;
                data_ptr[index    ] = host_func(data_ptr[index    ], n, m    , geom);
                data_ptr[index + 1] = host_func(data_ptr[index + 1], n, m + 1, geom);
                data_ptr[index + 2] = host_func(data_ptr[index + 2], n, m + 2, geom);
                data_ptr[index + 3] = host_func(data_ptr[index + 3], n, m + 3, geom);
            }
            for(; m < nelem_m; m++)
            {
                index = n * my_plus_pad + m;
                data_ptr[index] = host_func(data_ptr[index], n, m, geom);
            }
        }
    }

    // Host: element-wise combination lhs[idx] = host_func(lhs[idx], rhs[idx]);
    // same unroll-by-4 scheme as impl_apply above.
    template <typename T, typename F>
    void impl_elementwise(T* lhs, T* rhs, F host_func, const twodads::slab_layout_t& geom, const bool is_transformed, const dim3& grid, const dim3& block, allocator_host<T>)
    {
        size_t index{0};
        size_t m{0};
        size_t n{0};
        // Iterate over the padding elements iff the array is transformed;
        // indices always use the padded row length.
        const size_t nelem_m{is_transformed ? geom.get_my() + geom.get_pad_y() : geom.get_my()};
        const size_t my_plus_pad{geom.get_my() + geom.get_pad_y()};
#pragma omp parallel for private(index, m)
        for(n = 0; n < geom.get_nx(); n++)
        {
            for(m = 0; m < nelem_m - (nelem_m % 4); m += 4)
            {
                index = n * my_plus_pad + m;
                lhs[index    ] = host_func(lhs[index    ], rhs[index    ]);
                lhs[index + 1] = host_func(lhs[index + 1], rhs[index + 1]);
                lhs[index + 2] = host_func(lhs[index + 2], rhs[index + 2]);
                lhs[index + 3] = host_func(lhs[index + 3], rhs[index + 3]);
            }
            for(; m < nelem_m; m++)
            {
                index = n * my_plus_pad + m;
                lhs[index] = host_func(lhs[index], rhs[index]);
            }
        }
    }

    // Host: rotate the time-level pointer table in place.
    template <typename T>
    inline void impl_advance(T** tlev_ptr, const size_t tlevs, allocator_host<T>)
    {
        T* tmp{tlev_ptr[tlevs - 1]};
        for(size_t t = tlevs - 1; t > 0; t--)
        {
            tlev_ptr[t] = tlev_ptr[t - 1];
        }
        tlev_ptr[0] = tmp;
    }
}
/**
.. cpp:class:: template <typename T, template <typename> class allocator> cuda_array_bc_npgp
Basic 2d vector used in 2dads.
It can store the data of fields, at several time steps.
It interpolates values of ghost cells by bval_interpolators.
Memory Layout
The class maps a two-dimensional array to memory space.
Data positions are given in rows and columns, where columns are along
the x-direction and rows are along the y-direction:
.. math::
n = 0\ldots N_x - 1, N_x, \ldots N_x + \mathrm{pad}_x - 1
Okay, lets leave math mode
*/
/*
Columns are stored consecutively in memory. To traverse the array in memory define
.. math::
idx = n * (M_y + pad_y) + m
*/
/*
0 My-1 ... My - 1 + pad_y
Nx - 1 |--------- ... ------| |
|--------- ... ------| |
...
0 |--------- ... ------| |
idx = n * (My + pad_y) + m
columns (m, y-direction) are consecutive in memory
Mapping of CUDA threads on the array:
Columns: 0..My - 1 + pad_y -> col = blockIdx.x * blockDim.x + threadIdx.x
Rows: 0..Nx - 1 + pad_x -> row = blockIdx.y * blockDim.y + threadIdx.y
dimBlock = (blocksize_row, blocksize_col)
dimGrid = (My + pad_y) / blocksize_row, (My + pad_y) / blocksize_col
Ghost points are to be computed on the fly, not stored in memory
They can be accessed by the address object
*/
template <typename T, template <typename> class allocator>
class cuda_array_bc_nogp{
public:
/**
.. cpp:type:: allocator_type = my_allocator_traits<T, allocator> :: allocator_type
Declaration of a type alias for the used memory allocator
*/
using allocator_type = typename my_allocator_traits<T, allocator> :: allocator_type;
/**
.. cpp:type:: deleter_type = my_allocator_traits<T, allocator> :: deleter_type
Declaration of a type alias for the used deleter
*/
using deleter_type = typename my_allocator_traits<T, allocator> :: deleter_type;
/**
.. cpp:type:: ptr_type = std::unique_ptr<T, deleter_type>
Type alias of the internally used pointers
*/
using ptr_type = std::unique_ptr<T, deleter_type>;
// T** pointers
using p_allocator_type = typename my_allocator_traits<T*, allocator> :: allocator_type;
using p_deleter_type = typename my_allocator_traits<T*, allocator> :: deleter_type;
using pptr_type = std::unique_ptr<T*, p_deleter_type>;
/**
.. cpp:function:: cuda_array_bc_nogp(const twodads::slab_layout_t, const twodads::bvals_t<T>, size_t _tlevs)
Default constructor. Takes information on slab layout and boundary conditions.
Stores data for _tlevs time levels
*/
cuda_array_bc_nogp(const twodads::slab_layout_t, const twodads::bvals_t<T>, size_t _tlevs);
cuda_array_bc_nogp(const cuda_array_bc_nogp<T, allocator>* rhs);
cuda_array_bc_nogp(const cuda_array_bc_nogp<T, allocator>& rhs);
/**
.. cpp:function:: cuda_array_bc_nogp::~cuda_array_bc_nogp()
Free all allocated resources
*/
~cuda_array_bc_nogp()
{
    // Tear down the ghost-point interpolator via the backend-specific
    // implementation; the data buffers themselves are released by their
    // unique_ptr owners.
    detail :: impl_delete_address(address_2ptr, address_ptr, allocator_type{});
};
/**
.. cpp:function:: template <typename F> inline void cuda_array_bc_nogp::apply(F myfunc, const size_t tidx)
Apply F on all array elements at tidx
====== ====================================================
Input Description
====== ====================================================
myfunc F, functor taking 2 T as input
tidx const size_t - Time index on which myfunc is applied
====== ====================================================
*/
template <typename F> inline void apply(F myfunc, const size_t tidx)
{
    // Apply myfunc(value, row, col, geom) to every element of time level
    // tidx, dispatching to the host or device backend via allocator_type.
    check_bounds(tidx + 1, 0, 0);
    detail :: impl_apply(get_tlev_ptr(tidx), myfunc, get_geom(), is_transformed(tidx), get_grid_unroll(), get_block(), allocator_type{});
}
/**
.. cpp:function:: template <typename F> inline void cuda_array_bc_nogp::elementwise(F myfunc, const cuda_array_bc_nogp<T, allocator>& rhs, const size_t tidx_rhs, const siz_t tidx_lhs)
Evaluates myfunc(l, r) elementwise on elements l, r of arrays lhs rhs.
Stores result in lhs.
Input:
======== ==================================================
Input Description
======== ==================================================
myfunc callable, takes two T as input, returns T
rhs const cuda_array_bc_nogp<T, allocator>&, RHS array
tidx_rhs const size_t, time index of RHS array
tidx_lhs const size_t, time index of LHS array
======== ==================================================
*/
template<typename F> inline void elementwise(F myfunc, const cuda_array_bc_nogp<T, allocator>& rhs,
    const size_t tidx_lhs, const size_t tidx_rhs)
{
    // Combine this array with rhs element-wise; the result is stored in
    // this array at tidx_lhs. Both arrays must have identical geometry
    // and matching transform state.
    rhs.check_bounds(tidx_rhs + 1, 0, 0);
    check_bounds(tidx_lhs + 1, 0, 0);
    assert(rhs.get_geom() == get_geom());
    assert(is_transformed(tidx_lhs) == rhs.is_transformed(tidx_rhs));
    // The assert above guarantees both flags agree; `|` just forwards the
    // common value to the backend.
    detail :: impl_elementwise(get_tlev_ptr(tidx_lhs), rhs.get_tlev_ptr(tidx_rhs), myfunc, get_geom(), is_transformed(tidx_lhs) | rhs.is_transformed(tidx_rhs), get_grid(), get_block(), allocator_type{});
}
/**
.. cpp:function:: template <typename F> inline void cuda_array_bc_nogp::elementwise(F myfunc, const size_t tidx_lhs, const siz_t tidx_rhs)
Evaluates myfunc(l1, l2) elementwise on elements l1, l2 of arrays lhs on time indices t1 and t2.
Stores result in lhs at tidx t1.
======== =========================================
Input Description
======== =========================================
myfunc callable, takes two T as input, returns T
tidx_lhs const size_t, time index t1 for array
tidx_rhs const size_t, time index t2 for array
======== =========================================
*/
template<typename F> inline void elementwise(F myfunc, const size_t tidx_lhs, const size_t tidx_rhs)
{
    // Combine two time levels of this array in place; the result is
    // stored at tidx_lhs.
    //std::cout << "elementwise w/o rhs, tidx_rhs = " << tidx_rhs << ", tidx_lhs = " << tidx_lhs << std::endl;
    check_bounds(tidx_rhs + 1, 0, 0);
    check_bounds(tidx_lhs + 1, 0, 0);
    // NOTE(review): unlike the two-array overload there is no assert that
    // both levels share the same transform state; `|` treats the pair as
    // transformed if either level is — confirm this is intentional.
    detail :: impl_elementwise(get_tlev_ptr(tidx_lhs), get_tlev_ptr(tidx_rhs), myfunc, get_geom(), is_transformed(tidx_lhs) | is_transformed(tidx_rhs), get_grid(), get_block(), allocator_type{});
}
/**
.. cpp:function:: inline void cuda_array_bc_nogp::copy(const size_t tidx_dst, const size_t tidx_src)
Copy data from tidx_src to tidx_dst.
======== =======================================
Input Description
======== =======================================
tidx_dst const size_t, time index of destination
tidx_src const size_t, time index of source
======== =======================================
*/
inline void copy(const size_t tidx_dst, const size_t tidx_src)
{
    // Duplicate one time level of this array into another and propagate
    // the source level's transform flag.
    check_bounds(tidx_dst + 1, 0, 0);
    check_bounds(tidx_src + 1, 0, 0);
    my_alloc.copy(get_tlev_ptr(tidx_src), get_tlev_ptr(tidx_src) + get_geom().get_nelem_per_t(), get_tlev_ptr(tidx_dst));
    set_transformed(tidx_dst, is_transformed(tidx_src));
}
/**
.. cpp:function:: inline void cuda_array_bc_nogp::copy(size_t tidx_dst, const cuda_array_bc_nogp<T, allocator>& src, size_t tidx_src)
Copy data from array rhs at tidx_src to tidx_dst.
======== ==========================================================
Input Description
======== ==========================================================
rhs const cuda_array_bc_nogp<T, allocator>& rhs: source array
tidx_dst const size_t, time index of Destination
tidx_src const size_t, time index of source
======== ==========================================================
*/
inline void copy(size_t tidx_dst, const cuda_array_bc_nogp<T, allocator>& src, size_t tidx_src)
{
    // Copy one time level from another array (which must share this
    // array's geometry) and propagate the source's transform flag.
    check_bounds(tidx_dst + 1, 0, 0);
    src.check_bounds(tidx_src + 1, 0, 0);
    assert(get_geom() == src.get_geom());
    my_alloc.copy(src.get_tlev_ptr(tidx_src), src.get_tlev_ptr(tidx_src) + src.get_geom().get_nelem_per_t(), get_tlev_ptr(tidx_dst));
    set_transformed(tidx_dst, src.is_transformed(tidx_src));
}
// Move data from t_src to t_dst, zero out t_src
/**
.. cpp:function: inline void cuda_array_bc_nogp::move(const size_t tidx_dst, const size_t tidx_src)
Move data from tidx_src to tidx_dst, zero out data at tidx_src
======== =======================================
Input Description
======== =======================================
tidx_dst const size_t, time index of destination
tidx_src const size_t, time index of source
======== =======================================
- const size_t tidx_dst: Destination time index
- const size_t tidx_src: Source time index
*/
inline void move(const size_t tidx_dst, const size_t tidx_src)
{
    // Move data from tidx_src to tidx_dst, then zero out the source level.
    check_bounds(tidx_dst + 1, 0, 0);
    check_bounds(tidx_src + 1, 0, 0);
    my_alloc.copy(get_tlev_ptr(tidx_src), get_tlev_ptr(tidx_src) + get_geom().get_nelem_per_t(), get_tlev_ptr(tidx_dst));
    // BUGFIX: the original always zeroed time level 0, leaving the source
    // data intact whenever tidx_src != 0 — contrary to the documented
    // "zero out t_src" contract.
    apply([] LAMBDACALLER (T dummy, const size_t n, const size_t m, twodads::slab_layout_t geom) -> T {return(0.0);}, tidx_src);
}
/**
.. cpp:function:: inline void cuda_array_bc_nogp::advance()
Advance data from tidx -> tidx + 1. Zero out data at tidx 0, discard data at last
*/
inline void advance()
{
    // Cycle the time-level pointers (tidx -> tidx + 1; the oldest buffer
    // is recycled as the new level 0), zero the new level 0, and shift
    // the per-level transform flags to follow their data.
    detail :: impl_advance(get_tlev_ptr(), get_tlevs(), allocator_type{});
    apply([] LAMBDACALLER (T dummy, const size_t n, const size_t m, twodads::slab_layout_t geom) -> T {return(0.0);}, 0);
    for(size_t tidx = get_tlevs() - 1; tidx > 0; tidx--)
        set_transformed(tidx, is_transformed(tidx - 1));
    set_transformed(0, false);
}
/**
 .. cpp:function:: inline size_t cuda_array_bc_nogp :: get_nx() const

 Returns the number of discretization points along the x-direction.
*/
inline size_t get_nx() const {return(get_geom().get_nx());};
/**
 .. cpp:function:: inline size_t cuda_array_bc_nogp::get_my() const

 Returns the number of discretization points along the y-direction.
*/
inline size_t get_my() const {return(get_geom().get_my());};
/**
 .. cpp:function:: inline size_t cuda_array_bc_nogp::get_tlevs() const

 Returns the number of time levels stored by this array.
*/
inline size_t get_tlevs() const {return(tlevs);};
/**
 .. cpp:function:: inline twodads::slab_layout_t cuda_array_bc_nogp::get_geom() const

 Returns the layout (geometry) of the array.
*/
inline twodads::slab_layout_t get_geom() const {return(geom);};
/**
 .. cpp:function:: template <typename T> inline twodads::bvals_t<T> cuda_array_bc_nogp::get_bvals() const

 Returns the boundary values of the array.
*/
inline twodads::bvals_t<T> get_bvals() const {return(boundaries);};
// We are working with 2 pointer levels, since we instantiate an address object
// in a cuda kernel in the constructor. That way, we can just pass this
// pointer to all cuda kernels that need an address object.
// Call a separate kernel in the destructor to delete it.
// Unfortunately, this means we have to use 2 pointer levels also in cpu functions.
// Device-side address object (pointer-to-pointer, see note above).
inline address_t<T>** get_address_2ptr() const {return(address_2ptr);};
// Host-side address object (single pointer level).
inline address_t<T>* get_address_ptr() const {return(address_ptr);};
/**
 .. cpp:function:: inline dim3 get_grid() const

 Returns the grid layout for CUDA kernels.
*/
inline dim3 get_grid() const {return grid;};
/**
 .. cpp:function:: inline dim3 get_grid_unroll() const

 Returns the grid layout for CUDA kernels that operate with manual loop-unrolling.
*/
inline dim3 get_grid_unroll() const {return grid_unroll;};
/**
 .. cpp:function:: inline dim3 get_block() const

 Returns the block size for CUDA kernels.
*/
inline dim3 get_block() const {return block;};
/**
 .. cpp:function:: template <typename T> inline T* cuda_array_bc_nogp::get_data() const

 Returns the pointer to the data array (all time levels, contiguous).
*/
inline T* get_data() const {return data.get();};
/*
 .. cpp:function:: template <typename T> inline T** cuda_array_bc_nogp::get_tlev_ptr() const

 Returns the pointer to the array of per-time-level data pointers.
*/
inline T** get_tlev_ptr() const {return data_tlev_ptr.get();};
// Pointer to device data at time level t
/*
 .. cpp:function:: template <typename T> inline T* cuda_array_bc_nogp::get_tlev_ptr(const size_t tidx) const

 Returns the pointer to the data at time level tidx.

 ===== =============================
 Input Description
 ===== =============================
 tidx const size_t, time level tidx
 ===== =============================
*/
inline T* get_tlev_ptr(const size_t tidx) const
{
// Validate tidx before dereferencing; the lookup itself is allocator-specific.
check_bounds(tidx + 1, 0, 0);
return(detail :: impl_get_data_tlev_ptr(get_tlev_ptr(), tidx, get_tlevs(), allocator_type{}));
};
// Set true if transformed
/*
 .. cpp:function:: inline bool cuda_array_bc_nogp<T, allocator<T>> :: is_transformed(const size_t tidx)

 Returns true if the array is transformed, false if the data is in
 configuration space.

 ====== =======================================================
 Input Description
 ====== ======================================================
 tidx Time index where to check whether array is transformed
 ====== ======================================================
*/
inline bool is_transformed(const size_t tidx) const {check_bounds(tidx + 1, 0, 0); return(transformed[tidx]);};
// Mark time level tidx as transformed (val = true) or in configuration
// space (val = false); returns the value just stored.
inline bool set_transformed(const size_t tidx, const bool val)
{
check_bounds(tidx + 1, 0, 0);
transformed[tidx] = val;
return(transformed[tidx]);
};
private:
// Boundary values/conditions for this array.
const twodads::bvals_t<T> boundaries;
// Slab geometry (nx, my, padding, element counts).
const twodads::slab_layout_t geom;
// Number of time levels stored.
const size_t tlevs;
// Bounds checker, constructed with (tlevs, nx, my).
const bounds check_bounds;
// Per-time-level flag: true when the level holds transformed data.
std::vector<bool> transformed;
// Allocator for the element data.
allocator_type my_alloc;
// Allocator for the array of per-time-level pointers.
p_allocator_type my_palloc;
// The cuda implementation uses this one. address_t is instantiated once on the device
address_t<T>** address_2ptr;
// The host implementation uses this one.
address_t<T>* address_ptr;
// block and grid for access without ghost points, use these normally
const dim3 block;
const dim3 grid;
const dim3 grid_unroll;
// Size of shared memory bank
const size_t shmem_size_col;
// Array data is on device
// Pointer to device data
ptr_type data;
// Pointer to each time stage. Pointer to array of pointers on device
pptr_type data_tlev_ptr;
};
// Construct an array with the given geometry, boundary values, and number
// of time levels. Allocates tlevs * nelem_per_t elements in one chunk,
// wires up the per-time-level pointers, instantiates the address object,
// and zeroes every time level.
template <typename T, template<typename> class allocator>
cuda_array_bc_nogp<T, allocator> :: cuda_array_bc_nogp (const twodads::slab_layout_t _geom, const twodads::bvals_t<T> _bvals, const size_t _tlevs) :
boundaries(_bvals),
geom(_geom),
tlevs(_tlevs),
check_bounds(get_tlevs(), get_nx(), get_my()),
transformed{std::vector<bool>(get_tlevs(), 0)},
address_2ptr{nullptr},
address_ptr{nullptr},
// CUDA launch geometry is only meaningful in the clang CUDA device pass;
// NOTE(review): in host/non-CUDA builds block/grid are zero-initialized --
// confirm no host code path launches kernels with these.
#if defined(__clang__) && defined(__CUDA__) && defined(__CUDA_ARCH__)
block(dim3(cuda::blockdim_row, cuda::blockdim_col)),
grid(dim3(((get_my() + get_geom().get_pad_y()) + cuda::blockdim_row - 1) / cuda::blockdim_row,
((get_nx() + get_geom().get_pad_x()) + cuda::blockdim_col - 1) / cuda::blockdim_col)),
grid_unroll(grid.x / cuda :: elem_per_thread, grid.y),
# else
block{0, 0, 0},
grid{0, 0, 0},
grid_unroll{0, 0, 0},
#endif
// One column of shared memory per block.
shmem_size_col(get_nx() * sizeof(T)),
// All time levels live in one contiguous allocation.
data(my_alloc.allocate(get_tlevs() * get_geom().get_nelem_per_t())),
data_tlev_ptr(my_palloc.allocate(get_tlevs()))
{
// Set the pointer in array_tlev_ptr to data[0], data[0] + get_nelem_per_t(), data[0] + 2 * get_nelem_per_t() ...
detail :: impl_set_data_tlev_ptr(get_data(), get_tlev_ptr(), get_tlevs(), get_geom(), allocator_type{});
// Initialize the address object
detail :: impl_init_address(address_2ptr, address_ptr, get_geom(), get_bvals(), allocator_type{});
// Zero-fill every time level so the array starts in a defined state.
for(size_t tidx = 0; tidx < tlevs; tidx++)
apply([] LAMBDACALLER (T dummy, const size_t n, const size_t m, twodads::slab_layout_t geom) -> T {return(0.0);}, tidx);
}
// Deep-copy construction from a pointer to another array: delegates to the
// main constructor for allocation, then copies all time levels and the
// per-level pointer table.
template <typename T, template <typename> class allocator>
cuda_array_bc_nogp<T, allocator> :: cuda_array_bc_nogp(const cuda_array_bc_nogp<T, allocator>* rhs) :
cuda_array_bc_nogp(rhs -> get_geom(), rhs -> get_bvals(), rhs -> get_tlevs())
{
my_alloc.copy(rhs -> get_data(), rhs -> get_data() + get_tlevs() * get_geom().get_nelem_per_t(), get_data());
my_palloc.copy(rhs -> get_tlev_ptr(), rhs -> get_tlev_ptr() + get_tlevs(), get_tlev_ptr());
};
// Copy constructor: delegates to the main constructor for allocation, then
// deep-copies all time levels and the per-level pointer table.
// NOTE(review): the transformed flags of rhs are not copied -- the new
// array starts with all levels marked untransformed; confirm intended.
template <typename T, template <typename> class allocator>
cuda_array_bc_nogp<T, allocator> :: cuda_array_bc_nogp(const cuda_array_bc_nogp<T, allocator>& rhs) :
cuda_array_bc_nogp(rhs.get_geom(), rhs.get_bvals(), rhs.get_tlevs())
{
my_alloc.copy(rhs.get_data(), rhs.get_data() + get_tlevs() * get_geom().get_nelem_per_t(), get_data());
my_palloc.copy(rhs.get_tlev_ptr(), rhs.get_tlev_ptr() + get_tlevs(), get_tlev_ptr());
};
#endif // cuda_array_bc_H_
|
3d25pt.lbpar.c | #include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
/*
* Order-2, 3D 25 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
#ifndef min
#define min(x,y) ((x) < (y)? (x) : (y))
#endif
/* Compute *result = *x - *y for `struct timeval` values.
 *
 * The carry handling normalizes *y in place, so the caller's y may be
 * modified (this matches the classic glibc example the code follows).
 * After normalization result->tv_usec is guaranteed non-negative.
 *
 * Returns 1 if the difference is negative, otherwise 0.
 */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
    /* Borrow whole seconds into y's microseconds when x has fewer. */
    if (x->tv_usec < y->tv_usec)
    {
        const int borrow = (y->tv_usec - x->tv_usec) / 1000000 + 1;
        y->tv_usec -= 1000000 * borrow;
        y->tv_sec  += borrow;
    }
    /* Push excess microseconds into y's seconds when the gap exceeds 1s. */
    if (x->tv_usec - y->tv_usec > 1000000)
    {
        const int carry = (x->tv_usec - y->tv_usec) / 1000000;
        y->tv_usec += 1000000 * carry;
        y->tv_sec  -= carry;
    }
    /* tv_usec is certainly positive here. */
    result->tv_sec  = x->tv_sec  - y->tv_sec;
    result->tv_usec = x->tv_usec - y->tv_usec;
    /* Negative difference iff x's (normalized) seconds are smaller. */
    return x->tv_sec < y->tv_sec;
}
/*
 * Benchmark driver: runs the order-2, 3D 25-point stencil TESTS times
 * over an Nx x Ny x Nz grid (each dimension padded by 8 for the 4-deep
 * halo) for Nt time steps and reports per-test wall-clock time.
 *
 * Fixes relative to the generated original:
 *  - missing command-line sizes now abort with a usage message instead
 *    of leaving Nx/Ny/Nz/Nt uninitialized;
 *  - the throw-away malloc immediately overwritten through `roc2`
 *    (a leak) is removed;
 *  - initialization covers index 0 and the second time plane A[1],
 *    both of which the stencil reads but the original never wrote;
 *  - A and tile_size are freed on exit; `register` (removed in C++17,
 *    no effect in modern C) is dropped.
 */
int main(int argc, char *argv[])
{
  int t, i, j, k, test;
  int Nx, Ny, Nz, Nt;

  /* All four size arguments are mandatory. */
  if (argc < 5) {
    fprintf(stderr, "usage: %s Nx Ny Nz Nt\n", argv[0]);
    return EXIT_FAILURE;
  }
  Nx = atoi(argv[1]) + 8;  /* +8 = 4 halo cells on each side */
  Ny = atoi(argv[2]) + 8;
  Nz = atoi(argv[3]) + 8;
  Nt = atoi(argv[4]);

  /* A holds two time planes (ping-pong buffering); roc2 is the
   * coefficient field. */
  double ****A = (double ****) malloc(sizeof(double***)*2);
  double ***roc2;
  A[0] = (double ***) malloc(sizeof(double**)*Nz);
  A[1] = (double ***) malloc(sizeof(double**)*Nz);
  roc2 = (double ***) malloc(sizeof(double**)*Nz);
  for (i = 0; i < Nz; i++) {
    A[0][i] = (double**) malloc(sizeof(double*)*Ny);
    A[1][i] = (double**) malloc(sizeof(double*)*Ny);
    roc2[i] = (double**) malloc(sizeof(double*)*Ny);
    for (j = 0; j < Ny; j++) {
      A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
      A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
      roc2[i][j] = (double*) malloc(sizeof(double)*Nx);
    }
  }

  /* Tile size information, including an extra element to mark the end
   * of the list. The list is modified here before source-to-source
   * transformations. */
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
  tile_size[0] = 24;
  tile_size[1] = 24;
  tile_size[2] = 32;
  tile_size[3] = 2048;
  tile_size[4] = -1;

  /* Timekeeping. */
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff = 1.e100;
  const int BASE = 1024;

  /* Initialize every point, including index 0 and the halo: the stencil
   * reads up to 4 cells around each interior point. A[1] is seeded from
   * A[0] because the first time step reads the "previous" plane. */
  srand(42);
  for (i = 0; i < Nz; i++) {
    for (j = 0; j < Ny; j++) {
      for (k = 0; k < Nx; k++) {
        A[0][i][j][k] = 1.0 * (rand() % BASE);
        roc2[i][j][k] = 2.0 * (rand() % BASE);
        A[1][i][j][k] = A[0][i][j][k];
      }
    }
  }

#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
#pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
#pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif

  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif

  /* 25-point stencil coefficients: center plus four shells. */
  const double coef0 = -0.28472;
  const double coef1 = 0.16000;
  const double coef2 = -0.02000;
  const double coef3 = 0.00254;
  const double coef4 = -0.00018;

  for (test = 0; test < TESTS; test++) {
    gettimeofday(&start, 0);
    /* Tiled time-space loop nest generated by PLUTO/CLooG; kept verbatim. */
    int t1, t2, t3, t4, t5, t6, t7, t8;
    int lb, ub, lbp, ubp, lb2, ub2;
    int lbv, ubv;
    /* Start of CLooG code */
    if ((Nt >= 1) && (Nx >= 9) && (Ny >= 9) && (Nz >= 9)) {
      for (t1=-1;t1<=floord(Nt-1,3);t1++) {
        lbp=max(ceild(t1,2),ceild(6*t1-Nt+2,6));
        ubp=min(floord(4*Nt+Nz-9,24),floord(12*t1+Nz+6,24));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
        for (t2=lbp;t2<=ubp;t2++) {
          for (t3=max(max(max(0,ceild(3*t1-3*t2-2,4)),ceild(3*t1-6,8)),ceild(24*t2-Nz-19,32));t3<=min(min(min(floord(4*Nt+Ny-9,32),floord(12*t1+Ny+15,32)),floord(24*t2+Ny+11,32)),floord(24*t1-24*t2+Nz+Ny+13,32));t3++) {
            for (t4=max(max(max(max(0,ceild(3*t1-3*t2-254,256)),ceild(3*t1-510,512)),ceild(24*t2-Nz-2035,2048)),ceild(32*t3-Ny-2035,2048));t4<=min(min(min(min(floord(4*Nt+Nx-9,2048),floord(12*t1+Nx+15,2048)),floord(24*t2+Nx+11,2048)),floord(32*t3+Nx+19,2048)),floord(24*t1-24*t2+Nz+Nx+13,2048));t4++) {
              for (t5=max(max(max(max(max(0,ceild(24*t2-Nz+5,4)),ceild(32*t3-Ny+5,4)),ceild(2048*t4-Nx+5,4)),3*t1),6*t1-6*t2+1);t5<=min(min(min(min(min(floord(24*t1-24*t2+Nz+18,4),Nt-1),3*t1+5),6*t2+4),8*t3+6),512*t4+510);t5++) {
                for (t6=max(max(24*t2,4*t5+4),-24*t1+24*t2+8*t5-23);t6<=min(min(24*t2+23,-24*t1+24*t2+8*t5),4*t5+Nz-5);t6++) {
                  for (t7=max(32*t3,4*t5+4);t7<=min(32*t3+31,4*t5+Ny-5);t7++) {
                    lbv=max(2048*t4,4*t5+4);
                    ubv=min(2048*t4+2047,4*t5+Nx-5);
#pragma ivdep
#pragma vector always
                    for (t8=lbv;t8<=ubv;t8++) {
                      A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] = (((2.0 * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) - A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (roc2[ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (((((coef0 * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (coef1 * (((((A[ t5 % 2][ (-4*t5+t6) - 1][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 1][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 1][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 1][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 1]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 1]))) + (coef2 * (((((A[ t5 % 2][ (-4*t5+t6) - 2][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 2][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 2][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 2][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 2]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 2]))) + (coef3 * (((((A[ t5 % 2][ (-4*t5+t6) - 3][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 3][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 3][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 3][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 3]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 3]))) + (coef4 * (((((A[ t5 % 2][ (-4*t5+t6) - 4][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 4][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 4][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 4][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 4]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 4])))));;
                    }
                  }
                }
              }
            }
          }
        }
      }
    }
    /* End of CLooG code */
    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    min_tdiff = MIN(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }
  PRINT_RESULTS(4, "constant")
#ifdef LIKWID_PERFMON
#pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif

  /* Free allocated arrays (the original leaked A and tile_size). */
  for (i = 0; i < Nz; i++) {
    for (j = 0; j < Ny; j++) {
      free(A[0][i][j]);
      free(A[1][i][j]);
      free(roc2[i][j]);
    }
    free(A[0][i]);
    free(A[1][i]);
    free(roc2[i]);
  }
  free(A[0]);
  free(A[1]);
  free(roc2);
  free(A);
  free(tile_size);
  return 0;
}
|
utils.h | #ifndef _UTILS_
#define _UTILS_
#include <cmath>
#include <cstdio>
#include <cstdlib>
#include <iostream>

#include "common.h"
#include "cusparse.h"
// Split `cols` matrix columns across `ngpu` devices into contiguous,
// near-equal chunks. The first ngpu-1 devices each receive
// round(cols/ngpu) columns; the last device takes whatever remains.
// counts[i]/displs[i] receive the chunk size and starting column of
// device i (both arrays must have room for ngpu entries).
void equal_col_partition(int ngpu, int cols, int* counts, int* displs)
{
    const int chunk = round((double)cols / ngpu);
    int device = 0;
    while (device < ngpu - 1)
    {
        displs[device] = device * chunk;
        counts[device] = chunk;
        ++device;
    }
    // The last device absorbs the rounding difference.
    displs[ngpu - 1] = (ngpu - 1) * chunk;
    counts[ngpu - 1] = cols - (ngpu - 1) * chunk;
}
// Partition `ncols` CSC columns over `ngpu` devices so that the number of
// nonzeros per device is as balanced as possible.
//
// ngpu:   number of devices
// ncols:  number of columns
// nnz:    total number of nonzeros
// cols:   CSC column-pointer array (length ncols + 1)
// counts: out, number of columns assigned to each device (length ngpu)
// displs: out, first column index of each device (length ngpu)
//
// Fixes vs. the original: col_elements was leaked, and counts[0]/displs[0]
// (plus any unused trailing devices) were read or left uninitialized.
void equal_nnz_partition(int ngpu, int ncols, int nnz, const int* cols, int* counts, int* displs)
{
    // Per-column nnz, derived from the CSC column pointers.
    int* col_elements = (int*)malloc(sizeof(int) * ncols);
    for(int i = 0; i < ncols; i++){
        col_elements[i] = cols[i + 1] - cols[i];
    }
    // Binary-search the smallest per-device nnz capacity for which a
    // greedy left-to-right packing fits within ngpu devices.
    int low = 0, high = nnz;
    while(low < high)
    {
        int mid = (low + high) / 2;
        // Count how many extra devices a capacity of `mid` would require.
        int sum = 0, need = 0;
        for(int i = 0; i < ncols; i++){
            if (sum + col_elements[i] > mid) {
                sum = col_elements[i];
                need++;
            } else {
                sum += col_elements[i];
            }
        }
        if (need < ngpu)
            high = mid;
        else
            low = mid + 1;
    }
    // Initialize all outputs: devices that receive no columns keep
    // count 0, and device 0 always starts at column 0.
    for(int g = 0; g < ngpu; g++){
        counts[g] = 0;
        displs[g] = 0;
    }
    // Greedily assign contiguous columns using the capacity found above.
    int sum = 0, counter = 0;
    for(int i = 0; i < ncols; i++){
        if(sum + col_elements[i] > low){
            sum = col_elements[i];
            counter++;
            counts[counter] = 1;
            displs[counter] = i;
        } else {
            sum += col_elements[i];
            counts[counter]++;
        }
    }
    free(col_elements);  // was leaked in the original
}
// Print `length` elements of a 1-D array to stdout, comma-separated and
// newline-terminated. Uses operator<< so any streamable element type
// prints correctly -- the original's printf("%i") misformatted (UB for)
// non-int instantiations such as double.
template<typename T>
void print_1darray(T *input, int length)
{
    for (int i = 0; i < length; i++)
        std::cout << input[i] << ", ";
    std::cout << "\n";
}
/*
__forceinline__ __device__
static double atomicAdd(double *addr, double val)
{
double old = *addr, assumed;
do
{
assumed = old;
old = __longlong_as_double(
atomicCAS((unsigned long long int*)addr,
__double_as_longlong(assumed),
__double_as_longlong(val+assumed)));
}while(assumed != old);
return old;
}*/
// Warp-level butterfly reduction using __shfl_xor: after the loop every
// participating lane holds the sum of the warp's `sum` values.
// WARP_SIZE is expected from common.h.
// NOTE(review): __shfl_xor is the pre-CUDA-9 intrinsic; newer toolkits
// require __shfl_xor_sync with an explicit lane mask -- confirm the
// targeted CUDA version.
template<typename vT>
__forceinline__ __device__
vT sum_32_shfl(vT sum)
{
#pragma unroll
for(int mask = WARP_SIZE / 2 ; mask > 0 ; mask >>= 1)
sum += __shfl_xor(sum, mask);
return sum;
}
/*struct assembly_timer {
timeval t1, t2;
struct timezone tzone;
void start() {
gettimeofday(&t1, &tzone);
}
double stop() {
gettimeofday(&t2, &tzone);
double elapsedTime = 0;
elapsedTime = (t2.tv_sec - t1.tv_sec) * 1000.0; // sec to ms
elapsedTime += (t2.tv_usec - t1.tv_usec) / 1000.0; // us to ms
return elapsedTime;
}
};*/
// Report a failed cuSPARSE call. cusparseStatus_t is an enum, so it must
// be printed as an integer: the original passed it to a %s conversion,
// which is undefined behavior (and would crash or print garbage).
void check_cusparse_kernel(cusparseStatus_t cudaerr)
{
    if (cudaerr != CUSPARSE_STATUS_SUCCESS)
        printf("cuda kernel fail, err = %d\n", (int)cudaerr);
}
// Exchange the values pointed to by `a` and `b`.
// Safe when a == b (the value is simply written back).
template<typename T>
void swap(T *a , T *b)
{
    T held = *b;
    *b = *a;
    *a = held;
}
// quick sort key-value pair (child function)
// Lomuto-style partition of `length` key/value pairs: the pivot value is
// moved to its final sorted position and its index is returned; smaller
// keys end up to its left, the rest to its right. Values are permuted in
// lockstep with their keys.
// NOTE(review): this is only correct for pivot_index == 0 -- the loop
// reads key[pivot_index + i] (pivot-relative) but writes key[small_length]
// (absolute), and small_length is seeded with pivot_index. The only caller,
// quick_sort_key_val_pair, always passes 0; do not call with other values.
template<typename iT, typename vT>
int partition(iT *key, vT *val, int length, int pivot_index)
{
int i = 0 ;
int small_length = pivot_index;
iT pivot = key[pivot_index];
// Park the pivot at the end of the range.
swap<iT>(&key[pivot_index], &key[pivot_index + (length - 1)]);
swap<vT>(&val[pivot_index], &val[pivot_index + (length - 1)]);
// Sweep smaller keys to the front, tracking the boundary in small_length.
for(; i < length; i++)
{
if(key[pivot_index+i] < pivot)
{
swap<iT>(&key[pivot_index+i], &key[small_length]);
swap<vT>(&val[pivot_index+i],&val[small_length]);
small_length++;
}
}
// Restore the pivot to its final sorted slot.
swap<iT>(&key[pivot_index + length - 1], &key[small_length]);
swap<vT>(&val[pivot_index + length - 1],&val[small_length]);
return small_length;
}
// quick sort key-value pair (main function)
// Recursively sorts `length` key/value pairs in place by key, keeping
// each value paired with its key. Uses partition() with the first
// element as pivot.
template<typename iT, typename vT>
void quick_sort_key_val_pair(iT *key, vT *val, int length)
{
    // Zero or one element: already sorted.
    if (length == 0 || length == 1)
        return;
    // After partitioning, keys [0, pivot_pos) are smaller than the pivot
    // and keys (pivot_pos, length) are not; the pivot itself is final.
    const int pivot_pos = partition<iT, vT>(key, val, length, 0);
    quick_sort_key_val_pair<iT, vT>(key, val, pivot_pos);
    quick_sort_key_val_pair<iT, vT>(key + pivot_pos + 1, val + pivot_pos + 1, length - pivot_pos - 1);
}
/*
template<typename iT>
void move_block(iT* first,
iT* last,
iT* result)
{
//memcpy(result, first, sizeof(iT) * (last - first));
while (first != last)
{
*result = *first;
++result;
++first;
}
}
template<typename iT, typename vT>
void serial_merge(iT* key_left_start,
iT* key_left_end,
iT* key_right_start,
iT* key_right_end,
iT* key_output,
vT* val_left_start,
vT* val_left_end,
vT* val_right_start,
vT* val_right_end,
vT* val_output)
{
while(key_left_start != key_left_end && key_right_start != key_right_end)
{
bool which = *key_right_start < *key_left_start;
//*key_output++ = std::move(which ? *key_right_start++ : *key_left_start++);
*key_output++ = which ? *key_right_start++ : *key_left_start++;
*val_output++ = which ? *val_right_start++ : *val_left_start++;
}
//std::move( key_left_start, key_left_end, key_output );
move_block<iT>(key_left_start, key_left_end, key_output);
move_block<vT>(val_left_start, val_left_end, val_output);
//std::move( key_right_start, key_right_end, key_output );
move_block<iT>(key_right_start, key_right_end, key_output);
move_block<vT>(val_right_start, val_right_end, val_output);
}
// merge sequences [key_left_start,key_left_end) and [key_right_start,key_right_end)
// to output [key_output, key_output+(key_left_end-key_left_start)+(key_right_end-key_right_start))
template<typename iT, typename vT>
void parallel_merge(iT* key_left_start,
iT* key_left_end,
iT* key_right_start,
iT* key_right_end,
iT* key_output,
vT* val_left_start,
vT* val_left_end,
vT* val_right_start,
vT* val_right_end,
vT* val_output)
{
const size_t MERGE_CUT_OFF = 2000;
if( key_left_end - key_left_start + key_right_end - key_right_start <= MERGE_CUT_OFF)
{
serial_merge<iT, vT>(key_left_start, key_left_end, key_right_start, key_right_end, key_output,
val_left_start, val_left_end, val_right_start, val_right_end, val_output);
}
else
{
iT *key_left_middle, *key_right_middle;
vT *val_left_middle, *val_right_middle;
if(key_left_end - key_left_start < key_right_end - key_right_start)
{
key_right_middle = key_right_start + (key_right_end - key_right_start) / 2;
val_right_middle = val_right_start + (val_right_end - val_right_start) / 2;
key_left_middle = std::upper_bound(key_left_start, key_left_end, *key_right_middle);
val_left_middle = val_left_start + (key_left_middle - key_left_start);
}
else
{
key_left_middle = key_left_start + (key_left_end - key_left_start) / 2;
val_left_middle = val_left_start + (val_left_end - val_left_start) / 2;
key_right_middle = std::lower_bound(key_right_start, key_right_end, *key_left_middle);
val_right_middle = val_right_start + (key_right_middle - key_right_start);
}
iT* key_output_middle = key_output + (key_left_middle - key_left_start) + (key_right_middle - key_right_start);
iT* val_output_middle = val_output + (val_left_middle - val_left_start) + (val_right_middle - val_right_start);
#pragma omp task
parallel_merge<iT, vT>(key_left_start, key_left_middle, key_right_start, key_right_middle, key_output,
val_left_start, val_left_middle, val_right_start, val_right_middle, val_output);
parallel_merge<iT, vT>(key_left_middle, key_left_end, key_right_middle, key_right_end, key_output_middle,
val_left_middle, val_left_end, val_right_middle, val_right_end, val_output_middle);
#pragma omp taskwait
}
}
// sorts [key_start,key_end).
// key_temp[0:key_end-key_start) is temporary buffer supplied by caller.
// result is in [key_start,key_end) if inplace==true,
// otherwise in key_temp[0:key_end-key_start).
template<typename iT, typename vT>
void parallel_merge_sort(iT* key_start,
iT* key_end,
iT* key_temp,
vT* val_start,
vT* val_end,
vT* val_temp,
bool inplace)
{
const size_t SORT_CUT_OFF = 500;
if(key_end - key_start <= SORT_CUT_OFF)
{
//std::stable_sort(key_start, key_end);
int list_length = key_end - key_start;
quick_sort_key_val_pair(key_start, val_start, list_length);
if(!inplace)
{
//std::move( key_start, key_end, key_temp );
move_block<iT>(key_start, key_end, key_temp);
move_block<vT>(val_start, val_end, val_temp);
}
}
else
{
iT* key_middle = key_start + (key_end - key_start) / 2;
vT* val_middle = val_start + (val_end - val_start) / 2;
iT* key_temp_middel = key_temp + (key_middle - key_start);
vT* val_temp_middel = val_temp + (val_middle - val_start);
iT* key_temp_end = key_temp + (key_end - key_start);
vT* val_temp_end = val_temp + (val_end - val_start);
#pragma omp task
parallel_merge_sort<iT, vT>(key_start, key_middle, key_temp,
val_start, val_middle, val_temp,
!inplace);
parallel_merge_sort<iT, vT>(key_middle, key_end, key_temp_middel,
val_middle, val_end, val_temp_middel,
!inplace);
#pragma omp taskwait
if(inplace)
parallel_merge<iT, vT>(key_temp, key_temp_middel, key_temp_middel, key_temp_end, key_start,
val_temp, val_temp_middel, val_temp_middel, val_temp_end, val_start);
else
parallel_merge<iT, vT>(key_start, key_middle, key_middle, key_end, key_temp,
val_start, val_middle, val_middle, val_end, val_temp);
}
}
// OpenMP tasks do not run in parallel unless launched inside a thread team.
// This outer wrapper shows how to create the thread team and run the top-level call.
template<typename iT, typename vT>
void do_parallel_merge_sort(iT* key_start,
iT* key_end,
iT* key_temp,
vT* val_start,
vT* val_end,
vT* val_temp,
bool inplace)
{
// Create a thread team.
#pragma omp parallel
// Make only one thread do the top-level call.
// Other threads in team pick up spawned tasks.
#pragma omp single
{
parallel_merge_sort<iT, vT>(key_start, key_end, key_temp,
val_start, val_end, val_temp,
inplace);
}
}
// merge sort key-value pair (main function)
template<typename iT, typename vT>
void omp_merge_sort_key_val_pair(iT *key, vT *val, int length)
{
//quick_sort_key_val_pair<iT, vT>(key, val, length);
if(length == 0 || length == 1)
return;
// allocate temp space for out-of-place merge sort
iT *key_temp = (iT *)malloc(length * sizeof(iT));
vT *val_temp = (vT *)malloc(length * sizeof(vT));
bool inplace = true;
do_parallel_merge_sort<iT, vT>(&key[0], &key[length], key_temp,
&val[0], &val[length], val_temp,
inplace);
// free temp space
free(key_temp);
free(val_temp);
}*/
// In-place exclusive prefix sum: input[i] becomes the sum of the
// original input[0 .. i-1]. Arrays shorter than two elements are left
// untouched (matching the original, which skips even the input[0] = 0
// write for length == 1).
template<typename T>
void exclusive_scan(T *input, int length)
{
    if (length == 0 || length == 1)
        return;
    T carry = input[0];  // original value about to be overwritten
    input[0] = 0;
    for (int idx = 1; idx < length; idx++)
    {
        const T current = input[idx];
        // Scanned prefix so far plus the previous original value.
        input[idx] = input[idx - 1] + carry;
        carry = current;
    }
}
// Segmented sum: for each segment head i (bit_flag[i] != 0), accumulate
// the following non-head elements into input[i]. Non-head elements keep
// their original values; only heads receive the segment totals.
//
// input:    values, modified in place (heads only)
// bit_flag: nonzero marks the start of a segment
// length:   number of elements in both arrays
//
// Fix: the bounds test must precede the bit_flag[j] read -- the original
// `!bit_flag[j] && j < length` dereferenced bit_flag[length] (one past the
// end) whenever the final segment ran to the end of the array.
template<typename vT, typename bT>
void segmented_sum(vT *input, bT *bit_flag, int length)
{
    if(length == 0 || length == 1)
        return;
    for (int i = 0; i < length; i++)
    {
        if (bit_flag[i])
        {
            int j = i + 1;
            while (j < length && !bit_flag[j])
            {
                input[i] += input[j];
                j++;
            }
        }
    }
}
// Return the sum of input[0 .. length-1]; an empty range sums to zero.
template<typename T>
T reduce_sum(T *input, int length)
{
    if(length == 0)
        return 0;
    T total = 0;
    // Accumulate in forward order (matters for floating-point types).
    for (int idx = 0; idx < length; ++idx)
        total += input[idx];
    return total;
}
#endif
|
GB_unaryop__identity_bool_int32.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__identity_bool_int32
// op(A') function: GB_tran__identity_bool_int32
// C type: bool
// A type: int32_t
// cast: bool cij = (bool) aij
// unaryop: cij = aij
#define GB_ATYPE \
int32_t
#define GB_CTYPE \
bool
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CASTING(z, x) \
bool z = (bool) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_BOOL || GxB_NO_INT32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply the identity operator with typecast over a dense value array:
// Cx [p] = (bool) Ax [p] for all anz entries, in parallel.
// Returns GrB_NO_VALUE when this kernel is compiled out via GB_DISABLE.
// (Auto-generated file: logic intentionally untouched.)
GrB_Info GB_unop__identity_bool_int32
(
bool *restrict Cx,
const int32_t *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// Entries are independent, so a static schedule balances the work.
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = (bool) A': transpose A, typecast, and apply the identity operator.
// The loop structure lives in the shared template GB_unaryop_transpose.c,
// specialized through the GB_* macros defined above.
// Returns GrB_NO_VALUE when this kernel is compiled out via GB_DISABLE.
GrB_Info GB_tran__identity_bool_int32
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_unaryop__abs_int64_uint32.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__abs_int64_uint32
// op(A') function: GB_tran__abs_int64_uint32
// C type: int64_t
// A type: uint32_t
// cast: int64_t cij = (int64_t) aij
// unaryop: cij = GB_IABS (aij)
#define GB_ATYPE \
uint32_t
#define GB_CTYPE \
int64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GB_IABS (x) ;
// casting
#define GB_CASTING(z, aij) \
int64_t z = (int64_t) aij ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (z, aij) ; \
GB_OP (GB_CX (pC), z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ABS || GxB_NO_INT64 || GxB_NO_UINT32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply the ABS operator with typecast over a dense value array:
// Cx [p] = GB_IABS ((int64_t) Ax [p]) for all anz entries, in parallel.
// Cx and Ax may be aliased. Returns GrB_NO_VALUE when compiled out
// via GB_DISABLE. (Auto-generated file: logic intentionally untouched.)
GrB_Info GB_unop__abs_int64_uint32
(
int64_t *Cx, // Cx and Ax may be aliased
uint32_t *Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// Loop index declared outside the pragma for OpenMP portability.
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = abs ((int64_t) A'): transpose A, typecast, and apply ABS.
// The loop structure lives in the shared template GB_unaryop_transpose.c,
// specialized through the GB_* macros defined above.
// Returns GrB_NO_VALUE when this kernel is compiled out via GB_DISABLE.
GrB_Info GB_tran__abs_int64_uint32
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
restriction.c | //------------------------------------------------------------------------------------------------------------------------------
// Samuel Williams
// SWWilliams@lbl.gov
// Lawrence Berkeley National Lab
//------------------------------------------------------------------------------------------------------------------------------
// Restrict (coarsen) one block of a fine-grid vector into a coarse-grid
// vector: each coarse point averages the 2x2x2 (cell) or 2x2 (face)
// cluster of fine points beneath it. Fine indices are the coarse indices
// doubled (i<<1), hence "piecewise-constant" restriction.
//
// level_c/id_c:    coarse level and vector id written to
// level_f/id_f:    fine level and vector id read from
// block:           source/destination extents, offsets, and strides
// restrictionType: RESTRICT_CELL or one of the RESTRICT_FACE_{I,J,K}
static inline void restriction_pc_block(level_type *level_c, int id_c, level_type *level_f, int id_f, blockCopy_type *block, int restrictionType){
// restrict 3D array from read_i,j,k of read[] to write_i,j,k in write[]
int dim_i = block->dim.i; // calculate the dimensions of the resultant coarse block
int dim_j = block->dim.j;
int dim_k = block->dim.k;
int read_i = block->read.i;
int read_j = block->read.j;
int read_k = block->read.k;
int read_jStride = block->read.jStride;
int read_kStride = block->read.kStride;
int write_i = block->write.i;
int write_j = block->write.j;
int write_k = block->write.k;
int write_jStride = block->write.jStride;
int write_kStride = block->write.kStride;
double * __restrict__ read = block->read.ptr;
double * __restrict__ write = block->write.ptr;
// A box id >= 0 means the endpoint is a local box rather than a raw
// buffer: point at the box's vector data (offset past the ghost zone)
// and adopt the box's strides.
if(block->read.box >=0){
read = level_f->my_boxes[ block->read.box].vectors[id_f] + level_f->my_boxes[ block->read.box].ghosts*(1+level_f->my_boxes[ block->read.box].jStride+level_f->my_boxes[ block->read.box].kStride);
read_jStride = level_f->my_boxes[block->read.box ].jStride;
read_kStride = level_f->my_boxes[block->read.box ].kStride;
}
if(block->write.box>=0){
write = level_c->my_boxes[block->write.box].vectors[id_c] + level_c->my_boxes[block->write.box].ghosts*(1+level_c->my_boxes[block->write.box].jStride+level_c->my_boxes[block->write.box].kStride);
write_jStride = level_c->my_boxes[block->write.box].jStride;
write_kStride = level_c->my_boxes[block->write.box].kStride;
}
int i,j,k;
switch(restrictionType){
// Cell-centered: average all 8 fine cells under the coarse cell.
case RESTRICT_CELL:
for(k=0;k<dim_k;k++){
for(j=0;j<dim_j;j++){
for(i=0;i<dim_i;i++){
int write_ijk = ((i )+write_i) + ((j )+write_j)*write_jStride + ((k )+write_k)*write_kStride;
int read_ijk = ((i<<1)+ read_i) + ((j<<1)+ read_j)* read_jStride + ((k<<1)+ read_k)* read_kStride;
write[write_ijk] = ( read[read_ijk ]+read[read_ijk+1 ] +
read[read_ijk +read_jStride ]+read[read_ijk+1+read_jStride ] +
read[read_ijk +read_kStride]+read[read_ijk+1 +read_kStride] +
read[read_ijk +read_jStride+read_kStride]+read[read_ijk+1+read_jStride+read_kStride] ) * 0.125;
}}}break;
// I-face: average the 4 fine faces in the j-k plane.
case RESTRICT_FACE_I:
for(k=0;k<dim_k;k++){
for(j=0;j<dim_j;j++){
for(i=0;i<dim_i;i++){
int write_ijk = ((i )+write_i) + ((j )+write_j)*write_jStride + ((k )+write_k)*write_kStride;
int read_ijk = ((i<<1)+ read_i) + ((j<<1)+ read_j)* read_jStride + ((k<<1)+ read_k)* read_kStride;
write[write_ijk] = ( read[read_ijk ] +
read[read_ijk+read_jStride ] +
read[read_ijk +read_kStride] +
read[read_ijk+read_jStride+read_kStride] ) * 0.25;
}}}break;
// J-face: average the 4 fine faces in the i-k plane.
case RESTRICT_FACE_J:
for(k=0;k<dim_k;k++){
for(j=0;j<dim_j;j++){
for(i=0;i<dim_i;i++){
int write_ijk = ((i )+write_i) + ((j )+write_j)*write_jStride + ((k )+write_k)*write_kStride;
int read_ijk = ((i<<1)+ read_i) + ((j<<1)+ read_j)* read_jStride + ((k<<1)+ read_k)* read_kStride;
write[write_ijk] = ( read[read_ijk ] +
read[read_ijk+1 ] +
read[read_ijk +read_kStride] +
read[read_ijk+1+read_kStride] ) * 0.25;
}}}break;
// K-face: average the 4 fine faces in the i-j plane.
case RESTRICT_FACE_K:
for(k=0;k<dim_k;k++){
for(j=0;j<dim_j;j++){
for(i=0;i<dim_i;i++){
int write_ijk = ((i )+write_i) + ((j )+write_j)*write_jStride + ((k )+write_k)*write_kStride;
int read_ijk = ((i<<1)+ read_i) + ((j<<1)+ read_j)* read_jStride + ((k<<1)+ read_k)* read_kStride;
write[write_ijk] = ( read[read_ijk ] +
read[read_ijk+1 ] +
read[read_ijk +read_jStride] +
read[read_ijk+1+read_jStride] ) * 0.25;
}}}break;
}
}
//------------------------------------------------------------------------------------------------------------------------------
// perform a (inter-level) restriction
// Perform an inter-level restriction of vector id_f on the fine level into
// vector id_c on the coarse level.  Communication/computation overlap
// structure (when USE_MPI is defined):
//   1. prepost Irecv's for the coarse level's incoming boundary data
//   2. pack fine-grid boundary blocks (blocks[0]) directly into the MPI send
//      buffers via restriction_pc_block, then post the Isend's
//   3. restrict purely-local blocks (blocks[1]) to hide the Isend latency
//   4. Waitall, then unpack received blocks (blocks[2]) with CopyBlock
// Each phase is timed with CycleTime() and accumulated into level_f->cycles.
void restriction(level_type * level_c, int id_c, level_type *level_f, int id_f, int restrictionType){
uint64_t _timeCommunicationStart = CycleTime();
uint64_t _timeStart,_timeEnd;
int buffer=0;
int n;
// message tag combines the fine level's tag with a constant identifying
// restriction traffic (distinguishes it from other exchanges on this level)
int my_tag = (level_f->tag<<4) | 0x5;
#ifdef USE_MPI
// by convention, level_f allocates a combined array of requests for both level_f sends and level_c recvs...
int nMessages = level_c->restriction[restrictionType].num_recvs + level_f->restriction[restrictionType].num_sends;
MPI_Request *recv_requests = level_f->restriction[restrictionType].requests;
MPI_Request *send_requests = level_f->restriction[restrictionType].requests + level_c->restriction[restrictionType].num_recvs;
// loop through packed list of MPI receives and prepost Irecv's...
_timeStart = CycleTime();
#ifdef USE_MPI_THREAD_MULTIPLE
#pragma omp parallel for schedule(dynamic,1)
#endif
for(n=0;n<level_c->restriction[restrictionType].num_recvs;n++){
MPI_Irecv(level_c->restriction[restrictionType].recv_buffers[n],
level_c->restriction[restrictionType].recv_sizes[n],
MPI_DOUBLE,
level_c->restriction[restrictionType].recv_ranks[n],
my_tag,
MPI_COMM_WORLD,
&recv_requests[n]
);
}
_timeEnd = CycleTime();
level_f->cycles.restriction_recv += (_timeEnd-_timeStart);
// pack MPI send buffers...
// (blocks[0] write into the send buffers; restriction is fused with packing)
_timeStart = CycleTime();
PRAGMA_THREAD_ACROSS_BLOCKS(level_f,buffer,level_f->restriction[restrictionType].num_blocks[0])
for(buffer=0;buffer<level_f->restriction[restrictionType].num_blocks[0];buffer++){
restriction_pc_block(level_c,id_c,level_f,id_f,&level_f->restriction[restrictionType].blocks[0][buffer],restrictionType);
}
_timeEnd = CycleTime();
level_f->cycles.restriction_pack += (_timeEnd-_timeStart);
// loop through MPI send buffers and post Isend's...
_timeStart = CycleTime();
#ifdef USE_MPI_THREAD_MULTIPLE
#pragma omp parallel for schedule(dynamic,1)
#endif
for(n=0;n<level_f->restriction[restrictionType].num_sends;n++){
MPI_Isend(level_f->restriction[restrictionType].send_buffers[n],
level_f->restriction[restrictionType].send_sizes[n],
MPI_DOUBLE,
level_f->restriction[restrictionType].send_ranks[n],
my_tag,
MPI_COMM_WORLD,
&send_requests[n]
);
}
_timeEnd = CycleTime();
level_f->cycles.restriction_send += (_timeEnd-_timeStart);
#endif
// perform local restriction[restrictionType]... try and hide within Isend latency...
_timeStart = CycleTime();
PRAGMA_THREAD_ACROSS_BLOCKS(level_f,buffer,level_f->restriction[restrictionType].num_blocks[1])
for(buffer=0;buffer<level_f->restriction[restrictionType].num_blocks[1];buffer++){
restriction_pc_block(level_c,id_c,level_f,id_f,&level_f->restriction[restrictionType].blocks[1][buffer],restrictionType);
}
_timeEnd = CycleTime();
level_f->cycles.restriction_local += (_timeEnd-_timeStart);
// wait for MPI to finish...
#ifdef USE_MPI
_timeStart = CycleTime();
if(nMessages)MPI_Waitall(nMessages,level_f->restriction[restrictionType].requests,level_f->restriction[restrictionType].status);
_timeEnd = CycleTime();
level_f->cycles.restriction_wait += (_timeEnd-_timeStart);
// unpack MPI receive buffers
// (note: unpack block list and counts live on the COARSE level)
_timeStart = CycleTime();
PRAGMA_THREAD_ACROSS_BLOCKS(level_f,buffer,level_c->restriction[restrictionType].num_blocks[2])
for(buffer=0;buffer<level_c->restriction[restrictionType].num_blocks[2];buffer++){
CopyBlock(level_c,id_c,&level_c->restriction[restrictionType].blocks[2][buffer]);
}
_timeEnd = CycleTime();
level_f->cycles.restriction_unpack += (_timeEnd-_timeStart);
#endif
level_f->cycles.restriction_total += (uint64_t)(CycleTime()-_timeCommunicationStart);
}
|
merge.h | #pragma once
#include "heap.h"
// Id Type: C::TI
// Distance type: C::T
// Merge two per-query top-k result lists in place.
//
// dis1/id1 and dis2/id2 each hold nq rows of topk entries; every row is
// assumed ordered consistently with comparator C (C::cmp(a, b) is true when
// a is "worse" than b, i.e. the entry from the second list should win).
// For each query the best topk entries of the two rows are written back
// into dis1/id1.  Ids taken from the second list are offset by data2_base.
template <class C>
void merge(typename C::T *dis1, typename C::TI *id1, typename C::T *dis2,
           typename C::TI *id2, int64_t nq, int64_t topk, int64_t data2_base) {
  using DIS_T = typename C::T;
  using ID_T = typename C::TI;
#pragma omp parallel
  {
    // per-thread scratch row, so threads never share a merge buffer
    DIS_T *buf_dis = new DIS_T[topk];
    ID_T *buf_id = new ID_T[topk];
#pragma omp for
    for (int64_t q = 0; q < nq; q++) {
      DIS_T *lhs_dis = dis1 + q * topk;
      ID_T *lhs_id = id1 + q * topk;
      DIS_T *rhs_dis = dis2 + q * topk;
      ID_T *rhs_id = id2 + q * topk;
      int64_t a = 0;  // cursor into the first (in/out) row
      int64_t b = 0;  // cursor into the second row
      for (int64_t out = 0; out < topk; out++) {
        // a + b == out < topk, so both reads below stay in bounds
        if (C::cmp(lhs_dis[a], rhs_dis[b])) {
          // first-row entry is worse: the second-row entry takes this slot
          buf_dis[out] = rhs_dis[b];
          buf_id[out] = rhs_id[b] + data2_base;
          b++;
        } else {
          buf_dis[out] = lhs_dis[a];
          buf_id[out] = lhs_id[a];
          a++;
        }
      }
      // publish the merged row back into the first list
      memcpy(lhs_dis, buf_dis, topk * sizeof(DIS_T));
      memcpy(lhs_id, buf_id, topk * sizeof(ID_T));
    }
    delete[] buf_dis;
    delete[] buf_id;
  }
}
bli_dotv_opt_var1.c | /*
BLIS
An object-based framework for developing high-performance BLAS-like
libraries.
Copyright (C) 2014, The University of Texas at Austin
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
- Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
- Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
- Neither the name of The University of Texas at Austin nor the names
of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "blis.h"
// Double-precision dot product, rho = x^T y (optimized variant 1).
//
// conjx/conjy are part of the generic dotv interface; they are only
// forwarded to the reference kernel here (for real data conjugation is the
// identity, so the fast path ignores them).
// Fast-path requirements: unit strides and 32-byte alignment of both x and
// y, as needed by the aligned vec_lda loads.  Anything else falls back to
// the reference implementation.
// NOTE(review): vector4double/vec_splats/vec_lda/vec_madd/vec_extract look
// like QPX (Blue Gene/Q) intrinsics — this path assumes that ISA; confirm
// against the build target.
void bli_ddotv_opt_var1
(
conj_t conjx,
conj_t conjy,
dim_t n,
double* restrict x, inc_t incx,
double* restrict y, inc_t incy,
double* restrict rho,
cntx_t* cntx
)
{
bool_t use_ref = FALSE;
// If the vector lengths are zero, set rho to zero and return.
if ( bli_zero_dim1( n ) ) {
PASTEMAC(d,set0s)( rho );
return;
}
// If there is anything that would interfere with our use of aligned
// vector loads/stores, call the reference implementation.
if ( incx != 1 || incy != 1 || bli_is_unaligned_to( x, 32 ) || bli_is_unaligned_to( y, 32 ) )
use_ref = TRUE;
// Call the reference implementation if needed.
if ( use_ref ) {
BLIS_DDOTV_KERNEL_REF( conjx, conjy, n, x, incx, y, incy, rho, cntx );
return;
}
// n_run full 4-wide vector iterations; n_left scalar remainder elements.
dim_t n_run = n / 4;
dim_t n_left = n % 4;
double rhos = 0.0;
// Each thread accumulates a private 4-wide partial sum (rhov), collapses it
// to a scalar, and the OpenMP reduction combines the per-thread scalars.
#pragma omp parallel reduction(+:rhos)
{
dim_t n_threads;
dim_t t_id = omp_get_thread_num();
n_threads = omp_get_num_threads();
vector4double rhov = vec_splats( 0.0 );
vector4double xv, yv;
// cyclic distribution: thread t handles vector iterations t, t+nthreads, ...
// each iteration loads 4 doubles from x and y (32-byte aligned chunks).
for ( dim_t i = t_id; i < n_run; i += n_threads )
{
xv = vec_lda( 0 * sizeof(double), &x[i*4] );
yv = vec_lda( 0 * sizeof(double), &y[i*4] );
rhov = vec_madd( xv, yv, rhov );
}
// horizontal reduction of the 4 vector lanes into this thread's scalar
rhos += vec_extract( rhov, 0 );
rhos += vec_extract( rhov, 1 );
rhos += vec_extract( rhov, 2 );
rhos += vec_extract( rhov, 3 );
}
// handle the n % 4 remainder elements in scalar code (single-threaded)
for ( dim_t i = 0; i < n_left; i++ )
{
rhos += x[4*n_run + i] * y[4*n_run + i];
}
*rho = rhos;
}
|
GB_transpose.c | //------------------------------------------------------------------------------
// GB_transpose: C=A' or C=op(A'), with typecasting
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// CALLS: GB_builder
// Transpose a matrix, C=A', and optionally apply a unary operator and/or
// typecast the values. The transpose may be done in place, in which case C or
// A are modified in place. If the matrix to be transposed has more than one
// vector, it may have jumbled indices in its vectors, which must be sorted.
// If the input matrix has a single vector, it must be already sorted on input.
// The input matrix may have shallow components (even if in place), and the
// output may also have shallow components (even if the input matrix is not
// shallow).
// This function is CSR/CSC agnostic; it sets the output matrix format from
// C_is_csc but otherwise ignores the CSR/CSC type of A and C.
// If A_in is NULL, then C = (*Chandle) is transposed in place. If out of
// memory, (*Chandle) is always returned as NULL, which frees the input matrix
// C if the transpose is done in place.
// If A_in is not NULL and Chandle is NULL, then A is modified in place, and
// the A_in matrix is not freed when done.
// The bucket sort is parallel, but not highly scalable. If e=nnz(A) and A is
// m-by-n, then at most O(e/n) threads are used. For many matrices, e is O(n),
// although the constant can be high. The qsort method is more scalable, but
// not as fast with a modest number of threads.
#include "GB_transpose.h"
#include "GB_build.h"
#include "GB_apply.h"
// Free the workspace allocated by GB_transpose (the Count array).
// FIX: the original definition had a stray trailing backslash after the
// closing brace, which spliced the following source line into this macro
// definition.  It was harmless only because that line is a comment (stripped
// after line splicing); any future code placed there would have been
// silently absorbed into the macro.
#define GB_FREE_WORK                                                        \
{                                                                           \
    GB_FREE_MEMORY (Count, ntasks+1, sizeof (int64_t)) ;                    \
}

// free prior content of A, if transpose is done in place
#define GB_FREE_IN_PLACE_A                                                  \
{                                                                           \
    if (in_place)                                                           \
    {                                                                       \
        /* A is being transposed in place */                                \
        /* free prior content of A but not &A itself */                     \
        if (!Ap_shallow) GB_FREE_MEMORY (Ap, aplen+1, sizeof (int64_t)) ;   \
        if (!Ah_shallow) GB_FREE_MEMORY (Ah, aplen  , sizeof (int64_t)) ;   \
        if (!Ai_shallow) GB_FREE_MEMORY (Ai, anzmax , sizeof (int64_t)) ;   \
        if (!Ax_shallow) GB_FREE_MEMORY (Ax, anzmax , asize) ;              \
    }                                                                       \
    else                                                                    \
    {                                                                       \
        /* A is not modified; it is purely an input matrix */               \
        ;                                                                   \
    }                                                                       \
}

// free the new C matrix, unless C=A' is being done in place of A
#define GB_FREE_C                                                           \
{                                                                           \
    if (!in_place_A)                                                        \
    {                                                                       \
        /* free all of C and all its contents &C */                         \
        GB_MATRIX_FREE (Chandle) ;                                          \
    }                                                                       \
}

// free both A (if in place) and C (if not in place of A)
#define GB_FREE_A_AND_C                                                     \
{                                                                           \
    GB_FREE_IN_PLACE_A ;                                                    \
    GB_FREE_C ;                                                             \
}
//------------------------------------------------------------------------------
// GB_transpose
//------------------------------------------------------------------------------
GrB_Info GB_transpose // C=A', C=(ctype)A or C=op(A')
(
GrB_Matrix *Chandle, // output matrix C, possibly modified in place
GrB_Type ctype, // desired type of C; if NULL use A->type.
// ignored if op is present (cast to op->ztype)
const bool C_is_csc, // desired CSR/CSC format of C
const GrB_Matrix A_in, // input matrix
const GrB_UnaryOp op_in, // optional operator to apply to the values
GB_Context Context
)
{
//--------------------------------------------------------------------------
// check inputs and determine if transpose is done in place
//--------------------------------------------------------------------------
bool in_place_C, in_place_A ;
GrB_Matrix A, C ;
if (A_in == NULL)
{
//----------------------------------------------------------------------
// C = C' ; &C is transposed in place
//----------------------------------------------------------------------
// GB_transpose (&C, ctype, csc, NULL, op) ;
// C=A' is transposed in place, in the matrix C.
// The matrix C is freed if an error occurs and C is set to NULL.
ASSERT (Chandle != NULL) ; // at least &C or A must be non-NULL
A = (*Chandle) ;
C = A ; // C must be freed if an error occurs
in_place_C = true ; // C is modified in place
in_place_A = false ;
ASSERT (A == C && A == (*Chandle)) ;
}
else if (Chandle == NULL || (*Chandle) == A_in)
{
//----------------------------------------------------------------------
// A = A' ; A is transposed in place; reuse the header of A
//----------------------------------------------------------------------
// GB_transpose (NULL, ctype, csc, A, op) ;
// GB_transpose (&A, ctype, csc, A, op) ;
// C=A' is transposed in place, in the matrix A.
// The matrix A_in is not freed if an error occurs.
A = A_in ;
Chandle = &A ; // C must not be freed if an error occurs
C = A ;
in_place_C = false ;
in_place_A = true ; // A is modified in place
ASSERT (A == C && A == (*Chandle)) ;
}
else
{
//----------------------------------------------------------------------
// C = A' ; C and A are different
//----------------------------------------------------------------------
// GB_transpose (&C, ctype, csc, A, op) ;
// C and A are both non-NULL, and not aliased.
// C=A' where C is a new matrix constructed here.
// The matrix C is freed if an error occurs, and C is set to NULL.
A = A_in ;
C = NULL ;
(*Chandle) = NULL ; // C must be allocated; freed on error
in_place_C = false ; // C and A are different matrices
in_place_A = false ;
ASSERT (A != C && A != (*Chandle)) ;
}
bool in_place = (in_place_A || in_place_C) ;
ASSERT_OK_OR_JUMBLED (GB_check (A, "A input for GB_transpose", GB0)) ;
ASSERT_OK_OR_NULL (GB_check (ctype, "ctype for GB_transpose", GB0)) ;
ASSERT_OK_OR_NULL (GB_check (op_in, "op for GB_transpose", GB0)) ;
ASSERT (!GB_PENDING (A)) ;
ASSERT (!GB_ZOMBIES (A)) ;
//--------------------------------------------------------------------------
// determine the number of threads to use here
//--------------------------------------------------------------------------
int64_t anz = GB_NNZ (A) ;
int64_t anvec = A->nvec ;
GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ;
int nthreads = GB_nthreads (anz + anvec, chunk, nthreads_max) ;
//--------------------------------------------------------------------------
// get A
//--------------------------------------------------------------------------
GrB_Info info ;
GrB_Type atype = A->type ;
size_t asize = atype->size ;
GB_Type_code acode = atype->code ;
int64_t avlen = A->vlen ;
int64_t avdim = A->vdim ;
int64_t aplen = A->plen ;
bool A_is_hyper = A->is_hyper ;
double A_hyper_ratio = A->hyper_ratio ;
int64_t anzmax = A->nzmax ;
// if in place, these must be freed when done, whether successful or not
int64_t *restrict Ap = A->p ;
int64_t *restrict Ah = A->h ;
int64_t *restrict Ai = A->i ;
GB_void *restrict Ax = A->x ;
bool Ap_shallow = A->p_shallow ;
bool Ah_shallow = A->h_shallow ;
bool Ai_shallow = A->i_shallow ;
bool Ax_shallow = A->x_shallow ;
//--------------------------------------------------------------------------
// allocate workspace
//--------------------------------------------------------------------------
int nth = GB_nthreads (avdim, chunk, nthreads_max) ;
int ntasks = (nth == 1) ? 1 : (8 * nth) ;
ntasks = GB_IMIN (ntasks, avdim) ;
ntasks = GB_IMAX (ntasks, 1) ;
int64_t *restrict Count = NULL ; // size ntasks+1, if allocated
if (anz > 0 && avdim != 1 && avlen == 1)
{
// Count is only used in one case below
GB_CALLOC_MEMORY (Count, ntasks+1, sizeof (int64_t)) ;
if (Count == NULL)
{
// out of memory
GB_FREE_C ;
return (GB_OUT_OF_MEMORY) ;
}
}
//--------------------------------------------------------------------------
// determine the type of C and get the unary operator
//--------------------------------------------------------------------------
GrB_UnaryOp op ;
if (op_in == NULL)
{
// no operator
op = NULL ;
if (ctype == NULL)
{
// no typecasting if ctype is NULL
ctype = atype ;
}
}
else
{
// If a unary operator z=op(x) is present, C is always returned as
// op->ztype. The input ctype is ignored.
if (op_in->opcode == GB_IDENTITY_opcode && atype == op_in->xtype)
{
// op is a built-in identity operator, with the same type as A, so
// do not apply the operator and do not typecast.
ASSERT (op_in->ztype == op_in->xtype) ;
op = NULL ;
ctype = atype ;
}
else
{
// apply the operator, z=op(x)
op = op_in ;
ctype = op->ztype ;
}
}
GB_Type_code ccode = ctype->code ;
size_t csize = ctype->size ;
//--------------------------------------------------------------------------
// C = A'
//--------------------------------------------------------------------------
ASSERT (GB_IMPLIES (avlen == 0 || avdim == 0, anz == 0)) ;
bool allocate_new_Cx = (ctype != atype) || (op != NULL) ;
if (anz == 0)
{
//======================================================================
// quick return if A is empty
//======================================================================
GB_FREE_IN_PLACE_A ;
// A is empty; create a new empty matrix C, with the new type and
// dimensions. C is hypersparse for now but may convert when
// returned.
GB_CREATE (Chandle, ctype, avdim, avlen, GB_Ap_calloc,
C_is_csc, GB_FORCE_HYPER, A_hyper_ratio, 1, 1, true, Context) ;
if (info != GrB_SUCCESS)
{
// out of memory
GB_FREE_C ;
GB_FREE_WORK ;
return (info) ;
}
ASSERT_OK (GB_check (*Chandle, "C transpose empty", GB0)) ;
}
else if (avdim == 1)
{
//======================================================================
// transpose a "column" vector into a "row"
//======================================================================
// transpose a vector (avlen-by-1) into a "row" matrix (1-by-avlen).
// A must be already sorted on input
ASSERT_OK (GB_check (A, "the vector A must already be sorted", GB0)) ;
//----------------------------------------------------------------------
// allocate space
//----------------------------------------------------------------------
// Allocate the header of C, with no C->p, C->h, C->i, or C->x
// content, and initialize the type and dimension of C. If in
// place, A->p, A->h, A->i, and A->x are all NULL. The new matrix
// is hypersparse, but can be CSR or CSC. This step does not
// allocate anything if in place.
// if *Chandle == NULL, allocate a new header; otherwise reuse existing
GB_NEW (Chandle, ctype, 1, avlen, GB_Ap_null, C_is_csc,
GB_FORCE_HYPER, A_hyper_ratio, 0, Context) ;
if (info != GrB_SUCCESS)
{
// out of memory
ASSERT (!in_place) ; // cannot fail if in place
GB_FREE_C ;
GB_FREE_WORK ;
return (info) ;
}
if (!in_place)
{
C = (*Chandle) ;
}
else
{
ASSERT (A == C && A == (*Chandle)) ;
}
// allocate new space for the values and pattern
GB_void *restrict Cx = NULL ;
int64_t *restrict Cp ;
int64_t *restrict Ci ;
GB_MALLOC_MEMORY (Cp, anz+1, sizeof (int64_t)) ;
GB_CALLOC_MEMORY (Ci, anz , sizeof (int64_t)) ;
if (allocate_new_Cx)
{
// allocate new space for the new typecasted numerical values of C
GB_MALLOC_MEMORY (Cx, anz, ctype->size) ;
}
if (Cp == NULL || Ci == NULL || (allocate_new_Cx && (Cx == NULL)))
{
// out of memory
GB_FREE_MEMORY (Cp, anz+1, sizeof (int64_t)) ;
GB_FREE_MEMORY (Ci, anz , sizeof (int64_t)) ;
GB_FREE_MEMORY (Cx, anz , csize) ;
GB_FREE_A_AND_C ;
GB_FREE_WORK ;
return (GB_OUT_OF_MEMORY) ;
}
//----------------------------------------------------------------------
// the transpose will now succeed; fill the content of C
//----------------------------------------------------------------------
// numerical values: apply the operator, typecast, or make shallow copy
if (op != NULL)
{
// Cx = op ((op->xtype) Ax)
C->x = Cx ; C->x_shallow = false ;
GB_apply_op (Cx, op, Ax, atype, anz, Context) ;
// prior Ax will be freed
}
else if (ctype != atype)
{
// copy the values from A into C and cast from atype to ctype
C->x = Cx ; C->x_shallow = false ;
GB_cast_array (Cx, ccode, Ax, acode, anz, Context) ;
// prior Ax will be freed
}
else // ctype == atype
{
// no type change; numerical values of C are a shallow copy of A.
C->x = Ax ; C->x_shallow = (in_place) ? Ax_shallow : true ;
Ax = NULL ; // do not free prior Ax
}
// each entry in A becomes a non-empty vector in C
C->h = Ai ; C->h_shallow = (in_place) ? Ai_shallow : true ;
Ai = NULL ; // do not free prior Ai
C->nzmax = anz ;
// C->p = 0:anz and C->i = zeros (1,anz), newly allocated
C->plen = anz ;
C->nvec = anz ;
C->nvec_nonempty = anz ;
C->i = Ci ; C->i_shallow = false ;
C->p = Cp ; C->p_shallow = false ;
// fill the vector pointers C->p
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t k = 0 ; k <= anz ; k++)
{
Cp [k] = k ;
}
C->magic = GB_MAGIC ;
//----------------------------------------------------------------------
// free prior space
//----------------------------------------------------------------------
GB_FREE_IN_PLACE_A ;
}
else if (avlen == 1)
{
//======================================================================
// transpose a "row" into a "column" vector
//======================================================================
// transpose a "row" matrix (1-by-avdim) into a vector (avdim-by-1).
// if A->vlen is 1, all vectors of A are implicitly sorted
ASSERT_OK (GB_check (A, "1-by-n input A already sorted", GB0)) ;
//----------------------------------------------------------------------
// allocate space
//----------------------------------------------------------------------
// Allocate the header of C, with no C->p, C->h, C->i, or C->x
// content, and initialize the type and dimension of C. If in
// place, A->p, A->h, A->i, and A->x are all NULL. The new matrix
// is NON-hypersparse, but can be CSR or CSC. This step does not
// allocate anything if in place.
// if *Chandle == NULL, allocate a new header; otherwise reuse existing
GB_NEW (Chandle, ctype, avdim, 1, GB_Ap_null, C_is_csc,
GB_FORCE_NONHYPER, A_hyper_ratio, 0, Context) ;
if (info != GrB_SUCCESS)
{
// out of memory
ASSERT (!in_place) ; // cannot fail if in place
GB_FREE_C ;
GB_FREE_WORK ;
return (info) ;
}
if (!in_place)
{
C = (*Chandle) ;
}
else
{
ASSERT (A == C && A == (*Chandle)) ;
}
// allocate new space for the values and pattern
GB_void *restrict Cx = NULL ;
int64_t *restrict Cp ;
int64_t *restrict Ci = NULL ;
GB_CALLOC_MEMORY (Cp, 2, sizeof (int64_t)) ;
bool allocate_new_Ci = (!A_is_hyper) ;
if (allocate_new_Ci)
{
// A is not hypersparse, so new space is needed for Ci
GB_MALLOC_MEMORY (Ci, anz, sizeof (int64_t)) ;
}
if (allocate_new_Cx)
{
// allocate new space for the new typecasted numerical values of C
GB_MALLOC_MEMORY (Cx, anz, ctype->size) ;
}
if (Cp == NULL || (allocate_new_Cx && (Cx == NULL))
|| (allocate_new_Ci && (Ci == NULL)))
{
// out of memory
GB_FREE_MEMORY (Cp, 2 , sizeof (int64_t)) ;
GB_FREE_MEMORY (Ci, anz , sizeof (int64_t)) ;
GB_FREE_MEMORY (Cx, anz , csize) ;
GB_FREE_A_AND_C ;
GB_FREE_WORK ;
return (GB_OUT_OF_MEMORY) ;
}
//----------------------------------------------------------------------
// numerical values of C: apply the op, typecast, or make shallow copy
//----------------------------------------------------------------------
if (op != NULL)
{
// Cx = op ((op->xtype) Ax)
C->x = Cx ; C->x_shallow = false ;
GB_apply_op (Cx, op, Ax, atype, anz, Context) ;
// prior Ax will be freed
}
else if (ctype != atype)
{
// copy the values from A into C and cast from atype to ctype
C->x = Cx ; C->x_shallow = false ;
GB_cast_array (Cx, ccode, Ax, acode, anz, Context) ;
// prior Ax will be freed
}
else // ctype == atype
{
// no type change; numerical values of C are a shallow copy of A
C->x = Ax ; C->x_shallow = (in_place) ? Ax_shallow : true ;
Ax = NULL ; // do not free prior Ax
}
//----------------------------------------------------------------------
// pattern of C
//----------------------------------------------------------------------
if (A_is_hyper)
{
//------------------------------------------------------------------
// each non-empty vector in A becomes an entry in C
//------------------------------------------------------------------
ASSERT (!allocate_new_Ci) ;
C->i = Ah ; C->i_shallow = (in_place) ? Ah_shallow : true ;
ASSERT (anvec == anz) ;
Ah = NULL ; // do not free prior Ah
}
else
{
//------------------------------------------------------------------
// find the non-empty vectors of A, which become entries in C
//------------------------------------------------------------------
ASSERT (allocate_new_Ci) ;
ASSERT (Ah == NULL) ;
if (nth == 1)
{
//--------------------------------------------------------------
// construct Ci with a single thread
//--------------------------------------------------------------
int64_t k = 0 ;
for (int64_t j = 0 ; j < avdim ; j++)
{
if (Ap [j] < Ap [j+1])
{
Ci [k++] = j ;
}
}
ASSERT (k == anz) ;
}
else
{
//--------------------------------------------------------------
// construct Ci in parallel
//--------------------------------------------------------------
#pragma omp parallel for num_threads(nth) schedule(dynamic,1)
for (int tid = 0 ; tid < ntasks ; tid++)
{
int64_t jstart, jend, k = 0 ;
GB_PARTITION (jstart, jend, avdim, tid, ntasks) ;
for (int64_t j = jstart ; j < jend ; j++)
{
if (Ap [j] < Ap [j+1])
{
k++ ;
}
}
Count [tid] = k ;
}
GB_cumsum (Count, ntasks, NULL, 1) ;
ASSERT (Count [ntasks] == anz) ;
#pragma omp parallel for num_threads(nth) schedule(dynamic,1)
for (int tid = 0 ; tid < ntasks ; tid++)
{
int64_t jstart, jend, k = Count [tid] ;
GB_PARTITION (jstart, jend, avdim, tid, ntasks) ;
for (int64_t j = jstart ; j < jend ; j++)
{
if (Ap [j] < Ap [j+1])
{
Ci [k++] = j ;
}
}
}
}
#ifdef GB_DEBUG
int64_t k = 0 ;
for (int64_t j = 0 ; j < avdim ; j++)
{
if (Ap [j] < Ap [j+1])
{
ASSERT (Ci [k] == j) ;
k++ ;
}
}
ASSERT (k == anz) ;
#endif
C->i = Ci ; C->i_shallow = false ;
}
//----------------------------------------------------------------------
// vector pointers of C
//----------------------------------------------------------------------
C->nzmax = anz ;
// C->p = [0 anz] and C->h = NULL
ASSERT (C->plen == 1) ;
ASSERT (C->nvec == 1) ;
ASSERT (C->h == NULL) ;
C->p = Cp ; C->p_shallow = false ;
C->nvec_nonempty = (anz == 0) ? 0 : 1 ;
// fill the vector pointers C->p
Cp [0] = 0 ;
Cp [1] = anz ;
C->magic = GB_MAGIC ;
//----------------------------------------------------------------------
// free prior space
//----------------------------------------------------------------------
GB_FREE_IN_PLACE_A ;
}
else
{
//======================================================================
// transpose a general matrix
//======================================================================
ASSERT_OK_OR_JUMBLED (GB_check (A, "A GB_transpose jumbled ok", GB0)) ;
ASSERT (avdim > 1 && avlen > 1) ;
// T=A' with optional typecasting, or T=op(A')
//----------------------------------------------------------------------
// select the method
//----------------------------------------------------------------------
// for the qsort method, if the transpose is done in place and A->i is
// not shallow, A->i can be used and then freed. Otherwise, A->i is
// not modified at all.
bool recycle_Ai = (in_place && !Ai_shallow) ;
bool use_qsort ;
if (A_is_hyper)
{
//------------------------------------------------------------------
// always use qsort for hypersparse matrices
//------------------------------------------------------------------
use_qsort = true ;
}
else
{
//------------------------------------------------------------------
// select qsort if the transpose will likely be hypersparse
//------------------------------------------------------------------
use_qsort = GB_CHOOSE_QSORT_INSTEAD_OF_BUCKET (anz, avlen) ;
}
//----------------------------------------------------------------------
// transpose the matrix with the selected method
//----------------------------------------------------------------------
if (use_qsort)
{
//==================================================================
// transpose via quicksort
//==================================================================
//------------------------------------------------------------------
// allocate and create iwork
//------------------------------------------------------------------
// allocate iwork of size anz
int64_t *iwork ;
GB_MALLOC_MEMORY (iwork, anz, sizeof (int64_t)) ;
if (iwork == NULL)
{
// out of memory
GB_FREE_C ;
GB_FREE_WORK ;
return (GB_OUT_OF_MEMORY) ;
}
// Construct the "row" indices of C, which are "column" indices of
// A. This array becomes the permanent T->i on output. This phase
// must be done before Chandle is created below, since that step
// destroys A.
GB_extract_vector_list (iwork, A, nthreads) ;
//------------------------------------------------------------------
// allocate the output matrix and additional space (jwork and S)
//------------------------------------------------------------------
// Allocate the header of C, with no C->p, C->h, C->i, or C->x
// content, and initialize the type and dimension of C. If in
// place, A->p, A->h, A->i, and A->x are all NULL. The new matrix
// is hypersparse, but can be CSR or CSC. This step does not
// allocate anything if in place.
// if *Chandle == NULL, allocate a new header; otherwise reuse
GB_NEW (Chandle, ctype, avdim, avlen, GB_Ap_null, C_is_csc,
GB_FORCE_HYPER, A_hyper_ratio, 0, Context) ;
if (info != GrB_SUCCESS)
{
// out of memory
ASSERT (!in_place) ; // cannot fail if in place
GB_FREE_MEMORY (iwork, anz, sizeof (int64_t)) ;
GB_FREE_C ;
GB_FREE_WORK ;
return (info) ;
}
if (!in_place)
{
C = (*Chandle) ;
}
else
{
ASSERT (A == C && A == (*Chandle)) ;
}
// if in_place, the prior Ap and Ah can now be freed
if (in_place)
{
if (!Ap_shallow) GB_FREE_MEMORY (Ap, aplen+1, sizeof (int64_t));
if (!Ah_shallow) GB_FREE_MEMORY (Ah, aplen , sizeof (int64_t));
}
int64_t *jwork = NULL ;
GB_Type_code scode ;
GB_void *S = NULL ;
GB_void *Swork = NULL ;
if (!recycle_Ai)
{
// allocate jwork of size anz
GB_MALLOC_MEMORY (jwork, anz, sizeof (int64_t)) ;
}
if (op != NULL)
{
// allocate Swork of size anz * csize
GB_MALLOC_MEMORY (Swork, anz, csize) ;
}
if ((!recycle_Ai && (jwork == NULL))
|| ((op != NULL) && (Swork == NULL)))
{
// out of memory
GB_FREE_MEMORY (iwork, anz, sizeof (int64_t)) ;
GB_FREE_MEMORY (jwork, anz, sizeof (int64_t)) ;
GB_FREE_MEMORY (Swork, anz, csize) ;
GB_FREE_A_AND_C ;
GB_FREE_WORK ;
return (GB_OUT_OF_MEMORY) ;
}
//------------------------------------------------------------------
// construct jwork and Swork
//------------------------------------------------------------------
// "row" indices of A become "column" indices of C
if (recycle_Ai)
{
// Ai is used as workspace for the "column" indices of C.
// jwork is a shallow copy of Ai, and is freed by GB_builder.
jwork = Ai ;
ASSERT (in_place) ;
// set Ai to NULL so it is not freed by GB_FREE_IN_PLACE_A
Ai = NULL ;
}
else
{
// jwork = Ai, making a deep copy. jwork is freed by
// GB_builder. A->i is not modified, even if out of memory.
GB_memcpy (jwork, Ai, anz * sizeof (int64_t), nthreads) ;
}
// numerical values: apply the op, typecast, or make shallow copy
if (op != NULL)
{
// Swork = op ((op->xtype) Ax)
GB_apply_op (Swork, op, Ax, atype, anz, Context) ;
// GB_builder will not need to typecast Swork to T->x, and it
// may choose to transplant it into T->x
scode = ccode ;
#if 0
if (in_place && !Ax_shallow)
{
// A is being transposed in place so A->x is no longer
// needed. If A->x is shallow this can be skipped. T->x
// will not be shallow if the op is present. A->x should
// be freed early to free up space for GB_builder.
// However, in the current usage, when op is used, A is not
// transposed in place, so this step is not needed.
ASSERT (GB_DEAD_CODE) ;
GB_FREE_MEMORY (Ax, anzmax , asize) ;
}
#endif
}
else
{
// GB_builder will typecast S from atype to ctype if needed.
// S is a shallow copy of Ax, and must not be modified.
S = Ax ;
scode = acode ;
}
//------------------------------------------------------------------
// build the matrix: T = (ctype) A' or op ((xtype) A')
//------------------------------------------------------------------
// internally, jwork is freed and then T->x is allocated, so the
// total high-water memory usage is anz * max (csize,
// sizeof(int64_t)). T is always hypersparse.
// If op is not NULL, then Swork can be transplanted into T in
// GB_builder, instead. However, this requires the tuples to be
// sorted on input, which is possible but rare for GB_transpose.
GrB_Matrix T ;
info = GB_builder
(
&T, // create T
ctype, // T is of type ctype
avdim, // T->vlen = A->vdim, always > 1
avlen, // T->vdim = A->vlen, always > 1
C_is_csc, // T has the same CSR/CSC format as C
&iwork, // iwork_handle, becomes T->i on output
&jwork, // jwork_handle, freed on output
&Swork, // Swork_handle, freed on output
false, // tuples are not sorted on input
true, // tuples have no duplicates
anz, // size of iwork, jwork, and Swork
true, // is_matrix: unused
false, // ijcheck: unused
NULL, NULL, // original I,J indices: not used here
S, // array of values of type scode, not modified
anz, // number of tuples
NULL, // no dup operator needed (input has no duplicates)
scode, // type of S or Swork
Context
) ;
// GB_builder always frees jwork, and either frees iwork or
// transplants it in to T->i and sets iwork to NULL. So iwork and
// jwork are always NULL on output. GB_builder does not modify S.
ASSERT (iwork == NULL && jwork == NULL && Swork == NULL) ;
//------------------------------------------------------------------
// free prior space and transplant T into C
//------------------------------------------------------------------
// Free the prior content of the input matrix, if done in place.
// Ap, Ah, and Ai have already been freed, but Ax has not.
GB_FREE_IN_PLACE_A ;
if (info != GrB_SUCCESS)
{
// out of memory in GB_builder
GB_FREE_A_AND_C ;
GB_FREE_WORK ;
return (info) ;
}
// Transplant T in to the result C. The matrix T is not shallow
// and no typecasting is done, so this will always succeed.
info = GB_transplant (*Chandle, ctype, &T, Context) ;
ASSERT (info == GrB_SUCCESS) ;
}
else
{
//==================================================================
// transpose via bucket sort
//==================================================================
// This method does not operate on the matrix in place, so it must
// create a temporary matrix T. Then the input matrix is freed and
// replaced with the new matrix T.
ASSERT (!A_is_hyper) ;
// T is also typecasted to ctype, if not NULL
GrB_Matrix T ;
info = GB_transpose_bucket (&T, ctype, C_is_csc, A, op, Context) ;
// free prior content, if C=A' is being done in place
if (in_place_A)
{
// free all content of A, but not the header, if in place of A
GB_PHIX_FREE (A) ; // transpose in-place
}
else if (in_place_C)
{
// free all of C, including the header, if done in place of C
GB_MATRIX_FREE (Chandle) ;
}
if (info != GrB_SUCCESS)
{
// out of memory in GB_transpose_bucket
GB_FREE_C ;
GB_FREE_WORK ;
return (info) ;
}
ASSERT_OK (GB_check (T, "T from bucket", GB0)) ;
if (in_place_A)
{
// The header of A has not been freed, since it is used for the
// output. Transplant T back into A and free T. T is not
// shallow and no typecast is done so this will always succeed.
info = GB_transplant (A, ctype, &T, Context) ;
ASSERT (info == GrB_SUCCESS) ;
}
else
{
// If C=A' is done in place of C, then the header and content
// of the input C has been freed. The output T can now be
// moved to the Chandle.
ASSERT (*Chandle == NULL) ;
(*Chandle) = T ;
}
}
}
//--------------------------------------------------------------------------
// free workspace
//--------------------------------------------------------------------------
GB_FREE_WORK ;
//--------------------------------------------------------------------------
// conform the result to the desired hypersparsity of A
//--------------------------------------------------------------------------
// get the output matrix
C = (*Chandle) ;
// transplant the hyper_ratio from A to C
C->hyper_ratio = A_hyper_ratio ;
ASSERT_OK (GB_check (C, "C to conform in GB_transpose", GB0)) ;
info = GB_to_hyper_conform (C, Context) ;
if (info != GrB_SUCCESS)
{
// out of memory
GB_FREE_C ;
return (info) ;
}
ASSERT_OK (GB_check (*Chandle, "Chandle conformed in GB_transpose", GB0)) ;
return (GrB_SUCCESS) ;
}
|
omp2.c | #include<stdio.h>
#include <omp.h>
/* Minimal OpenMP demo: a 16-iteration parallel loop in which the thread
 * numbered 0 reports the available processor count and the team size for
 * every iteration assigned to it. */
int main() {
    int i;
#pragma omp parallel for
    for (i = 0; i < 16; i++) {
        if (omp_get_thread_num() == 0) {
            /* two output lines, identical to two separate printf calls */
            printf("%d\n%d\n", omp_get_num_procs(), omp_get_num_threads());
        }
    }
    return 0;
}
|
day05_omp.c | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <stdbool.h>
#include <omp.h>
#define NUM_SEATS 1024 // 128 * 8
int find_element(char* pass, int len);
/* Advent of Code day 5: decode boarding passes in parallel, report the
 * largest seat id and the single free seat with occupied neighbours.
 * Input: lines of 7 row chars (F/B) + 3 column chars (L/R) on stdin. */
int main(int argc, char* argv[]) {
    char rowstrings[NUM_SEATS][8], colstrings[NUM_SEATS][4];
    int row, col, id, largest_id = 0;
    bool seats[NUM_SEATS] = {false};
    int i = 0, num_tickets = 0, my_id = -1;  /* -1 = "seat not found" sentinel */

    /* Read the whole stream up front so the work can be split across
     * threads.  Bound the loop so oversized input cannot overflow the
     * fixed-size buffers (the original read unconditionally). */
    while (i < NUM_SEATS &&
           scanf("%7s%3s\n", rowstrings[i], colstrings[i]) > 0) {
        i++;
    }
    num_tickets = i; // Will be less than the number of seats

    # pragma omp parallel\
    shared(num_tickets, seats, rowstrings, colstrings, my_id) \
    private(i, row, col, id) default(none) \
    reduction(max:largest_id)
    {
        if (omp_get_thread_num() == 0) printf("Running on %d threads\n", omp_get_num_threads());
        /* Decode every pass; each id writes a distinct seats[] slot and
         * largest_id is combined via the max reduction. */
        #pragma omp for schedule(static)
        for (i=0; i<num_tickets; i++) {
            row = find_element(rowstrings[i], 7);
            col = find_element(colstrings[i], 3);
            id = row * 8 + col;
            seats[id] = true;
            if (id > largest_id) largest_id = id;
        }
        /* Find the empty seat with both neighbours occupied.  Stop at
         * NUM_SEATS-1: the original loop read seats[i+1] one element past
         * the end of the array on its final iteration.  At most one seat
         * matches, but guard the shared write anyway. */
        #pragma omp for schedule(static)
        for (i=1; i<NUM_SEATS-1; i++) {
            if (seats[i] == false && seats[i-1] && seats[i+1]) {
                #pragma omp critical
                my_id = i;
            }
        }
    }
    printf("The largest Ticket ID is: %d\n", largest_id);
    printf("My Ticket ID is: %d\n", my_id);
    return 0;
}
/*
 * Decode a binary-space-partition string ('F'/'B' for rows, 'L'/'R' for
 * columns) into a seat index by repeated halving.
 *
 * pass: string holding at least `len` partition characters.
 * len:  number of characters to consume; search space is [0, 2^len - 1].
 *
 * Returns the surviving lower bound `lo`.  'F'/'L' keep the lower half,
 * 'B'/'R' keep the upper half; other characters leave the range unchanged.
 */
int find_element(char* pass, int len) {
    int lo = 0, hi, i, mid;
    /* 2^len - 1 as an integer shift, instead of the original pow() call
     * that rounded a double back to int (and required <math.h>). */
    hi = (1 << len) - 1;
    for (i = 0; i < len; i++) {
        mid = (hi + lo) / 2;
        if (pass[i] == 'F' || pass[i] == 'L') hi = mid - 1;      /* lower half */
        else if (pass[i] == 'B' || pass[i] == 'R') lo = mid + 1; /* upper half */
    }
    return lo;
}
|
real_self_energy.c | /* Copyright (C) 2015 Atsushi Togo */
/* All rights reserved. */
/* This file is part of phonopy. */
/* Redistribution and use in source and binary forms, with or without */
/* modification, are permitted provided that the following conditions */
/* are met: */
/* * Redistributions of source code must retain the above copyright */
/* notice, this list of conditions and the following disclaimer. */
/* * Redistributions in binary form must reproduce the above copyright */
/* notice, this list of conditions and the following disclaimer in */
/* the documentation and/or other materials provided with the */
/* distribution. */
/* * Neither the name of the phonopy project nor the names of its */
/* contributors may be used to endorse or promote products derived */
/* from this software without specific prior written permission. */
/* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS */
/* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT */
/* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS */
/* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE */
/* COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, */
/* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, */
/* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; */
/* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER */
/* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT */
/* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN */
/* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */
/* POSSIBILITY OF SUCH DAMAGE. */
#include "real_self_energy.h"
#include <math.h>
#include <stdlib.h>
#include "phonoc_array.h"
#include "phonoc_utils.h"
#include "real_to_reciprocal.h"
static double get_real_self_energy_at_band(
const long band_index, const Darray *fc3_normal_squared,
const double fpoint, const double *frequencies, const long (*triplets)[3],
const long *triplet_weights, const double epsilon, const double temperature,
const double unit_conversion_factor, const double cutoff_frequency);
static double sum_real_self_energy_at_band(
const long num_band, const double *fc3_normal_squared, const double fpoint,
const double *freqs1, const double *freqs2, const double epsilon,
const double temperature, const double cutoff_frequency);
static double sum_real_self_energy_at_band_0K(
const long num_band, const double *fc3_normal_squared, const double fpoint,
const double *freqs1, const double *freqs2, const double epsilon,
const double cutoff_frequency);
/* Compute the real part of the phonon self-energy for each requested band
 * index, writing one value per band into real_self_energy.  A band whose
 * frequency at the first grid point of triplets[0] lies below
 * cutoff_frequency gets exactly zero. */
void rse_get_real_self_energy_at_bands(
    double *real_self_energy, const Darray *fc3_normal_squared,
    const long *band_indices, const double *frequencies,
    const long (*triplets)[3], const long *triplet_weights,
    const double epsilon, const double temperature,
    const double unit_conversion_factor, const double cutoff_frequency) {
    long band, n_band0, n_band, first_gp;
    double band_freq;

    n_band0 = fc3_normal_squared->dims[1];
    n_band = fc3_normal_squared->dims[2];
    first_gp = triplets[0][0];

    /* n_band0 must match the number of entries in band_indices. */
    for (band = 0; band < n_band0; band++) {
        band_freq = frequencies[first_gp * n_band + band_indices[band]];
        if (band_freq < cutoff_frequency) {
            real_self_energy[band] = 0;
            continue;
        }
        real_self_energy[band] = get_real_self_energy_at_band(
            band, fc3_normal_squared, band_freq, frequencies, triplets,
            triplet_weights, epsilon, temperature, unit_conversion_factor,
            cutoff_frequency);
    }
}
/* Compute the real part of the self-energy sampled at one fixed probe
 * frequency (instead of at each band's own frequency).
 *
 * real_self_energy: output, one entry per band index (dims[1] entries).
 * frequency_point:  the probe frequency.
 * Remaining arguments are forwarded to get_real_self_energy_at_band().
 *
 * If the probe frequency is below cutoff_frequency the result is zero for
 * every band, so that case is handled once up front (the original
 * re-tested this loop-invariant condition on every iteration). */
void rse_get_real_self_energy_at_frequency_point(
    double *real_self_energy, const double frequency_point,
    const Darray *fc3_normal_squared, const long *band_indices,
    const double *frequencies, const long (*triplets)[3],
    const long *triplet_weights, const double epsilon, const double temperature,
    const double unit_conversion_factor, const double cutoff_frequency) {
    long i, num_band0;

    num_band0 = fc3_normal_squared->dims[1];

    /* num_band0 and num_band_indices have to be same. */
    if (frequency_point < cutoff_frequency) {
        for (i = 0; i < num_band0; i++) {
            real_self_energy[i] = 0;
        }
        return;
    }
    for (i = 0; i < num_band0; i++) {
        real_self_energy[i] = get_real_self_energy_at_band(
            i, fc3_normal_squared, frequency_point, frequencies, triplets,
            triplet_weights, epsilon, temperature, unit_conversion_factor,
            cutoff_frequency);
    }
}
/* Sum the real self-energy contribution of every q-point triplet for one
 * band, parallelized over triplets with an OpenMP reduction on `shift`.
 * Dispatches to the finite-temperature kernel when temperature > 0 and to
 * the 0 K kernel otherwise.  The returned value already includes the
 * triplet weights and the unit conversion factor. */
static double get_real_self_energy_at_band(
const long band_index, const Darray *fc3_normal_squared,
const double fpoint, const double *frequencies, const long (*triplets)[3],
const long *triplet_weights, const double epsilon, const double temperature,
const double unit_conversion_factor, const double cutoff_frequency) {
long i, num_triplets, num_band0, num_band, gp1, gp2;
double shift;
num_triplets = fc3_normal_squared->dims[0];
num_band0 = fc3_normal_squared->dims[1];
num_band = fc3_normal_squared->dims[2];
shift = 0;
#ifdef PHPYOPENMP
#pragma omp parallel for private(gp1, gp2) reduction(+ : shift)
#endif
for (i = 0; i < num_triplets; i++) {
/* second and third grid points of the triplet; the first is the
 * fixed point the self-energy is evaluated at */
gp1 = triplets[i][1];
gp2 = triplets[i][2];
if (temperature > 0) {
shift += sum_real_self_energy_at_band(
num_band,
/* slice of fc3_normal_squared for (triplet i, band_index):
 * a num_band x num_band matrix */
fc3_normal_squared->data +
i * num_band0 * num_band * num_band +
band_index * num_band * num_band,
fpoint, frequencies + gp1 * num_band,
frequencies + gp2 * num_band, epsilon, temperature,
cutoff_frequency) *
triplet_weights[i] * unit_conversion_factor;
} else {
shift +=
sum_real_self_energy_at_band_0K(
num_band,
fc3_normal_squared->data +
i * num_band0 * num_band * num_band +
band_index * num_band * num_band,
fpoint, frequencies + gp1 * num_band,
frequencies + gp2 * num_band, epsilon, cutoff_frequency) *
triplet_weights[i] * unit_conversion_factor;
}
}
return shift;
}
/* Finite-temperature kernel: accumulate, over all band pairs (j1, j2) of
 * the second and third phonons, the epsilon-smoothed pole terms weighted
 * by the |fc3|^2 matrix element.  Each 1/f pole is regularized as
 * f / (f^2 + epsilon^2).  Bands at or below cutoff_frequency are skipped. */
static double sum_real_self_energy_at_band(
    const long num_band, const double *fc3_normal_squared, const double fpoint,
    const double *freqs1, const double *freqs2, const double epsilon,
    const double temperature, const double cutoff_frequency) {
    long j1, j2;
    double occ1, occ2, fsum, fdiff, fmix1, fmix2, eps2, result;

    eps2 = epsilon * epsilon;
    result = 0;
    for (j1 = 0; j1 < num_band; j1++) {
        if (!(freqs1[j1] > cutoff_frequency)) continue;
        occ1 = phonoc_bose_einstein(freqs1[j1], temperature);
        for (j2 = 0; j2 < num_band; j2++) {
            if (!(freqs2[j2] > cutoff_frequency)) continue;
            occ2 = phonoc_bose_einstein(freqs2[j2], temperature);
            /* the four pole positions fpoint -/+ w1 -/+ w2 */
            fsum = fpoint + freqs1[j1] + freqs2[j2];
            fdiff = fpoint - freqs1[j1] - freqs2[j2];
            fmix1 = fpoint - freqs1[j1] + freqs2[j2];
            fmix2 = fpoint + freqs1[j1] - freqs2[j2];
            result += (-(occ1 + occ2 + 1) * fsum / (fsum * fsum + eps2) +
                       (occ1 + occ2 + 1) * fdiff / (fdiff * fdiff + eps2) -
                       (occ1 - occ2) * fmix1 / (fmix1 * fmix1 + eps2) +
                       (occ1 - occ2) * fmix2 / (fmix2 * fmix2 + eps2)) *
                      fc3_normal_squared[j1 * num_band + j2];
        }
    }
    return result;
}
/* Zero-temperature kernel: all occupation numbers vanish, leaving only the
 * two epsilon-smoothed poles at fpoint +/- (w1 + w2), weighted by the
 * |fc3|^2 matrix element.  Bands at or below cutoff_frequency are skipped. */
static double sum_real_self_energy_at_band_0K(
    const long num_band, const double *fc3_normal_squared, const double fpoint,
    const double *freqs1, const double *freqs2, const double epsilon,
    const double cutoff_frequency) {
    long j1, j2;
    double fsum, fdiff, eps2, result;

    eps2 = epsilon * epsilon;
    result = 0;
    for (j1 = 0; j1 < num_band; j1++) {
        if (!(freqs1[j1] > cutoff_frequency)) continue;
        for (j2 = 0; j2 < num_band; j2++) {
            if (!(freqs2[j2] > cutoff_frequency)) continue;
            fsum = fpoint + freqs1[j1] + freqs2[j2];
            fdiff = fpoint - freqs1[j1] - freqs2[j2];
            result += (-fsum / (fsum * fsum + eps2) +
                       fdiff / (fdiff * fdiff + eps2)) *
                      fc3_normal_squared[j1 * num_band + j2];
        }
    }
    return result;
}
|
GB_unop.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply)
// op(A') function: GB (_unop_tran)
// C type: GB_ctype
// A type: GB_atype
// cast: GB_cast(cij,aij)
// unaryop: GB_unaryop(cij,aij)
// A and C element types (generator placeholders, substituted per kernel)
#define GB_ATYPE \
GB_atype
#define GB_CTYPE \
GB_ctype
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
GB_geta(aij,Ax,pA)
// access an entry of the result array
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
GB_unaryop(z, x) ;
// casting
#define GB_CAST(z, aij) \
GB_cast(z, aij) ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_geta(aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_cast(z, aij) ; \
GB_unaryop(Cx [pC], z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
GB_disable
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
if_unop_apply_enabled
/* Apply a unary operator elementwise: Cx [p] = op (cast (Ax [p])).
 * This is generator template code: GB_atype/GB_ctype/GB_geta/GB_cast/
 * GB_unaryop are placeholders substituted per operator/type pair before
 * compilation.  Cx and Ax may alias (in-place apply). */
GrB_Info GB (_unop_apply)
(
GB_ctype *Cx, // Cx and Ax may be aliased
const GB_atype *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
// full/sparse case: all anz entries are present
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
GB_geta(aij, Ax, p) ;
GB_cast(z, aij) ;
GB_unaryop(Cx [p], z) ;
}
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// skip entries absent from the bitmap
if (!Ab [p]) continue ;
GB_geta(aij, Ax, p) ;
GB_cast(z, aij) ;
GB_unaryop(Cx [p], z) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
endif_unop_apply_enabled
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
/* C = op (cast (A')): transpose A, typecast, and apply the unary operator.
 * The loop body lives in the shared template GB_unop_transpose.c, which
 * consumes the GB_* macros defined above; Workspaces and A_slice partition
 * the work across nworkspaces/nthreads. */
GrB_Info GB (_unop_tran)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
draw-private.h | /*
Copyright 1999-2012 ImageMagick Studio LLC, a non-profit organization
dedicated to making software imaging solutions freely available.
You may not use this file except in compliance with the License.
obtain a copy of the License at
http://www.imagemagick.org/script/license.php
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
MagickCore private image drawing methods.
*/
#ifndef _MAGICKCORE_DRAW_PRIVATE_H
#define _MAGICKCORE_DRAW_PRIVATE_H
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
#include "magick/cache.h"
#include "magick/image.h"
#include "magick/memory_.h"
/*
  Resolve the fill color at pixel (x,y): a solid fill is returned straight
  from draw_info, while a tile pattern is sampled (tiling virtual-pixel
  semantics) at the pattern's offset.  Patterns without an alpha channel
  yield fully opaque pixels.  Returns MagickTrue on success.
*/
static inline MagickBooleanType GetFillColor(const DrawInfo *draw_info,
  const ssize_t x,const ssize_t y,PixelPacket *pixel)
{
  Image
    *tile;

  MagickBooleanType
    status;

  tile=draw_info->fill_pattern;
  if (tile == (Image *) NULL)
    {
      /* no pattern: the fill is a constant color */
      *pixel=draw_info->fill;
      return(MagickTrue);
    }
#if defined(MAGICKCORE_OPENMP_SUPPORT) && (_OPENMP >= 200203)
  #pragma omp critical
#endif
  status=GetOneVirtualMethodPixel(tile,TileVirtualPixelMethod,
    x+tile->tile_offset.x,y+tile->tile_offset.y,pixel,
    &tile->exception);
  if (tile->matte == MagickFalse)
    pixel->opacity=OpaqueOpacity;
  return(status);
}
/*
  Resolve the stroke color at pixel (x,y).  Mirrors GetFillColor(): a solid
  stroke color is returned directly, a tile pattern is sampled at the
  pattern offset with tiling virtual-pixel semantics, and patterns without
  an alpha channel yield fully opaque pixels.
*/
static inline MagickBooleanType GetStrokeColor(const DrawInfo *draw_info,
  const ssize_t x,const ssize_t y,PixelPacket *pixel)
{
  Image
    *tile;

  MagickBooleanType
    status;

  tile=draw_info->stroke_pattern;
  if (tile == (Image *) NULL)
    {
      /* no pattern: constant stroke color */
      *pixel=draw_info->stroke;
      return(MagickTrue);
    }
#if defined(MAGICKCORE_OPENMP_SUPPORT) && (_OPENMP >= 200203)
  #pragma omp critical
#endif
  status=GetOneVirtualMethodPixel(tile,TileVirtualPixelMethod,
    x+tile->tile_offset.x,y+tile->tile_offset.y,pixel,
    &tile->exception);
  if (tile->matte == MagickFalse)
    pixel->opacity=OpaqueOpacity;
  return(status);
}
#if defined(__cplusplus) || defined(c_plusplus)
}
#endif
#endif
|
Fig_8.3_RegPromote.c | // sample compile command: "gcc -fopenmp -c Fig_8.3_RegPromote.c" to generate *.o object file
#include <omp.h>
#define TOL 0.0001
#define MAX 100000
#define NMAX 1000
//embarrassingly parallel computation, returns a convergence parameter
double doit(double *A, int N, int id);
/* Figure 8.3: iterative worker loop used to discuss register promotion.
 * NOTE(review): this example is intentionally fragile -- `iter` is shared
 * and incremented by thread 0 without any flush/atomic, and `conv` is
 * firstprivate so each thread tests only its own copy.  A compiler may
 * legally promote `iter` into a register inside the while loop, in which
 * case threads other than 0 never observe the increment and spin until
 * their own `conv` drops below TOL.  Do not "fix" this without consulting
 * the accompanying text: the broken sharing is the point of the figure. */
int main()
{
int iter = 0;
int N = 1000;
double A[NMAX] = {0.0};
double conv=0.0;
#pragma omp parallel shared(A,N,iter) firstprivate(conv)
{
int id = omp_get_thread_num();
int nthrd = omp_get_num_threads(); /* unused; kept as in the figure */
while (iter < MAX) {
/* each thread repeats the computation; conv is this thread's copy */
conv = doit(A, N, id);
if (conv < TOL) break;
/* only thread 0 advances the shared counter -- with no flush there
 * is no guarantee other threads ever see the update */
if (id == 0) iter++;
}
} //end parallel region
}
|
SoftMax.c | #ifndef TH_GENERIC_FILE
#define TH_GENERIC_FILE "generic/SoftMax.c"
#else
/* Softmax forward pass over the `dim` axis.
 * Accepted layouts (from the size/stride assignments below):
 *   1D: (dim)                 -> nframe = 1, stride = 1
 *   2D: (nframe, dim)         -> stride = 1
 *   3D: (dim, h, w)           -> nframe = 1, stride = h*w
 *   4D: (nframe, dim, h, w)   -> stride = h*w
 * The stride*nframe fibers along `dim` are independent; each one is
 * exponentiated relative to its maximum (max-subtraction for numerical
 * stability) and normalized to sum to 1. */
void THNN_(SoftMax_updateOutput)(
THNNState *state,
THTensor *input,
THTensor *output)
{
real *input_data, *output_data;
ptrdiff_t nframe = 0, dim = 0, stride = 0;
ptrdiff_t t;
if (input->nDimension == 1)
{
nframe = 1;
dim = input->size[0];
stride = 1;
}
else if (input->nDimension == 2)
{
nframe = input->size[0];
dim = input->size[1];
stride = 1;
}
else if (input->nDimension == 3)
{
nframe = 1;
dim = input->size[0];
stride = input->size[1]*input->size[2];
}
else if (input->nDimension == 4)
{
nframe = input->size[0];
dim = input->size[1];
stride = input->size[2]*input->size[3];
}
else
{
THArgCheck(0, 2, "1D, 2D, 3D or 4D tensor expected");
}
/* work on a contiguous copy; released at the end */
input = THTensor_(newContiguous)(input);
THTensor_(resizeAs)(output, input);
input_data = THTensor_(data)(input);
output_data = THTensor_(data)(output);
/* one fiber (all `dim` entries at a fixed frame/spatial offset) per t */
#pragma omp parallel for private(t)
for (t = 0; t < stride*nframe; t++)
{
real *input_ptr = input_data + (t/stride)*dim*stride + t % stride;
real *output_ptr = output_data + (t/stride)*dim*stride + t % stride;
real inputMax = -THInf;
accreal sum;
ptrdiff_t d;
/* fiber maximum, subtracted below so exp() cannot overflow */
for (d = 0; d < dim; d++)
{
if (input_ptr[d*stride] >= inputMax) inputMax = input_ptr[d*stride];
}
sum = 0;
for (d = 0; d < dim; d++)
{
real z = exp(input_ptr[d*stride] - inputMax);
output_ptr[d*stride] = z;
sum += z;
}
/* normalize so the fiber sums to 1 */
for (d = 0; d < dim; d++)
{
output_ptr[d*stride] *= 1/sum;
}
}
THTensor_(free)(input);
}
/* Softmax backward pass: for each fiber along `dim`,
 *   gradInput = output * (gradOutput - dot(gradOutput, output))
 * i.e. the softmax Jacobian-vector product.  Shape bookkeeping mirrors
 * SoftMax_updateOutput, but dimensions are read from `output`. */
void THNN_(SoftMax_updateGradInput)(
THNNState *state,
THTensor *input,
THTensor *gradOutput,
THTensor *gradInput,
THTensor *output)
{
THNN_CHECK_SHAPE(input, gradOutput);
real *gradInput_data, *gradOutput_data, *output_data;
ptrdiff_t nframe = 0, dim = 0, stride = 0;
ptrdiff_t t;
if (output->nDimension == 1)
{
nframe = 1;
dim = output->size[0];
stride = 1;
}
else if (output->nDimension == 2)
{
nframe = output->size[0];
dim = output->size[1];
stride = 1;
}
else if (output->nDimension == 3)
{
nframe = 1;
dim = output->size[0];
stride = output->size[1]*output->size[2];
}
else if (output->nDimension == 4)
{
nframe = output->size[0];
dim = output->size[1];
stride = output->size[2]*output->size[3];
}
else
{
THError("1D, 2D, 3D or 4D tensor expected");
}
/* contiguous working copies; released at the end */
gradOutput = THTensor_(newContiguous)(gradOutput);
output = THTensor_(newContiguous)(output);
THTensor_(resizeAs)(gradInput, output);
gradInput_data = THTensor_(data)(gradInput);
output_data = THTensor_(data)(output);
gradOutput_data = THTensor_(data)(gradOutput);
/* one independent fiber per t */
#pragma omp parallel for private(t)
for (t = 0; t < stride*nframe; t++)
{
real *gradInput_ptr = gradInput_data + (t/stride)*dim*stride + t % stride;
real *output_ptr = output_data + (t/stride)*dim*stride + t % stride;
real *gradOutput_ptr = gradOutput_data + (t/stride)*dim*stride + t % stride;
ptrdiff_t d;
accreal sum = 0;
for (d = 0; d < dim; d++)
sum += (accreal)gradOutput_ptr[d*stride] * output_ptr[d*stride];
for (d = 0; d < dim; d++)
gradInput_ptr[d*stride] = output_ptr[d*stride] * (gradOutput_ptr[d*stride] - sum);
}
THTensor_(free)(gradOutput);
THTensor_(free)(output);
}
#endif
|
perturbation_fold.c | #ifdef HAVE_CONFIG_H
#include "config.h"
#endif
#include <assert.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#ifdef VRNA_WITH_GSL
#include <gsl/gsl_multimin.h>
#endif
#include "ViennaRNA/eval.h"
#include "ViennaRNA/fold_vars.h"
#include "ViennaRNA/constraints/hard.h"
#include "ViennaRNA/constraints/soft.h"
#include "ViennaRNA/fold.h"
#include "ViennaRNA/part_func.h"
#include "ViennaRNA/utils/basic.h"
#include "ViennaRNA/params/basic.h"
#include "ViennaRNA/perturbation_fold.h"
/* Fill probability[1..length] with the probability that each sequence
 * position is unpaired: start every entry at 1 and subtract the base-pair
 * probability of each pair (i, j) from both endpoints.  Index 0 is set to
 * 1 but carries no meaning (positions are 1-based). */
static void
calculate_probability_unpaired(vrna_fold_compound_t *vc,
                               double *probability)
{
  FLT_OR_DBL *bppm = vc->exp_matrices->probs;
  int *idx = vc->iindx;
  int n = vc->length;
  int i, j;

  for (i = 0; i <= n; ++i)
    probability[i] = 1;

  for (i = 1; i <= n; ++i)
    for (j = i + 1; j <= n; ++j) {
      FLT_OR_DBL p_ij = bppm[idx[i] - j];

      probability[i] -= p_ij;
      probability[j] -= p_ij;
    }
}
#if 0
/* Euclidean norm of vector[1..length] (entry 0 ignored).  Currently
 * compiled out via the surrounding #if 0. */
static double
calculate_norm(double *vector,
int length)
{
double sum = 0;
int i;
for (i = 1; i <= length; ++i)
sum += vector[i] * vector[i];
return sqrt(sum);
}
#endif
/* Install the perturbation vector epsilon (1-based, in kcal/mol) as soft
 * constraints on unpaired positions of vc, replacing any previously set
 * soft constraints. */
static void
addSoftConstraint(vrna_fold_compound_t *vc,
                  const double *epsilon,
                  int length)
{
  int i;

  /* remove previous soft constraints */
  vrna_sc_init(vc);

  /* prepare vector of unpaired constraints in kcal/mol */
  FLT_OR_DBL *constraints = (FLT_OR_DBL *)vrna_alloc(sizeof(FLT_OR_DBL) * (length + 1));

  /* Convert elementwise: epsilon is double but FLT_OR_DBL may be float.
   * The original memcpy() copied sizeof(FLT_OR_DBL)*length raw bytes of
   * the double array, which misaligns every value whenever the two types
   * differ in size. */
  for (i = 1; i <= length; ++i)
    constraints[i] = (FLT_OR_DBL)epsilon[i];

  /* add new soft constraints */
  vrna_sc_set_up(vc, (const FLT_OR_DBL *)constraints, VRNA_OPTION_DEFAULT);

  free(constraints);
}
/* Penalty contribution of a single deviation `value` under the selected
 * objective: value^2 for the quadratic objective, |value| for the
 * absolute-value objective.  Any other objective id is a programming
 * error (assert fires in debug builds, 0 is returned otherwise). */
static double
evaluate_objective_function_contribution(double value,
                                         int objective_function)
{
  if (objective_function == VRNA_OBJECTIVE_FUNCTION_ABSOLUTE)
    return fabs(value);

  if (objective_function == VRNA_OBJECTIVE_FUNCTION_QUADRATIC)
    return value * value;

  assert(0);
  return 0;
}
/* Objective value for a perturbation vector epsilon:
 *   score = sum_i penalty(epsilon[i]) / tau^2
 *         + sum_i penalty(p_i - q_i) / sigma^2
 * where p_i is the unpaired probability predicted under the perturbed
 * model and q_i the observed one.  Positions with q_i < 0 are treated as
 * missing data and contribute no discrepancy term.  The penalty is
 * quadratic or absolute depending on objective_function. */
static double
evaluate_perturbation_vector_score(vrna_fold_compound_t *vc,
const double *epsilon,
const double *q_prob_unpaired,
double sigma_squared,
double tau_squared,
int objective_function)
{
double ret = 0;
double ret2 = 0.;
double *p_prob_unpaired;
int i;
int length = vc->length;
/* calculate unpaired probabilities in the perturbed energy model */
p_prob_unpaired = vrna_alloc(sizeof(double) * (length + 1));
addSoftConstraint(vc, epsilon, length);
vc->params->model_details.compute_bpp = 1;
vc->exp_params->model_details.compute_bpp = 1;
/* get new (constrained) MFE to scale pf computations properly */
double mfe = (double)vrna_mfe(vc, NULL);
vrna_exp_params_rescale(vc, &mfe);
vrna_pf(vc, NULL);
calculate_probability_unpaired(vc, p_prob_unpaired);
/* the soft constraints were only needed for the evaluation above */
vrna_sc_remove(vc);
for (i = 1; i <= length; ++i) {
/* add penalty for pertubation energies */
ret += evaluate_objective_function_contribution(epsilon[i], objective_function) / tau_squared;
/* add penalty for mismatches between observed and predicted probabilities */
if (q_prob_unpaired[i] >= 0) /* ignore positions with missing data */
ret2 += evaluate_objective_function_contribution(p_prob_unpaired[i] - q_prob_unpaired[i],
objective_function) / sigma_squared;
}
vrna_message_info(stderr, "Score: pertubation: %g\tdiscrepancy: %g", ret, ret2);
free(p_prob_unpaired);
return ret + ret2;
}
/* Exact computation of p(i unpaired) and p(j unpaired | i unpaired) under
 * the perturbed model epsilon:
 *   prob_unpaired[i]                from one unconstrained partition
 *                                   function over the whole sequence;
 *   conditional_prob_unpaired[i][j] from one extra partition function per
 *                                   position i, with i forced unpaired via
 *                                   a hard constraint.
 * The per-position computations are independent and run in parallel when
 * OpenMP is available. */
static void
pairing_probabilities_from_restricted_pf(vrna_fold_compound_t *vc,
const double *epsilon,
double *prob_unpaired,
double **conditional_prob_unpaired)
{
int length = vc->length;
int i;
addSoftConstraint(vc, epsilon, length);
vc->params->model_details.compute_bpp = 1;
vc->exp_params->model_details.compute_bpp = 1;
/* get new (constrained) MFE to scale pf computations properly */
double mfe = (double)vrna_mfe(vc, NULL);
vrna_exp_params_rescale(vc, &mfe);
vrna_pf(vc, NULL);
calculate_probability_unpaired(vc, prob_unpaired);
#ifdef _OPENMP
#pragma omp parallel for private(i)
#endif
for (i = 1; i <= length; ++i) {
vrna_fold_compound_t *restricted_vc;
char *hc_string;
unsigned int constraint_options = VRNA_CONSTRAINT_DB
| VRNA_CONSTRAINT_DB_PIPE
| VRNA_CONSTRAINT_DB_DOT
| VRNA_CONSTRAINT_DB_X
| VRNA_CONSTRAINT_DB_ANG_BRACK
| VRNA_CONSTRAINT_DB_RND_BRACK;
/* dot-bracket constraint string with position i marked 'x' (unpaired) */
hc_string = vrna_alloc(sizeof(char) * (length + 1));
memset(hc_string, '.', length);
hc_string[i - 1] = 'x';
restricted_vc = vrna_fold_compound(vc->sequence,
&(vc->exp_params->model_details),
VRNA_OPTION_DEFAULT);
vrna_constraints_add(restricted_vc, hc_string, constraint_options);
free(hc_string);
vrna_exp_params_subst(restricted_vc, vc->exp_params);
vrna_pf(restricted_vc, NULL);
calculate_probability_unpaired(restricted_vc, conditional_prob_unpaired[i]);
/* NOTE(review): sc is detached before freeing the compound -- this looks
 * like a guard against freeing soft-constraint state owned elsewhere;
 * confirm the intended ownership semantics. */
restricted_vc->sc = NULL;
vrna_fold_compound_free(restricted_vc);
}
vrna_sc_remove(vc);
}
/* Monte-Carlo estimate of p(i unpaired) and p(j unpaired | i unpaired)
 * under the perturbed model epsilon, from `sample_size` structures drawn
 * by stochastic backtracking.  Raw counts are accumulated into the
 * incoming arrays (which are expected to start at zero) and normalized
 * afterwards. */
static void
pairing_probabilities_from_sampling(vrna_fold_compound_t *vc,
const double *epsilon,
int sample_size,
double *prob_unpaired,
double **conditional_prob_unpaired,
unsigned int options)
{
char **samples, **ptr;
int length, i, j;
double mfe;
length = vc->length;
addSoftConstraint(vc, epsilon, length);
/* base-pair probability matrices are not needed for sampling */
vc->params->model_details.compute_bpp = 0;
vc->exp_params->model_details.compute_bpp = 0;
/* get new (constrained) MFE to scale pf computations properly */
mfe = (double)vrna_mfe(vc, NULL);
vrna_exp_params_rescale(vc, &mfe);
vrna_pf(vc, NULL);
samples = vrna_pbacktrack_num(vc,
(unsigned int)sample_size,
options);
/* count unpaired positions ('.') and joint unpaired pairs per sample */
for (ptr = samples; (*ptr); ptr++) {
for (i = length; i > 0; i--) {
if ((*ptr)[i - 1] == '.') {
++prob_unpaired[i];
for (j = length; j > 0; j--)
if ((*ptr)[j - 1] == '.')
++conditional_prob_unpaired[i][j];
}
}
free(*ptr);
}
free(samples);
/* normalize: conditionals by the count of "i unpaired" (while it is still
 * a count), then the marginal by the number of samples */
for (i = 1; i <= length; ++i) {
if (prob_unpaired[i])
for (j = 1; j <= length; ++j)
conditional_prob_unpaired[i][j] /= prob_unpaired[i];
prob_unpaired[i] /= sample_size;
assert(prob_unpaired[i] >= 0 && prob_unpaired[i] <= 1);
}
vrna_sc_remove(vc);
}
/* Allocate a 1-based probability vector (*unpaired, length+1 entries) and
 * a 1-based matrix of conditional probabilities (rows 1..length, each with
 * length+1 entries; row 0 is not allocated). */
static void
allocateProbabilityArrays(double **unpaired,
                          double ***conditional_unpaired,
                          int length)
{
  int row;

  *unpaired = vrna_alloc(sizeof(double) * (length + 1));
  *conditional_unpaired = vrna_alloc(sizeof(double *) * (length + 1));
  for (row = 1; row <= length; ++row)
    (*conditional_unpaired)[row] = vrna_alloc(sizeof(double) * (length + 1));
}
/* Release the arrays produced by allocateProbabilityArrays(): the vector,
 * each matrix row 1..length, and finally the matrix spine. */
static void
freeProbabilityArrays(double *unpaired,
                      double **conditional_unpaired,
                      int length)
{
  int row;

  free(unpaired);
  for (row = 1; row <= length; ++row)
    free(conditional_unpaired[row]);
  free(conditional_unpaired);
}
/* Gradient of the score computed by evaluate_perturbation_vector_score()
 * with respect to each epsilon[mu]; results go into gradient[1..length].
 *
 * Probability source (p_i and the conditionals p_i|mu):
 *   sample_size > 0 : estimated from sample_size sampled structures
 *   sample_size < 0 : estimated from -sample_size non-redundantly sampled
 *                     structures
 *   sample_size == 0: exact, via restricted partition functions
 *
 * Quadratic objective (matching the loop below):
 *   d/d eps_mu = 2 * ( eps_mu / tau^2
 *     + sum_i (p_i - q_i) p_i (p_mu - p_mu|i) / (sigma^2 kT) )
 * Absolute-value objective: the corresponding subgradient, with sign
 * factors in place of the linear deviations.  Positions with q_i < 0 are
 * treated as missing data and skipped. */
static void
evaluate_perturbation_vector_gradient(vrna_fold_compound_t *vc,
const double *epsilon,
const double *q_prob_unpaired,
double sigma_squared,
double tau_squared,
int objective_function,
int sample_size,
double *gradient)
{
double *p_prob_unpaired;
double **p_conditional_prob_unpaired;
int i, mu;
int length = vc->length;
/* kT / 1000: scale factor between probability derivatives and the energy
 * unit of epsilon (presumably kcal/mol -- confirm against vrna docs) */
double kT = vc->exp_params->kT / 1000;
allocateProbabilityArrays(&p_prob_unpaired, &p_conditional_prob_unpaired, length);
if (sample_size > 0) {
pairing_probabilities_from_sampling(vc,
epsilon,
sample_size,
p_prob_unpaired,
p_conditional_prob_unpaired,
VRNA_PBACKTRACK_DEFAULT);
} else if (sample_size < 0) {
pairing_probabilities_from_sampling(vc,
epsilon,
-sample_size,
p_prob_unpaired,
p_conditional_prob_unpaired,
VRNA_PBACKTRACK_NON_REDUNDANT);
} else {
pairing_probabilities_from_restricted_pf(vc,
epsilon,
p_prob_unpaired,
p_conditional_prob_unpaired);
}
for (mu = 1; mu <= length; ++mu) {
double sum = 0;
if (objective_function == VRNA_OBJECTIVE_FUNCTION_QUADRATIC) {
for (i = 1; i <= length; ++i) {
if (q_prob_unpaired[i] < 0) /* ignore positions with missing data */
continue;
sum += (p_prob_unpaired[i] - q_prob_unpaired[i])
* p_prob_unpaired[i] * (p_prob_unpaired[mu] - p_conditional_prob_unpaired[i][mu])
/ sigma_squared;
}
gradient[mu] = 2 * (epsilon[mu] / tau_squared + sum / kT);
} else if (objective_function == VRNA_OBJECTIVE_FUNCTION_ABSOLUTE) {
for (i = 1; i <= length; ++i)
if (q_prob_unpaired[i] >= 0 && p_prob_unpaired[i] != q_prob_unpaired[i]) {
sum += (p_prob_unpaired[i] * (p_prob_unpaired[mu] - p_conditional_prob_unpaired[i][mu])) /
kT
/ sigma_squared
* (p_prob_unpaired[i] > q_prob_unpaired[i] ? 1. : -1.);
}
/* subgradient of |eps_mu|; zero when eps_mu == 0 */
if (epsilon[mu])
sum += (epsilon[mu] > 0 ? 1. : -1.) / tau_squared;
gradient[mu] = sum;
}
}
freeProbabilityArrays(p_prob_unpaired, p_conditional_prob_unpaired, length);
}
#ifdef VRNA_WITH_GSL
/* Bundle of fixed arguments threaded through the GSL minimizer callbacks. */
typedef struct parameters_gsl {
vrna_fold_compound_t *vc; /* fold compound being perturbed */
const double *q_prob_unpaired; /* observed unpaired probs (1-based; <0 = missing) */
double sigma_squared; /* weight of the discrepancy term */
double tau_squared; /* weight of the perturbation penalty */
int objective_function; /* VRNA_OBJECTIVE_FUNCTION_* selector */
int sample_size; /* >0 sampling, <0 non-redundant sampling, 0 exact */
} parameters_gsl;
/* GSL objective callback: forward the current perturbation vector x to
 * evaluate_perturbation_vector_score() together with the fixed
 * parameters carried in `params`. */
static double
f_gsl(const gsl_vector *x,
      void *params)
{
  parameters_gsl *args = params;

  return evaluate_perturbation_vector_score(args->vc,
                                            x->data,
                                            args->q_prob_unpaired,
                                            args->sigma_squared,
                                            args->tau_squared,
                                            args->objective_function);
}
/* GSL gradient callback: fill df with the score gradient at x. */
static void
df_gsl(const gsl_vector *x,
       void *params,
       gsl_vector *df)
{
  parameters_gsl *p = params;
  /* positions are 1-based; slot 0 is never written by the gradient
     evaluation, so zero it explicitly here */
  gsl_vector_set(df, 0, 0);
  evaluate_perturbation_vector_gradient(p->vc,
                                        x->data,
                                        p->q_prob_unpaired,
                                        p->sigma_squared,
                                        p->tau_squared,
                                        p->objective_function,
                                        p->sample_size,
                                        df->data);
}
/* Combined value+gradient callback required by gsl_multimin_function_fdf;
 * simply delegates to f_gsl and df_gsl. */
static void
fdf_gsl(const gsl_vector *x,
        void *params,
        double *f,
        gsl_vector *g)
{
  *f = f_gsl(x, params);
  df_gsl(x, params, g);
}
#endif /* VRNA_WITH_GSL */
/* Optimize a soft-constraint perturbation vector epsilon (1-based, length
 * vc->length, slot 0 carried but unused) so that predicted unpaired
 * probabilities match the observed q_prob_unpaired, trading data fit
 * (sigma_squared) against perturbation magnitude (tau_squared).
 *
 * If compiled with GSL and `algorithm` names a GSL minimizer, a GSL
 * multidimensional minimizer is used; otherwise a hand-rolled gradient
 * descent with step halving runs.  `callback`, when non-NULL, is invoked
 * after every iteration with the current score and epsilon.
 *
 * NOTE(review): "pertubation" is a typo in the public API name; it cannot
 * be renamed here without breaking callers. */
PUBLIC void
vrna_sc_minimize_pertubation(vrna_fold_compound_t *vc,
                             const double *q_prob_unpaired,
                             int objective_function,
                             double sigma_squared,
                             double tau_squared,
                             int algorithm,
                             int sample_size,
                             double *epsilon,
                             double initialStepSize,
                             double minStepSize,
                             double minImprovement,
                             double minimizerTolerance,
                             progress_callback callback)
{
  int iteration_count = 0;
  const int max_iterations = 100;  /* hard cap for both code paths */
  int length = vc->length;
#ifdef VRNA_WITH_GSL
  const gsl_multimin_fdfminimizer_type *minimizer_type = 0;
  /* zero-terminated table mapping public VRNA_MINIMIZER_* ids to GSL types */
  struct {
    int type;
    const gsl_multimin_fdfminimizer_type *gsl_type;
  } algorithms[] =
  { { VRNA_MINIMIZER_CONJUGATE_FR,
      gsl_multimin_fdfminimizer_conjugate_fr },
    { VRNA_MINIMIZER_CONJUGATE_PR,
      gsl_multimin_fdfminimizer_conjugate_pr },
    { VRNA_MINIMIZER_VECTOR_BFGS,
      gsl_multimin_fdfminimizer_vector_bfgs },
    { VRNA_MINIMIZER_VECTOR_BFGS2,
      gsl_multimin_fdfminimizer_vector_bfgs2 },
    { VRNA_MINIMIZER_STEEPEST_DESCENT,
      gsl_multimin_fdfminimizer_steepest_descent },
    { 0,
      NULL } };
  int i;
  /* resolve the requested algorithm; fall through to the built-in gradient
     descent below when it is not a GSL one */
  for (i = 0; algorithms[i].type; ++i)
    if (algorithms[i].type == algorithm) {
      minimizer_type = algorithms[i].gsl_type;
      break;
    }
  if (minimizer_type) {
    parameters_gsl parameters;
    gsl_multimin_function_fdf fdf;
    gsl_multimin_fdfminimizer *minimizer;
    gsl_vector *vector;
    int status;
    parameters.vc = vc;
    parameters.q_prob_unpaired = q_prob_unpaired;
    parameters.sigma_squared = sigma_squared;
    parameters.tau_squared = tau_squared;
    parameters.objective_function = objective_function;
    parameters.sample_size = sample_size;
    /* arrays are 1-based: dimension length+1 includes the unused slot 0 */
    fdf.n = length + 1;
    fdf.f = &f_gsl;
    fdf.df = &df_gsl;
    fdf.fdf = &fdf_gsl;
    fdf.params = (void *)&parameters;
    minimizer = gsl_multimin_fdfminimizer_alloc(minimizer_type, length + 1);
    /* calloc: the search starts from the all-zero vector — the incoming
       content of epsilon is NOT used as a starting point in this branch */
    vector = gsl_vector_calloc(length + 1);
    /* gsl_multimin_fdfminimizer_set(minimizer, &fdf, vector, 0.01, 1e-4); */
    gsl_multimin_fdfminimizer_set(minimizer, &fdf, vector, initialStepSize, minimizerTolerance);
    if (callback)
      callback(0, minimizer->f, minimizer->x->data);
    do {
      ++iteration_count;
      status = gsl_multimin_fdfminimizer_iterate(minimizer);
      if (callback)
        callback(iteration_count, minimizer->f, minimizer->x->data);
      if (status)  /* iteration could not make progress: stop */
        break;
      status = gsl_multimin_test_gradient(minimizer->gradient, minimizerTolerance);
    } while (status == GSL_CONTINUE && iteration_count < max_iterations);
    /* hand the optimized vector (slots 0..length) back to the caller */
    memcpy(epsilon, minimizer->x->data, sizeof(double) * (length + 1));
    gsl_multimin_fdfminimizer_free(minimizer);
    gsl_vector_free(vector);
    return;
  }
#endif /* VRNA_WITH_GSL */
  /* Fallback: plain gradient descent with a backtracking (halving) step
     size, starting from the caller-supplied epsilon. */
  double improvement;
  const double min_improvement = minImprovement;
  double *new_epsilon = vrna_alloc(sizeof(double) * (length + 1));
  double *gradient = vrna_alloc(sizeof(double) * (length + 1));
  double score = evaluate_perturbation_vector_score(vc,
                                                    epsilon,
                                                    q_prob_unpaired,
                                                    sigma_squared,
                                                    tau_squared,
                                                    objective_function);
  if (callback)
    callback(0, score, epsilon);
  do {
    double new_score;
    double step_size;
    ++iteration_count;
    evaluate_perturbation_vector_gradient(vc,
                                          epsilon,
                                          q_prob_unpaired,
                                          sigma_squared,
                                          tau_squared,
                                          objective_function,
                                          sample_size,
                                          gradient);
    /* step_size = 0.5 / calculate_norm(gradient, length);*/
    step_size = initialStepSize;
    /* backtracking line search: halve the step until the relative score
       improvement reaches min_improvement or the step gets too small */
    do {
      int i;
      for (i = 1; i <= length; ++i)
        new_epsilon[i] = epsilon[i] - step_size * gradient[i];
      new_score = evaluate_perturbation_vector_score(vc,
                                                     new_epsilon,
                                                     q_prob_unpaired,
                                                     sigma_squared,
                                                     tau_squared,
                                                     objective_function);
      improvement = 1 - new_score / score;  /* relative improvement */
      step_size /= 2;
    } while ((improvement < min_improvement) && (step_size >= minStepSize));
    if (new_score > score)  /* no tried step improved the score: give up */
      break;
    if (callback)
      callback(iteration_count, new_score, new_epsilon);
    /* accept the step */
    score = new_score;
    memcpy(epsilon, new_epsilon, sizeof(double) * (length + 1));
  } while (improvement >= min_improvement && iteration_count < max_iterations);
  free(gradient);
  free(new_epsilon);
}
|
polybench.h | /**
* polybench.h: This file is part of the PolyBench/C 3.2 test suite.
*
*
* Contact: Louis-Noel Pouchet <pouchet@cse.ohio-state.edu>
* Web address: http://polybench.sourceforge.net
*/
/*
* Polybench header for instrumentation.
*
* Programs must be compiled with `-I utilities utilities/polybench.c'
*
* Optionally, one can define:
*
* -DPOLYBENCH_TIME, to report the execution time,
* OR (exclusive):
* -DPOLYBENCH_PAPI, to use PAPI H/W counters (defined in polybench.c)
*
*
* See README or utilities/polybench.c for additional options.
*
*/
#ifndef POLYBENCH_H
# define POLYBENCH_H
# include <stdlib.h>
/* Array padding. By default, none is used. */
# ifndef POLYBENCH_PADDING_FACTOR
/* default: */
# define POLYBENCH_PADDING_FACTOR 0
# endif
/* C99 arrays in function prototype. By default, do not use. */
# ifdef POLYBENCH_USE_C99_PROTO
# define POLYBENCH_C99_SELECT(x,y) y
# else
/* default: */
# define POLYBENCH_C99_SELECT(x,y) x
# endif
/* Scalar loop bounds in SCoPs. By default, use parametric loop bounds. */
# ifdef POLYBENCH_USE_SCALAR_LB
# define POLYBENCH_LOOP_BOUND(x,y) x
# else
/* default: */
# define POLYBENCH_LOOP_BOUND(x,y) y
# endif
/* Macros to reference an array. Generic for heap and stack arrays
(C99). Each array dimensionality has his own macro, to be used at
declaration or as a function argument.
Example:
int b[x] => POLYBENCH_1D_ARRAY(b, x)
int A[N][N] => POLYBENCH_2D_ARRAY(A, N, N)
*/
# ifndef POLYBENCH_STACK_ARRAYS
# define POLYBENCH_ARRAY(x) *x
# define POLYBENCH_FREE_ARRAY(x) free((void*)x);
# define POLYBENCH_DECL_VAR(x) (*x)
# else
# define POLYBENCH_ARRAY(x) x
# define POLYBENCH_FREE_ARRAY(x)
# define POLYBENCH_DECL_VAR(x) x
# endif
/* Macros for using arrays in the function prototypes. */
# define POLYBENCH_1D(var, dim1,ddim1) var[POLYBENCH_C99_SELECT(dim1,ddim1) + POLYBENCH_PADDING_FACTOR]
# define POLYBENCH_2D(var, dim1, dim2, ddim1, ddim2) var[POLYBENCH_C99_SELECT(dim1,ddim1) + POLYBENCH_PADDING_FACTOR][POLYBENCH_C99_SELECT(dim2,ddim2) + POLYBENCH_PADDING_FACTOR]
# define POLYBENCH_3D(var, dim1, dim2, dim3, ddim1, ddim2, ddim3) var[POLYBENCH_C99_SELECT(dim1,ddim1) + POLYBENCH_PADDING_FACTOR][POLYBENCH_C99_SELECT(dim2,ddim2) + POLYBENCH_PADDING_FACTOR][POLYBENCH_C99_SELECT(dim3,ddim3) + POLYBENCH_PADDING_FACTOR]
# define POLYBENCH_4D(var, dim1, dim2, dim3, dim4, ddim1, ddim2, ddim3, ddim4) var[POLYBENCH_C99_SELECT(dim1,ddim1) + POLYBENCH_PADDING_FACTOR][POLYBENCH_C99_SELECT(dim2,ddim2) + POLYBENCH_PADDING_FACTOR][POLYBENCH_C99_SELECT(dim3,ddim3) + POLYBENCH_PADDING_FACTOR][POLYBENCH_C99_SELECT(dim4,ddim4) + POLYBENCH_PADDING_FACTOR]
# define POLYBENCH_5D(var, dim1, dim2, dim3, dim4, dim5, ddim1, ddim2, ddim3, ddim4, ddim5) var[POLYBENCH_C99_SELECT(dim1,ddim1) + POLYBENCH_PADDING_FACTOR][POLYBENCH_C99_SELECT(dim2,ddim2) + POLYBENCH_PADDING_FACTOR][POLYBENCH_C99_SELECT(dim3,ddim3) + POLYBENCH_PADDING_FACTOR][POLYBENCH_C99_SELECT(dim4,ddim4) + POLYBENCH_PADDING_FACTOR][POLYBENCH_C99_SELECT(dim5,ddim5) + POLYBENCH_PADDING_FACTOR]
/* Macros to allocate heap arrays.
Example:
polybench_alloc_2d_array(N, M, double) => allocates N x M x sizeof(double)
and returns a pointer to the 2d array
*/
# define POLYBENCH_ALLOC_1D_ARRAY(n1, type) \
(type(*)[n1 + POLYBENCH_PADDING_FACTOR])polybench_alloc_data (n1 + POLYBENCH_PADDING_FACTOR, sizeof(type))
# define POLYBENCH_ALLOC_2D_ARRAY(n1, n2, type) \
(type(*)[n1 + POLYBENCH_PADDING_FACTOR][n2 + POLYBENCH_PADDING_FACTOR])polybench_alloc_data ((n1 + POLYBENCH_PADDING_FACTOR) * (n2 + POLYBENCH_PADDING_FACTOR), sizeof(type))
# define POLYBENCH_ALLOC_3D_ARRAY(n1, n2, n3, type) \
(type(*)[n1 + POLYBENCH_PADDING_FACTOR][n2 + POLYBENCH_PADDING_FACTOR][n3 + POLYBENCH_PADDING_FACTOR])polybench_alloc_data ((n1 + POLYBENCH_PADDING_FACTOR) * (n2 + POLYBENCH_PADDING_FACTOR) * (n3 + POLYBENCH_PADDING_FACTOR), sizeof(type))
# define POLYBENCH_ALLOC_4D_ARRAY(n1, n2, n3, n4, type) \
(type(*)[n1 + POLYBENCH_PADDING_FACTOR][n2 + POLYBENCH_PADDING_FACTOR][n3 + POLYBENCH_PADDING_FACTOR][n4 + POLYBENCH_PADDING_FACTOR])polybench_alloc_data ((n1 + POLYBENCH_PADDING_FACTOR) * (n2 + POLYBENCH_PADDING_FACTOR) * (n3 + POLYBENCH_PADDING_FACTOR) * (n4 + POLYBENCH_PADDING_FACTOR), sizeof(type))
# define POLYBENCH_ALLOC_5D_ARRAY(n1, n2, n3, n4, n5, type) \
(type(*)[n1 + POLYBENCH_PADDING_FACTOR][n2 + POLYBENCH_PADDING_FACTOR][n3 + POLYBENCH_PADDING_FACTOR][n4 + POLYBENCH_PADDING_FACTOR][n5 + POLYBENCH_PADDING_FACTOR])polybench_alloc_data ((n1 + POLYBENCH_PADDING_FACTOR) * (n2 + POLYBENCH_PADDING_FACTOR) * (n3 + POLYBENCH_PADDING_FACTOR) * (n4 + POLYBENCH_PADDING_FACTOR) * (n5 + POLYBENCH_PADDING_FACTOR), sizeof(type))
/* Macros for array declaration. */
# ifndef POLYBENCH_STACK_ARRAYS
# define POLYBENCH_1D_ARRAY_DECL(var, type, dim1, ddim1) \
type POLYBENCH_1D(POLYBENCH_DECL_VAR(var), dim1, ddim1); \
var = POLYBENCH_ALLOC_1D_ARRAY(POLYBENCH_C99_SELECT(dim1, ddim1), type);
# define POLYBENCH_2D_ARRAY_DECL(var, type, dim1, dim2, ddim1, ddim2) \
type POLYBENCH_2D(POLYBENCH_DECL_VAR(var), dim1, dim2, ddim1, ddim2); \
var = POLYBENCH_ALLOC_2D_ARRAY(POLYBENCH_C99_SELECT(dim1, ddim1), POLYBENCH_C99_SELECT(dim2, ddim2), type);
# define POLYBENCH_3D_ARRAY_DECL(var, type, dim1, dim2, dim3, ddim1, ddim2, ddim3) \
type POLYBENCH_3D(POLYBENCH_DECL_VAR(var), dim1, dim2, dim3, ddim1, ddim2, ddim3); \
var = POLYBENCH_ALLOC_3D_ARRAY(POLYBENCH_C99_SELECT(dim1, ddim1), POLYBENCH_C99_SELECT(dim2, ddim2), POLYBENCH_C99_SELECT(dim3, ddim3), type);
/* 4D flavor: declares `var` and, in heap mode, allocates it.
   BUG FIX: a stray comma (`dim1, dim2, ,dim3, dim4`) passed an empty extra
   argument to POLYBENCH_4D, so every use of this macro failed to expand. */
# define POLYBENCH_4D_ARRAY_DECL(var, type, dim1, dim2, dim3, dim4, ddim1, ddim2, ddim3, ddim4) \
  type POLYBENCH_4D(POLYBENCH_DECL_VAR(var), dim1, dim2, dim3, dim4, ddim1, ddim2, ddim3, ddim4); \
  var = POLYBENCH_ALLOC_4D_ARRAY(POLYBENCH_C99_SELECT(dim1, ddim1), POLYBENCH_C99_SELECT(dim2, ddim2), POLYBENCH_C99_SELECT(dim3, ddim3), POLYBENCH_C99_SELECT(dim4, ddim4), type);
# define POLYBENCH_5D_ARRAY_DECL(var, type, dim1, dim2, dim3, dim4, dim5, ddim1, ddim2, ddim3, ddim4, ddim5) \
type POLYBENCH_5D(POLYBENCH_DECL_VAR(var), dim1, dim2, dim3, dim4, dim5, ddim1, ddim2, ddim3, ddim4, ddim5); \
var = POLYBENCH_ALLOC_5D_ARRAY(POLYBENCH_C99_SELECT(dim1, ddim1), POLYBENCH_C99_SELECT(dim2, ddim2), POLYBENCH_C99_SELECT(dim3, ddim3), POLYBENCH_C99_SELECT(dim4, ddim4), POLYBENCH_C99_SELECT(dim5, ddim5), type);
# else
# define POLYBENCH_1D_ARRAY_DECL(var, type, dim1, ddim1) \
type POLYBENCH_1D(POLYBENCH_DECL_VAR(var), dim1, ddim1);
# define POLYBENCH_2D_ARRAY_DECL(var, type, dim1, dim2, ddim1, ddim2) \
type POLYBENCH_2D(POLYBENCH_DECL_VAR(var), dim1, dim2, ddim1, ddim2);
# define POLYBENCH_3D_ARRAY_DECL(var, type, dim1, dim2, dim3, ddim1, ddim2, ddim3) \
type POLYBENCH_3D(POLYBENCH_DECL_VAR(var), dim1, dim2, dim3, ddim1, ddim2, ddim3);
# define POLYBENCH_4D_ARRAY_DECL(var, type, dim1, dim2, dim3, dim4, ddim1, ddim2, ddim3, ddim4) \
type POLYBENCH_4D(POLYBENCH_DECL_VAR(var), dim1, dim2, dim3, dim4, ddim1, ddim2, ddim3, ddim4);
# define POLYBENCH_5D_ARRAY_DECL(var, type, dim1, dim2, dim3, dim4, dim5, ddim1, ddim2, ddim3, ddim4, ddim5) \
type POLYBENCH_5D(POLYBENCH_DECL_VAR(var), dim1, dim2, dim3, dim4, dim5, ddim1, ddim2, ddim3, ddim4, ddim5);
# endif
/* Dead-code elimination macros. Use argc/argv for the run-time check. */
# ifndef POLYBENCH_DUMP_ARRAYS
# define POLYBENCH_DCE_ONLY_CODE if (argc > 42 && ! strcmp(argv[0], ""))
# else
# define POLYBENCH_DCE_ONLY_CODE
# endif
# define polybench_prevent_dce(func) \
POLYBENCH_DCE_ONLY_CODE \
func
/* Performance-related instrumentation. See polybench.c */
# define polybench_start_instruments
# define polybench_stop_instruments
# define polybench_print_instruments
/* PAPI support. */
# ifdef POLYBENCH_PAPI
extern const unsigned int polybench_papi_eventlist[];
# undef polybench_start_instruments
# undef polybench_stop_instruments
# undef polybench_print_instruments
# define polybench_set_papi_thread_report(x) \
polybench_papi_counters_threadid = x;
# define polybench_start_instruments \
polybench_prepare_instruments(); \
polybench_papi_init(); \
int evid; \
for (evid = 0; polybench_papi_eventlist[evid] != 0; evid++) \
{ \
if (polybench_papi_start_counter(evid)) \
continue; \
# define polybench_stop_instruments \
polybench_papi_stop_counter(evid); \
} \
polybench_papi_close(); \
# define polybench_print_instruments polybench_papi_print();
# endif
/* Timing support. */
# if defined(POLYBENCH_TIME) || defined(POLYBENCH_GFLOPS)
# undef polybench_start_instruments
# undef polybench_stop_instruments
# undef polybench_print_instruments
# define polybench_start_instruments polybench_timer_start();
# define polybench_stop_instruments polybench_timer_stop();
# define polybench_print_instruments polybench_timer_print();
extern double polybench_program_total_flops;
extern void polybench_timer_start();
extern void polybench_timer_stop();
extern void polybench_timer_print();
# endif
/* Function declaration. */
# ifdef POLYBENCH_TIME
extern void polybench_timer_start();
extern void polybench_timer_stop();
extern void polybench_timer_print();
# endif
# ifdef POLYBENCH_PAPI
extern void polybench_prepare_instruments();
extern int polybench_papi_start_counter(int evid);
extern void polybench_papi_stop_counter(int evid);
extern void polybench_papi_init();
extern void polybench_papi_close();
extern void polybench_papi_print();
# endif
/* Function prototypes. */
extern void* polybench_alloc_data(unsigned long long int n, int elt_size);
/*
LLVM: I'm appending the content of the file polybench.c here. It'll avoid us
to have to copy it to the folder being compiled in the LLVM test suite.
*/
/**
* polybench.c: This file is part of the PolyBench/C 3.2 test suite.
*
*
* Contact: Louis-Noel Pouchet <pouchet@cse.ohio-state.edu>
* Web address: http://polybench.sourceforge.net
*/
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <unistd.h>
#include <assert.h>
#include <time.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <sched.h>
#include <math.h>
#ifdef _OPENMP
# include <omp.h>
#endif
/* By default, collect PAPI counters on thread 0. */
#ifndef POLYBENCH_THREAD_MONITOR
# define POLYBENCH_THREAD_MONITOR 0
#endif
/* Total LLC cache size. By default 32+MB.. */
#ifndef POLYBENCH_CACHE_SIZE_KB
# define POLYBENCH_CACHE_SIZE_KB 32770
#endif
int polybench_papi_counters_threadid = POLYBENCH_THREAD_MONITOR;
double polybench_program_total_flops = 0;
#ifdef POLYBENCH_PAPI
# include <papi.h>
# define POLYBENCH_MAX_NB_PAPI_COUNTERS 96
char* _polybench_papi_eventlist[] = {
#include "papi_counters.list"
NULL
};
int polybench_papi_eventset;
int polybench_papi_eventlist[POLYBENCH_MAX_NB_PAPI_COUNTERS];
long_long polybench_papi_values[POLYBENCH_MAX_NB_PAPI_COUNTERS];
#endif
/* Timer code (gettimeofday). */
double polybench_t_start, polybench_t_end;
/* Timer code (RDTSC). */
unsigned long long int polybench_c_start, polybench_c_end;
/* Wall-clock time in seconds, via gettimeofday.  When POLYBENCH_TIME is not
 * defined this compiles down to a constant 0, so the timer machinery costs
 * nothing in untimed builds. */
static
double rtclock()
{
#ifndef POLYBENCH_TIME
  return 0;
#else
  struct timeval now;
  int rc = gettimeofday (&now, NULL);
  if (rc != 0)
    printf ("Error return from gettimeofday: %d", rc);
  return now.tv_sec + 1.0e-6 * now.tv_usec;
#endif
}
#ifdef POLYBENCH_CYCLE_ACCURATE_TIMER
/* Read the x86 time-stamp counter (raw cycle count) via RDTSC.  The "=a"
 * and "=d" constraints bind the low/high 32 bits from EAX/EDX. */
static
unsigned long long int rdtsc()
{
  unsigned long long int ret = 0;
  unsigned int cycles_lo;
  unsigned int cycles_hi;
  __asm__ volatile ("RDTSC" : "=a" (cycles_lo), "=d" (cycles_hi));
  ret = (unsigned long long int)cycles_hi << 32 | cycles_lo;
  return ret;
}
#endif
/* Flush the last-level cache by streaming through a zeroed buffer larger
 * than the cache, so measurements start cold.  The assert keeps the sum
 * (and hence the loop) from being optimized away.
 * FIXES: (1) the OpenMP loop accumulated into `tmp` without a reduction
 * clause — a data race; (2) the calloc result was dereferenced unchecked. */
void polybench_flush_cache()
{
  int cs = POLYBENCH_CACHE_SIZE_KB * 1024 / sizeof(double);
  double* flush = (double*) calloc (cs, sizeof(double));
  int i;
  double tmp = 0.0;
  if (flush == NULL)
    return; /* allocation failed: skip the flush rather than fault */
#ifdef _OPENMP
#pragma omp parallel for reduction(+:tmp)
#endif
  for (i = 0; i < cs; i++)
    tmp += flush[i];
  assert (tmp <= 10.0);
  free (flush);
}
#ifdef POLYBENCH_LINUX_FIFO_SCHEDULER
/* Switch the process to the maximum-priority SCHED_FIFO policy to limit OS
 * interference during measurement. */
void polybench_linux_fifo_scheduler()
{
  /* Use FIFO scheduler to limit OS interference. Program must be run
     as root, and this works only for Linux kernels. */
  struct sched_param schedParam;
  schedParam.sched_priority = sched_get_priority_max (SCHED_FIFO);
  sched_setscheduler (0, SCHED_FIFO, &schedParam);
}
/* Undo polybench_linux_fifo_scheduler: back to the default SCHED_OTHER. */
void polybench_linux_standard_scheduler()
{
  /* Restore to standard scheduler policy. */
  struct sched_param schedParam;
  schedParam.sched_priority = sched_get_priority_max (SCHED_OTHER);
  sched_setscheduler (0, SCHED_OTHER, &schedParam);
}
#endif
#ifdef POLYBENCH_PAPI
/* Report a failed PAPI call to stdout and terminate the program.
 * NOTE(review): the retval==0 case is labelled SKIPPED above but still
 * printed as "Error" below; also the 3-argument PAPI_perror used here is
 * the pre-PAPI-5 API signature — confirm against the PAPI version in use. */
static
void test_fail(char *file, int line, char *call, int retval)
{
  char buf[128];
  memset(buf, '\0', sizeof(buf));
  if (retval != 0)
    fprintf (stdout,"%-40s FAILED\nLine # %d\n", file, line);
  else
    {
      fprintf (stdout,"%-40s SKIPPED\n", file);
      fprintf (stdout,"Line # %d\n", line);
    }
  if (retval == PAPI_ESYS)
    {
      sprintf (buf, "System error in %s", call);
      perror (buf);
    }
  else if (retval > 0)
    fprintf (stdout,"Error: %s\n", call);
  else if (retval == 0)
    fprintf (stdout,"Error: %s\n", call);
  else
    {
      char errstring[PAPI_MAX_STR_LEN];
      PAPI_perror (retval, errstring, PAPI_MAX_STR_LEN);
      fprintf (stdout,"Error in %s: %s\n", call, errstring);
    }
  fprintf (stdout,"\n");
  if (PAPI_is_initialized ())
    PAPI_shutdown ();
  exit (1);
}
/* Initialise PAPI, create the event set, and translate the configured event
 * names into codes.  Under OpenMP only the monitored thread
 * (polybench_papi_counters_threadid) performs the initialisation. */
void polybench_papi_init()
{
# ifdef _OPENMP
#pragma omp parallel
  {
#pragma omp master
    {
      /* clamp the monitored thread id to the number of available threads */
      if (omp_get_max_threads () < polybench_papi_counters_threadid)
        polybench_papi_counters_threadid = omp_get_max_threads () - 1;
    }
#pragma omp barrier
    if (omp_get_thread_num () == polybench_papi_counters_threadid)
      {
# endif
        int retval;
        polybench_papi_eventset = PAPI_NULL;
        if ((retval = PAPI_library_init (PAPI_VER_CURRENT)) != PAPI_VER_CURRENT)
          test_fail (__FILE__, __LINE__, "PAPI_library_init", retval);
        if ((retval = PAPI_create_eventset (&polybench_papi_eventset))
            != PAPI_OK)
          test_fail (__FILE__, __LINE__, "PAPI_create_eventset", retval);
        int k;
        for (k = 0; _polybench_papi_eventlist[k]; ++k)
          {
            if ((retval =
                 PAPI_event_name_to_code (_polybench_papi_eventlist[k],
                                          &(polybench_papi_eventlist[k])))
                != PAPI_OK)
              test_fail (__FILE__, __LINE__, "PAPI_event_name_to_code", retval);
          }
        /* zero-terminate the translated event-code list */
        polybench_papi_eventlist[k] = 0;
# ifdef _OPENMP
      }
  }
#pragma omp barrier
# endif
}
/* Destroy the PAPI event set and shut the library down (monitored thread
 * only under OpenMP). */
void polybench_papi_close()
{
# ifdef _OPENMP
#pragma omp parallel
  {
    if (omp_get_thread_num () == polybench_papi_counters_threadid)
      {
# endif
        int retval;
        if ((retval = PAPI_destroy_eventset (&polybench_papi_eventset))
            != PAPI_OK)
          test_fail (__FILE__, __LINE__, "PAPI_destroy_eventset", retval);
        if (PAPI_is_initialized ())
          PAPI_shutdown ();
# ifdef _OPENMP
      }
  }
#pragma omp barrier
# endif
}
/* Flush the cache, add event `evid` to the event set and start counting.
 * Always returns 0; any PAPI failure aborts via test_fail.
 * NOTE(review): the PAPI_add_event failure path reports a hard-coded 1
 * rather than the real PAPI return code. */
int polybench_papi_start_counter(int evid)
{
# ifndef POLYBENCH_NO_FLUSH_CACHE
  polybench_flush_cache();
# endif
# ifdef _OPENMP
# pragma omp parallel
  {
    if (omp_get_thread_num () == polybench_papi_counters_threadid)
      {
# endif
        int retval = 1;
        char descr[PAPI_MAX_STR_LEN];
        PAPI_event_info_t evinfo;
        PAPI_event_code_to_name (polybench_papi_eventlist[evid], descr);
        if (PAPI_add_event (polybench_papi_eventset,
                            polybench_papi_eventlist[evid]) != PAPI_OK)
          test_fail (__FILE__, __LINE__, "PAPI_add_event", 1);
        if (PAPI_get_event_info (polybench_papi_eventlist[evid], &evinfo)
            != PAPI_OK)
          test_fail (__FILE__, __LINE__, "PAPI_get_event_info", retval);
        if ((retval = PAPI_start (polybench_papi_eventset)) != PAPI_OK)
          test_fail (__FILE__, __LINE__, "PAPI_start", retval);
# ifdef _OPENMP
      }
  }
#pragma omp barrier
# endif
  return 0;
}
/* Read and stop counter `evid`, record its value in polybench_papi_values,
 * and remove the event from the event set (monitored thread only). */
void polybench_papi_stop_counter(int evid)
{
# ifdef _OPENMP
# pragma omp parallel
  {
    if (omp_get_thread_num () == polybench_papi_counters_threadid)
      {
# endif
        int retval;
        long_long values[1];
        values[0] = 0;
        if ((retval = PAPI_read (polybench_papi_eventset, &values[0]))
            != PAPI_OK)
          test_fail (__FILE__, __LINE__, "PAPI_read", retval);
        if ((retval = PAPI_stop (polybench_papi_eventset, NULL)) != PAPI_OK)
          test_fail (__FILE__, __LINE__, "PAPI_stop", retval);
        polybench_papi_values[evid] = values[0];
        if ((retval = PAPI_remove_event
             (polybench_papi_eventset,
              polybench_papi_eventlist[evid])) != PAPI_OK)
          test_fail (__FILE__, __LINE__, "PAPI_remove_event", retval);
# ifdef _OPENMP
      }
  }
#pragma omp barrier
# endif
}
/* Print the collected counter values, one per event (names too when
 * POLYBENCH_PAPI_VERBOSE is defined).
 * NOTE(review): the second #endif below closes the _OPENMP guard, so the
 * verbose header lines are compiled only in OpenMP builds. */
void polybench_papi_print()
{
  int verbose = 0;
# ifdef _OPENMP
# pragma omp parallel
  {
    if (omp_get_thread_num() == polybench_papi_counters_threadid)
      {
#ifdef POLYBENCH_PAPI_VERBOSE
        verbose = 1;
#endif
        if (verbose)
          printf ("On thread %d:\n", polybench_papi_counters_threadid);
#endif
        int evid;
        for (evid = 0; polybench_papi_eventlist[evid] != 0; ++evid)
          {
            if (verbose)
              printf ("%s=", _polybench_papi_eventlist[evid]);
            printf ("%llu ", polybench_papi_values[evid]);
            if (verbose)
              printf ("\n");
          }
        printf ("\n");
# ifdef _OPENMP
      }
  }
#pragma omp barrier
# endif
}
#endif
/* ! POLYBENCH_PAPI */
/* Pre-measurement setup: flush the LLC and (when enabled at compile time)
 * switch to the Linux FIFO scheduler. */
void polybench_prepare_instruments()
{
#ifndef POLYBENCH_NO_FLUSH_CACHE
  polybench_flush_cache ();
#endif
#ifdef POLYBENCH_LINUX_FIFO_SCHEDULER
  polybench_linux_fifo_scheduler ();
#endif
}
/* Prepare the machine, then record the measurement start point (seconds via
 * gettimeofday, or raw cycles when POLYBENCH_CYCLE_ACCURATE_TIMER is set). */
void polybench_timer_start()
{
  polybench_prepare_instruments ();
#ifndef POLYBENCH_CYCLE_ACCURATE_TIMER
  polybench_t_start = rtclock ();
#else
  polybench_c_start = rdtsc ();
#endif
}
/* Record the measurement end point and restore the standard scheduler if
 * the FIFO scheduler was engaged at start. */
void polybench_timer_stop()
{
#ifndef POLYBENCH_CYCLE_ACCURATE_TIMER
  polybench_t_end = rtclock ();
#else
  polybench_c_end = rdtsc ();
#endif
#ifdef POLYBENCH_LINUX_FIFO_SCHEDULER
  polybench_linux_standard_scheduler ();
#endif
}
/* Print the elapsed measurement: GFLOP/s when POLYBENCH_GFLOPS is defined,
 * otherwise elapsed seconds (or raw cycles in cycle-accurate mode).
 * FIXES: (1) the GFLOPS branch referenced __polybench_program_total_flops,
 * which does not exist — the global is polybench_program_total_flops — so
 * -DPOLYBENCH_GFLOPS builds failed to compile; (2) "%Ld" is not a standard
 * printf conversion for unsigned long long; use "%llu". */
void polybench_timer_print()
{
#ifdef POLYBENCH_GFLOPS
  if (polybench_program_total_flops == 0)
    {
      printf ("[PolyBench][WARNING] Program flops not defined, use polybench_set_program_flops(value)\n");
      printf ("%0.6lf\n", polybench_t_end - polybench_t_start);
    }
  else
    printf ("%0.2lf\n",
            (polybench_program_total_flops /
             (double)(polybench_t_end - polybench_t_start)) / 1000000000);
#else
# ifndef POLYBENCH_CYCLE_ACCURATE_TIMER
  printf ("%0.6f\n", polybench_t_end - polybench_t_start);
# else
  printf ("%llu\n", polybench_c_end - polybench_c_start);
# endif
#endif
}
/* Allocate `num` bytes aligned to 32 bytes via posix_memalign.  Exits the
 * program on failure, so callers never see NULL.  (`new` is a legal
 * identifier here — this is C, not C++.) */
static
void *
xmalloc (size_t num)
{
  void* new = NULL;
  int ret = posix_memalign (&new, 32, num);
  if (! new || ret)
    {
      fprintf (stderr, "[PolyBench] posix_memalign: cannot allocate memory");
      exit (1);
    }
  return new;
}
/* Allocate n * elt_size bytes (32-byte aligned via xmalloc).  Exits on
 * allocation failure or when the byte count overflows size_t — this
 * resolves the former "FIXME: detect overflow!". */
void* polybench_alloc_data(unsigned long long int n, int elt_size)
{
  size_t val = n;
  val *= elt_size;
  /* Overflow check: the multiply (or the size_t narrowing of n) wrapped
     iff dividing back does not recover n. */
  if (elt_size != 0 && val / (size_t)elt_size != n)
    {
      fprintf (stderr, "[PolyBench] polybench_alloc_data: size overflow");
      exit (1);
    }
  void* ret = xmalloc (val);
  return ret;
}
/* To avoid calling printf M*M times (and make it run
for a long time), we split the output into an encoded string,
and print it as a simple char pointer, M times. */
/* Encode the 8 bytes of `el` as 16 printable characters into out[pos..pos+15]:
 * for each byte, high nibble then low nibble, each emitted as '0' + nibble.
 * Byte order follows the host endianness so the printed string is stable
 * across platforms.
 * BUG FIX: `b & 0xF0 >> 4` parses as `b & (0xF0 >> 4)` because >> binds
 * tighter than &, so every "high nibble" character actually repeated the
 * low nibble.  All high-nibble extractions are now (b & 0xF0) >> 4. */
static inline
void print_element(double el, int pos, char *out)
{
  union {
    double datum;
    char bytes[8];
  } block;
  block.datum = el;
  /* each nibble as a char, within the printable range */
#ifdef __BIG_ENDIAN__
  *(out+pos+15) = (((block.bytes[0]&0xF0)>>4)+'0');
  *(out+pos+14) = ((block.bytes[0]&0x0F)     +'0');
  *(out+pos+13) = (((block.bytes[1]&0xF0)>>4)+'0');
  *(out+pos+12) = ((block.bytes[1]&0x0F)     +'0');
  *(out+pos+11) = (((block.bytes[2]&0xF0)>>4)+'0');
  *(out+pos+10) = ((block.bytes[2]&0x0F)     +'0');
  *(out+pos+9)  = (((block.bytes[3]&0xF0)>>4)+'0');
  *(out+pos+8)  = ((block.bytes[3]&0x0F)     +'0');
  *(out+pos+7)  = (((block.bytes[4]&0xF0)>>4)+'0');
  *(out+pos+6)  = ((block.bytes[4]&0x0F)     +'0');
  *(out+pos+5)  = (((block.bytes[5]&0xF0)>>4)+'0');
  *(out+pos+4)  = ((block.bytes[5]&0x0F)     +'0');
  *(out+pos+3)  = (((block.bytes[6]&0xF0)>>4)+'0');
  *(out+pos+2)  = ((block.bytes[6]&0x0F)     +'0');
  *(out+pos+1)  = (((block.bytes[7]&0xF0)>>4)+'0');
  *(out+pos)    = ((block.bytes[7]&0x0F)     +'0');
#else
  *(out+pos)    = (((block.bytes[0]&0xF0)>>4)+'0');
  *(out+pos+1)  = ((block.bytes[0]&0x0F)     +'0');
  *(out+pos+2)  = (((block.bytes[1]&0xF0)>>4)+'0');
  *(out+pos+3)  = ((block.bytes[1]&0x0F)     +'0');
  *(out+pos+4)  = (((block.bytes[2]&0xF0)>>4)+'0');
  *(out+pos+5)  = ((block.bytes[2]&0x0F)     +'0');
  *(out+pos+6)  = (((block.bytes[3]&0xF0)>>4)+'0');
  *(out+pos+7)  = ((block.bytes[3]&0x0F)     +'0');
  *(out+pos+8)  = (((block.bytes[4]&0xF0)>>4)+'0');
  *(out+pos+9)  = ((block.bytes[4]&0x0F)     +'0');
  *(out+pos+10) = (((block.bytes[5]&0xF0)>>4)+'0');
  *(out+pos+11) = ((block.bytes[5]&0x0F)     +'0');
  *(out+pos+12) = (((block.bytes[6]&0xF0)>>4)+'0');
  *(out+pos+13) = ((block.bytes[6]&0x0F)     +'0');
  *(out+pos+14) = (((block.bytes[7]&0xF0)>>4)+'0');
  *(out+pos+15) = ((block.bytes[7]&0x0F)     +'0');
#endif
}
#endif /* !POLYBENCH_H */
|
reduction.h | #ifndef FAASM_REDUCTION_H
#define FAASM_REDUCTION_H
#include <cstdint>
#include "faasm/core.h"
#include "faasm/random.h"
#include <cstdio>
#include <cstring>
#include <faasm/array.h>
#include <omp.h>
#include <random>
#include <string>
// Distributed counter stored in Faasm shared state under a caller-supplied
// key.  The value's raw bytes are written/read through the State union so
// the byte-oriented state API can transport an arbitrary trivially-copyable T.
template<typename T>
class FaasmCounter
{
  private:
    union State
    {
        T x;
        uint8_t buf; // Ideally uint8_t[sizeof(T)] but this type doesn't match
                     // uint8_t* in the state API
    };

    // Read sizeof(State) bytes of shared state for `key` into the union.
    static union State readState(const char* key)
    {
        union State val;
        faasmReadState(key, &val.buf, sizeof(State));
        return val;
    }

    // Write the union's bytes back to shared state under `key`.
    static void writeState(const char* key, union State val)
    {
        faasmWriteState(key, &val.buf, sizeof(State));
    }

  public:
    // Initialise the counter in shared state to `val`.
    static void init(const char* counterKey, T val)
    {
        union State data
        {
            .x = val
        };
        writeState(counterKey, data);
    }

    // Current counter value.
    // NOTE(review): returns int although the stored type is T — this
    // narrows when T is wider than int; confirm intended.
    static int getCounter(const char* counterKey)
    {
        return readState(counterKey).x;
    }

    // Atomically (under the global state lock) add `increment` and return
    // the new value.  Same int-vs-T narrowing note as getCounter.
    static int incrby(const char* counterKey, T increment)
    {
        faasmLockStateGlobal(counterKey);
        union State val = readState(counterKey);
        val.x += increment;
        writeState(counterKey, val);
        faasmUnlockStateGlobal(counterKey);
        return val.x;
    }
};
// OpenMP reduction scalar backed by a Faasm shared-state array with one
// int64_t slot per thread.  The "real" reductor (built with the int64_t
// constructor) owns a random reductionKey; thread-private copies made via
// threadNew() accumulate locally in x and are folded back with redisSum().
class i64
{
  private:
    int64_t x = 0;
    // Depending on the number of threads and the reduction, a copy constructor
    // with the initial reductor is created on some paths (i.e. for certain
    // threads). A better implementation would deal with references only so the
    // copy constructor is as cheap as possible. We would also like to support
    // coercion from all arithmetic operators by having a logical default
    // constructor which we need to have no overhead compared to a raw
    // arithmetic type For now we keep this shorter than small string
    // optimisations, could do cache line optimisation too.
    std::string reductionKey;
    // Zero-initialised so thread-private copies (threadNew) never carry an
    // indeterminate thread count.
    std::int32_t numThreads = 0;

    explicit i64() = default;

    // Pull the per-thread partial-sum array from shared state and add up
    // every slot.
    int64_t accumulate() const
    {
        faasm::AsyncArray<std::int64_t> arr(reductionKey.c_str(), numThreads);
        arr.pull();
        int64_t acc = 0;
        for (int i = 0; i < numThreads; i++) {
            acc += arr[i];
        }
        return acc;
    }

  public:
    // Ensures no silly copy is made
    i64(const i64& other) = delete;

    // Should be called on reduction init only and not in user code. This would
    // be enforced by compiler. For now we make the single argument constructor
    // private.
    static i64 threadNew() { return i64(); }

    // Used by user on initialisation: allocates and zeroes the per-thread
    // partial-sum array under a fresh random key.
    explicit i64(int64_t x)
      : x(x)
      , reductionKey(faasm::randomString(11))
      , numThreads(omp_get_max_threads())
    {
        faasm::AsyncArray<std::int64_t> arr(reductionKey.c_str(), numThreads);
        arr.zero();
    }

    // Fold a thread's partial result into this reductor's shared array.
    void redisSum(i64& threadResult)
    {
        faasm::AsyncArray<std::int64_t> arr(reductionKey.c_str(), numThreads);
        arr.pullLazy();
        // BUG FIX: assigning `threadResult` (an i64) to the int64_t slot
        // invoked operator int64_t() -> accumulate() on the thread-private
        // copy, whose reductionKey is empty (threadNew), i.e. a state pull
        // under the wrong key.  Store the raw partial sum instead (member
        // functions may access another instance's private x).
        arr[omp_get_thread_num()] = threadResult.x;
        arr.push();
    }

    /*
     * Overloadings: ideally we could want to take any type of input that
     * coerces to int64_t via the one argument constructor. However at the
     * moment we use the one argument constructor to create a random string (on
     * initialisation of the first reductor) which allows us to make this type
     * completely transparent in the native implementation (i64 = int64_t). I
     * think concepts to specialise those operators can help.
     */
    i64& operator+=(const int64_t other)
    {
        x += other;
        return *this;
    }

    i64& operator-=(const int64_t other)
    {
        x -= other;
        return *this;
    }

    i64& operator++()
    {
        ++x;
        return *this;
    }

    i64& operator--()
    {
        --x;
        return *this;
    }

    // Conversions trigger a full pull+sum of the shared array.
    operator double() const { return (double)accumulate(); }
    operator int64_t() const { return accumulate(); }
};
#pragma omp declare reduction \
(+: i64: omp_out.redisSum(omp_in)) \
initializer(omp_priv=i64::threadNew())
#else // i.e not __wasm__
using i64 = int64_t;
#endif
|
array.h | /** @file array.h
@brief Defines a container class template for components of a NF model.
The Array class is used as a container for Population, Propagator, Coupling,
and Dendrite classes. It provides a `step()` member function that iteratively
calls the step method for each element.
@author Peter Drysdale, Felix Fung,
*/
#ifndef NFTSIM_SRC_ARRAY_H
#define NFTSIM_SRC_ARRAY_H
// C++ standard library headers
#include <vector> // std::vector;
template<class T>
class Array {
  Array(const Array&); ///< No copy constructor allowed.
  // NOTE(review): copy *assignment* is not deleted; assigning one Array to
  // another would copy the raw pointers and double-delete in ~Array.
  std::vector<T*> m; ///< The class' main data, contains pointers to NF objects.
 public:
  // NOTE(review): defined via std::vector<T> rather than std::vector<T*>;
  // both are std::size_t with the default allocator, so this is harmless.
  using size_type = typename std::vector<T>::size_type;
  virtual void step(); ///< iteratively calls `step()` member of Array's elements.
  virtual void pstep(); ///< OpenMP parallel version of `Array::step()` //NOTE: OpenMP pragma currently disabled.
  void add(T* t); ///< adds a pointer to a NF object to the Array.
  void add(std::vector<T*> t); ///< adds a vector of pointers to NF objects to the Array.
  bool empty() const; ///< accesses underlying vector's `empty()` member.
  inline T* operator[]( size_type index ) const; ///< accesses underlying vector's `operator[]`.
  size_type size() const; ///< accesses underlying vector's `size()` member.
  Array<T>(); ///< defaulted: starts with an empty container.
  virtual ~Array(); ///< deletes every owned element pointer.
};
template<class T>
void Array<T>::add( T* element ) {
  // Append one component; the Array takes ownership (deleted in ~Array).
  m.push_back( element );
}
template<class T>
void Array<T>::add( std::vector<T*> elements ) {
  // Append every supplied pointer, preserving order.
  for( T* element : elements ) {
    m.push_back( element );
  }
}
template<class T>
bool Array<T>::empty() const {
  // True when no components have been added yet.
  return m.size() == 0;
}
template<class T>
void Array<T>::step() {
  // Advance every contained object by one step, in insertion order.
  for( T* element : m ) {
    element->step();
  }
}
template<class T>
void Array<T>::pstep() {
  // Note pstep() is needed as well as step() because output must use
  // step so that it is not parallelized
  // The OpenMP pragma below is intentionally disabled; uncomment it to
  // parallelise the per-element stepping again.
  //#pragma omp parallel for num_threads(5)
  for( size_type i=0; i<m.size(); i++ ) {
    m[i]->step();
  }
}
template<class T>
Array<T>::Array() = default; // defaulted: empty container, nothing else to set up
template<class T>
Array<T>::~Array() {
  // The Array owns its elements: release every non-null pointer.
  for( T* element : m ) {
    if( element != nullptr ) {
      delete element;
    }
  }
}
template<class T>
T* Array<T>::operator[]( size_type position ) const {
  // Unchecked element access, mirroring std::vector::operator[].
  return m[position];
}
template<class T>
typename Array<T>::size_type Array<T>::size() const {
  // Number of stored component pointers.
  return m.size();
}
#endif //NFTSIM_SRC_ARRAY_H
|
Quicksort.c | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <omp.h>
#include <time.h>
// array init modes -> "random", "crescent", "decrescent"
#define ARRAY_INIT_MODE "random"
#define ARRAY_SIZE 500000
// quicksort modes -> "sequential", "tasks", "tasks_and_for", "sections"
#define QUICKSORT_MODE "tasks_and_for"
#define parallelStop partitionSize <= 2500
/*
* 5000
* 2500 **
* 1200
* 300
*/
/* Exchange array[index1] and array[index2] in place. */
void swap(int index1, int index2, int *array)
{
    int held = array[index2];
    array[index2] = array[index1];
    array[index1] = held;
}
// ------------------- QUICKSORT PARALELO COM TAREFAS
/* Hoare-style quicksort partition around the middle element, recursing on
 * both halves.  Partitions at or below the parallelStop threshold recurse
 * sequentially; larger ones spawn one OpenMP task per half. */
void parallelDivideTasks(int initialLeft, int initialRight, int *array)
{
    int left = initialLeft;
    int right = initialRight;
    int pivot = array[ (left + right) / 2 ];
    int leftElement;
    int rightElement;
    const int partitionSize = initialRight - initialLeft + 1;
    while (left <= right)
    {
        leftElement = array[left];
        rightElement = array[right];
        /* (disabled) create a task for some thread to run, sharing the
           current thread's left and leftElement variables with it */
        //#pragma omp task shared(left, leftElement)
        while (left < initialRight && leftElement < pivot)
        {
            leftElement = array[++left];
        }
        //#pragma omp task shared(right, rightElement)
        while (right > initialLeft && rightElement > pivot)
        {
            rightElement = array[--right];
        }
        /* (disabled) wait for the tasks this thread created to finish */
        //#pragma omp taskwait
        if (left <= right)
        {
            swap(left++, right--, array);
        }
    }
    if (parallelStop)  /* small partition: recurse sequentially */
    {
        if (right > initialLeft)
        {
            parallelDivideTasks(initialLeft, right, array);
        }
        if (left < initialRight)
        {
            parallelDivideTasks(left, initialRight, array);
        }
    }
    else  /* large partition: one task per half */
    {
        if (right > initialLeft)
        {
            #pragma omp task
            parallelDivideTasks(initialLeft, right, array);
        }
        if (left < initialRight)
        {
            #pragma omp task
            parallelDivideTasks(left, initialRight, array);
        }
    }
}
/* Entry point for the task-based parallel quicksort: open a parallel region
 * and let a single thread seed the first partition; the spawned tasks are
 * then picked up by the whole team. */
void parallelQuicksortTasks(int *array, int arrayLength)
{
    #pragma omp parallel // omp parallel -> creates a parallel region
    // omp single -> only one thread executes the next line; moreover, the
    // other threads only begin their work after that line has been executed
    #pragma omp single
    parallelDivideTasks(0, arrayLength - 1, array);
}
// ------------------- FIM DO QUICKSORT PARALELO COM TAREFAS
// ------------------- QUICKSORT PARALELO COM FOR E TAREFAS
/*
 * One step of the "parallel for + tasks" quicksort.
 *
 * Three-way partition: the elements of array[initialLeft..initialRight]
 * are split into "< pivot", "== pivot" and "> pivot" groups, copied back
 * in that order, and the first and last groups are sorted recursively.
 * Keeping the "== pivot" group out of the recursion guarantees progress
 * even when the pivot is the partition's minimum/maximum or all elements
 * are equal -- the previous two-way split (< vs >=) left the array
 * unchanged in those cases and recursed forever on the same range.
 *
 * Small partitions are handled sequentially.  Large ones classify the
 * elements with a parallel for; each thread reserves a destination slot
 * with an atomic capture, fixing the data race the unsynchronized
 * `counter++` updates had.  Within a group the element order may then
 * differ from the sequential version, which is harmless for sorting.
 */
void parallelDivideFor(int initialLeft, int initialRight, int *array)
{
    const int partitionSize = initialRight - initialLeft + 1;
    if (partitionSize < 2)
    {
        return; // nothing to sort; also avoids an out-of-bounds pivot read
    }
    const int pivot = array[ (initialLeft + initialRight) / 2 ];
    int lessThanPivot[partitionSize];
    int equalToPivot[partitionSize];
    int greaterThanPivot[partitionSize];
    int ltPivotCounter = 0;
    int eqPivotCounter = 0;
    int gtPivotCounter = 0;
    int i;
    if (partitionSize <= 2500) // same cutoff as the parallelStop macro
    {
        // Sequential classification into the three groups.
        for (i = 0; i < partitionSize; i++)
        {
            const int currentElement = array[initialLeft + i];
            if (currentElement < pivot)
                lessThanPivot[ltPivotCounter++] = currentElement;
            else if (currentElement > pivot)
                greaterThanPivot[gtPivotCounter++] = currentElement;
            else
                equalToPivot[eqPivotCounter++] = currentElement;
        }
        // Copy the groups back: less | equal | greater.
        for (i = 0; i < ltPivotCounter; i++)
            array[initialLeft + i] = lessThanPivot[i];
        for (i = 0; i < eqPivotCounter; i++)
            array[initialLeft + ltPivotCounter + i] = equalToPivot[i];
        for (i = 0; i < gtPivotCounter; i++)
            array[initialLeft + ltPivotCounter + eqPivotCounter + i] = greaterThanPivot[i];
        // Only the strict groups recurse; the pivot group is in place,
        // so each recursive range is strictly smaller.
        if (ltPivotCounter > 1)
            parallelDivideFor(initialLeft, initialLeft + ltPivotCounter - 1, array);
        if (gtPivotCounter > 1)
            parallelDivideFor(initialLeft + ltPivotCounter + eqPivotCounter, initialRight, array);
    }
    else
    {
        // Classify in parallel; a plain `counter++` here raced, so each
        // thread reserves its destination slot atomically.
        #pragma omp parallel for
        for (i = 0; i < partitionSize; i++)
        {
            const int currentElement = array[initialLeft + i];
            int slot;
            if (currentElement < pivot)
            {
                #pragma omp atomic capture
                slot = ltPivotCounter++;
                lessThanPivot[slot] = currentElement;
            }
            else if (currentElement > pivot)
            {
                #pragma omp atomic capture
                slot = gtPivotCounter++;
                greaterThanPivot[slot] = currentElement;
            }
            else
            {
                #pragma omp atomic capture
                slot = eqPivotCounter++;
                equalToPivot[slot] = currentElement;
            }
        }
        // Copy the three groups back as tasks; the regions are disjoint.
        #pragma omp task private(i)
        for (i = 0; i < ltPivotCounter; i++)
            array[initialLeft + i] = lessThanPivot[i];
        #pragma omp task private(i)
        for (i = 0; i < eqPivotCounter; i++)
            array[initialLeft + ltPivotCounter + i] = equalToPivot[i];
        #pragma omp task private(i)
        for (i = 0; i < gtPivotCounter; i++)
            array[initialLeft + ltPivotCounter + eqPivotCounter + i] = greaterThanPivot[i];
        // The group buffers live on this stack frame: wait for the copies
        // before the frame can unwind or the recursion reads the array.
        #pragma omp taskwait
        if (ltPivotCounter > 1)
        {
            #pragma omp task
            parallelDivideFor(initialLeft, initialLeft + ltPivotCounter - 1, array);
        }
        if (gtPivotCounter > 1)
        {
            #pragma omp task
            parallelDivideFor(initialLeft + ltPivotCounter + eqPivotCounter, initialRight, array);
        }
    }
}
// Entry point for the "parallel for + tasks" quicksort variant: open a
// parallel region, have a single thread start the recursion, and let
// the other threads run the tasks it creates.
void parallelQuicksortFor(int *array, int arrayLength)
{
    #pragma omp parallel
    {
        #pragma omp single
        parallelDivideFor(0, arrayLength - 1, array);
    }
}
// ------------------- FIM DO QUICKSORT PARALELO COM FOR E TAREFAS
// ------------------- QUICKSORT PARALELO COM SECOES
/*
 * One step of the sections-based parallel quicksort: Hoare-style
 * in-place partition of array[initialLeft..initialRight] around the
 * middle element, then recursion on both halves.  Large partitions
 * recurse inside an `omp parallel sections` region so the halves can
 * be sorted by different threads.
 *
 * Fix: the recursive calls were written as
 *     cond ? parallelDivideSections(...) : 0;
 * which is ill-formed -- a conditional expression cannot combine a
 * void operand with an int one (C11 6.5.15).  Plain if statements
 * inside the sections do the same job legally.
 */
void parallelDivideSections(int initialLeft, int initialRight, int *array)
{
    int left = initialLeft;
    int right = initialRight;
    int pivot = array[ (left + right) / 2 ];
    int leftElement;
    int rightElement;
    const int partitionSize = initialRight - initialLeft + 1;
    while (left <= right)
    {
        leftElement = array[left];
        rightElement = array[right];
        // Advance the left cursor past elements already < pivot.
        while (left < initialRight && leftElement < pivot)
        {
            leftElement = array[++left];
        }
        // Retreat the right cursor past elements already > pivot.
        while (right > initialLeft && rightElement > pivot)
        {
            rightElement = array[--right];
        }
        if (left <= right)
        {
            swap(left++, right--, array);
        }
    }
    if (parallelStop) // small partition: plain recursion
    {
        if (right > initialLeft)
        {
            parallelDivideSections(initialLeft, right, array);
        }
        if (left < initialRight)
        {
            parallelDivideSections(left, initialRight, array);
        }
    }
    else
    {
        #pragma omp parallel sections
        {
            #pragma omp section // each section is handed to one thread
            {
                if (right > initialLeft)
                {
                    parallelDivideSections(initialLeft, right, array);
                }
            }
            #pragma omp section
            {
                if (left < initialRight)
                {
                    parallelDivideSections(left, initialRight, array);
                }
            }
        }
    }
}
// Entry point for the sections-based quicksort variant; the parallel
// regions are opened inside parallelDivideSections itself.
void parallelQuicksortSections(int *array, int arrayLength)
{
    const int lastIndex = arrayLength - 1;
    parallelDivideSections(0, lastIndex, array);
}
// ------------------- FIM DO QUICKSORT PARALELO COM SECOES
// ------------------- QUICKSORT SEQUENCIAL
// Sequential quicksort of array[initialLeft..initialRight]: Hoare-style
// partition around the middle element, then recursion on both halves.
void sequentialDivide(int initialLeft, int initialRight, int *array)
{
    int lo = initialLeft;
    int hi = initialRight;
    const int pivot = array[ (initialLeft + initialRight) / 2 ];
    while (lo <= hi)
    {
        int loValue = array[lo];
        int hiValue = array[hi];
        // Walk the lower cursor up past elements already below the pivot.
        while (lo < initialRight && loValue < pivot)
        {
            loValue = array[++lo];
        }
        // Walk the upper cursor down past elements already above it.
        while (hi > initialLeft && hiValue > pivot)
        {
            hiValue = array[--hi];
        }
        if (lo <= hi)
        {
            // Exchange the out-of-place pair and step both cursors inward.
            const int held = array[lo];
            array[lo] = array[hi];
            array[hi] = held;
            lo++;
            hi--;
        }
    }
    if (hi > initialLeft)
    {
        sequentialDivide(initialLeft, hi, array);
    }
    if (lo < initialRight)
    {
        sequentialDivide(lo, initialRight, array);
    }
}
// Convenience wrapper: sort the whole array sequentially.
void sequentialQuicksort(int *array, int arrayLength)
{
    const int lastIndex = arrayLength - 1;
    sequentialDivide(0, lastIndex, array);
}
// ------------------- FIM DO QUICKSORT SEQUENCIAL
// ------------------- FIM DOS QUICKSORTS
// ------------------- FUNCOES DE UTILIDADE
// preenchimento do array -> { 1, 2, 3, ..., arrayLength }
// Fill the array with { 1, 2, 3, ..., arrayLength } (ascending).
void initCrescentArray(int *array, int arrayLength)
{
    int position;
    #pragma omp parallel for
    for (position = 0; position < arrayLength; position++)
    {
        array[position] = position + 1;
    }
}
// preenchimento do array -> { arrayLength, arrayLength - 1, ..., 2, 1 }
// Fill the array with { arrayLength, arrayLength - 1, ..., 2, 1 } (descending).
void initDecrescentArray(int *array, int arrayLength)
{
    int position;
    #pragma omp parallel for
    for (position = 0; position < arrayLength; position++)
    {
        array[position] = arrayLength - position;
    }
}
// Fill the array with pseudo-random values in [1, arrayLength].
// The loop stays sequential on purpose: parallelising it would make
// the rand() calls race on the generator's shared state.
void initRandomArray(int *array, int arrayLength)
{
    int position;
    srand((unsigned) time(NULL));
    for (position = 0; position < arrayLength; position++)
    {
        array[position] = rand() % arrayLength + 1;
    }
}
// Dispatch to the initialiser selected by `mode` ("random", "crescent"
// or "decrescent"); an unknown mode leaves the array untouched.
void initArray(int *array, const char *mode, int arrayLength)
{
    if (strcmp(mode, "crescent") == 0)
    {
        initCrescentArray(array, arrayLength);
    }
    else if (strcmp(mode, "decrescent") == 0)
    {
        initDecrescentArray(array, arrayLength);
    }
    else if (strcmp(mode, "random") == 0)
    {
        initRandomArray(array, arrayLength);
    }
}
// checa se a ordenacao do arranjo esta' incorreta
// Returns 1 if the array is NOT sorted in non-decreasing order, 0 if it
// is.  Arrays of length 0 or 1 are trivially ordered (the loop is empty).
//
// Fix: the reduction variable used to be written with a plain
// assignment (`wrong = wrong ? 1 : ...`), but OpenMP only defines the
// result of a `reduction` clause when the variable is updated through
// the reduction operation itself.  Accumulating the inversions with +=
// conforms, and the returned value (wrong > 0) is unchanged.
int ordenationIsWrong(int *array, int arrayLength)
{
    int wrong = 0;
    int i;
    #pragma omp parallel for reduction(+:wrong)
    for (i = 0; i < arrayLength - 1; i++)
    {
        wrong += (array[i] > array[i + 1]); // one per adjacent inversion
    }
    return wrong > 0;
}
// Run the quicksort implementation selected by `mode` ("sequential",
// "tasks", "tasks_and_for" or "sections"); an unknown mode leaves the
// array untouched.
void quicksort(int *array, const char *mode, int arrayLength)
{
    if (strcmp(mode, "tasks") == 0)
    {
        parallelQuicksortTasks(array, arrayLength);
    }
    else if (strcmp(mode, "tasks_and_for") == 0)
    {
        parallelQuicksortFor(array, arrayLength);
    }
    else if (strcmp(mode, "sections") == 0)
    {
        parallelQuicksortSections(array, arrayLength);
    }
    else if (strcmp(mode, "sequential") == 0)
    {
        sequentialQuicksort(array, arrayLength);
    }
}
// Program entry point: build, sort and validate a test array using the
// modes configured by the macros at the top of the file.
int main()
{
    // ARRAY_SIZE (500000) ints is about 2 MB -- too large to risk on
    // the stack as an automatic array, so use static storage instead.
    static int array[ARRAY_SIZE];
    initArray(array, ARRAY_INIT_MODE, ARRAY_SIZE);
    quicksort(array, QUICKSORT_MODE, ARRAY_SIZE);
    if (ordenationIsWrong(array, ARRAY_SIZE))
    {
        printf("Ordenacao errada\n");
    }
    return EXIT_SUCCESS;
}
//===--- Sema.h - Semantic Analysis & AST Building --------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the Sema class, which performs semantic analysis and
// builds ASTs.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_SEMA_SEMA_H
#define LLVM_CLANG_SEMA_SEMA_H
#include "clang/AST/Attr.h"
#include "clang/AST/DeclarationName.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprObjC.h"
#include "clang/AST/ExternalASTSource.h"
#include "clang/AST/LambdaMangleContext.h"
#include "clang/AST/NSAPI.h"
#include "clang/AST/PrettyPrinter.h"
#include "clang/AST/TypeLoc.h"
#include "clang/Basic/ExpressionTraits.h"
#include "clang/Basic/LangOptions.h"
#include "clang/Basic/Specifiers.h"
#include "clang/Basic/TemplateKinds.h"
#include "clang/Basic/TypeTraits.h"
#include "clang/Lex/ModuleLoader.h"
#include "clang/Sema/AnalysisBasedWarnings.h"
#include "clang/Sema/DeclSpec.h"
#include "clang/Sema/ExternalSemaSource.h"
#include "clang/Sema/IdentifierResolver.h"
#include "clang/Sema/LocInfoType.h"
#include "clang/Sema/ObjCMethodList.h"
#include "clang/Sema/Ownership.h"
#include "clang/Sema/ScopeInfo.h"
#include "clang/Sema/TypoCorrection.h"
#include "clang/Sema/Weak.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/OwningPtr.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/MC/MCParser/MCAsmParser.h"
#include <deque>
#include <string>
namespace llvm {
class APSInt;
template <typename ValueT> struct DenseMapInfo;
template <typename ValueT, typename ValueInfoT> class DenseSet;
class SmallBitVector;
}
namespace clang {
class ADLResult;
class ASTConsumer;
class ASTContext;
class ASTMutationListener;
class ASTReader;
class ASTWriter;
class ArrayType;
class AttributeList;
class BlockDecl;
class CapturedDecl;
class CXXBasePath;
class CXXBasePaths;
class CXXBindTemporaryExpr;
typedef SmallVector<CXXBaseSpecifier*, 4> CXXCastPath;
class CXXConstructorDecl;
class CXXConversionDecl;
class CXXDestructorDecl;
class CXXFieldCollector;
class CXXMemberCallExpr;
class CXXMethodDecl;
class CXXScopeSpec;
class CXXTemporary;
class CXXTryStmt;
class CallExpr;
class ClassTemplateDecl;
class ClassTemplatePartialSpecializationDecl;
class ClassTemplateSpecializationDecl;
class CodeCompleteConsumer;
class CodeCompletionAllocator;
class CodeCompletionTUInfo;
class CodeCompletionResult;
class Decl;
class DeclAccessPair;
class DeclContext;
class DeclRefExpr;
class DeclaratorDecl;
class DeducedTemplateArgument;
class DependentDiagnostic;
class DesignatedInitExpr;
class Designation;
class EnumConstantDecl;
class Expr;
class ExtVectorType;
class ExternalSemaSource;
class FormatAttr;
class FriendDecl;
class FunctionDecl;
class FunctionProtoType;
class FunctionTemplateDecl;
class ImplicitConversionSequence;
class InitListExpr;
class InitializationKind;
class InitializationSequence;
class InitializedEntity;
class IntegerLiteral;
class LabelStmt;
class LambdaExpr;
class LangOptions;
class LocalInstantiationScope;
class LookupResult;
class MacroInfo;
class MultiLevelTemplateArgumentList;
class NamedDecl;
class NonNullAttr;
class ObjCCategoryDecl;
class ObjCCategoryImplDecl;
class ObjCCompatibleAliasDecl;
class ObjCContainerDecl;
class ObjCImplDecl;
class ObjCImplementationDecl;
class ObjCInterfaceDecl;
class ObjCIvarDecl;
template <class T> class ObjCList;
class ObjCMessageExpr;
class ObjCMethodDecl;
class ObjCPropertyDecl;
class ObjCProtocolDecl;
class OMPThreadPrivateDecl;
class OverloadCandidateSet;
class OverloadExpr;
class ParenListExpr;
class ParmVarDecl;
class Preprocessor;
class PseudoDestructorTypeStorage;
class PseudoObjectExpr;
class QualType;
class StandardConversionSequence;
class Stmt;
class StringLiteral;
class SwitchStmt;
class TargetAttributesSema;
class TemplateArgument;
class TemplateArgumentList;
class TemplateArgumentLoc;
class TemplateDecl;
class TemplateParameterList;
class TemplatePartialOrderingContext;
class TemplateTemplateParmDecl;
class Token;
class TypeAliasDecl;
class TypedefDecl;
class TypedefNameDecl;
class TypeLoc;
class UnqualifiedId;
class UnresolvedLookupExpr;
class UnresolvedMemberExpr;
class UnresolvedSetImpl;
class UnresolvedSetIterator;
class UsingDecl;
class UsingShadowDecl;
class ValueDecl;
class VarDecl;
class VisibilityAttr;
class VisibleDeclConsumer;
class IndirectFieldDecl;
namespace sema {
class AccessedEntity;
class BlockScopeInfo;
class CapturedRegionScopeInfo;
class CapturingScopeInfo;
class CompoundScopeInfo;
class DelayedDiagnostic;
class DelayedDiagnosticPool;
class FunctionScopeInfo;
class LambdaScopeInfo;
class PossiblyUnreachableDiag;
class TemplateDeductionInfo;
}
// FIXME: No way to easily map from TemplateTypeParmTypes to
// TemplateTypeParmDecls, so we have this horrible PointerUnion.
typedef std::pair<llvm::PointerUnion<const TemplateTypeParmType*, NamedDecl*>,
SourceLocation> UnexpandedParameterPack;
/// Sema - This implements semantic analysis and AST building for C.
class Sema {
Sema(const Sema &) LLVM_DELETED_FUNCTION;
void operator=(const Sema &) LLVM_DELETED_FUNCTION;
mutable const TargetAttributesSema* TheTargetAttributesSema;
///\brief Source of additional semantic information.
ExternalSemaSource *ExternalSource;
///\brief Whether Sema has generated a multiplexer and has to delete it.
bool isMultiplexExternalSource;
static bool mightHaveNonExternalLinkage(const DeclaratorDecl *FD);
/// \brief Decide whether a new declaration may be linked against a
/// possibly-hidden previous declaration with the same name.
static bool
shouldLinkPossiblyHiddenDecl(const NamedDecl *Old, const NamedDecl *New) {
  // We are about to link these, so it is now safe to compute the new
  // decl's linkage.  A visible previous decl always links.
  if (!Old->isHidden())
    return true;
  // A hidden previous decl has external linkage; link only if the new
  // decl has external linkage too, so the pair stays external.  An
  // internal-linkage new decl is left alone: with no previous decls it
  // simply remains internal.
  return New->hasExternalLinkage();
}
public:
typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy;
typedef OpaquePtr<TemplateName> TemplateTy;
typedef OpaquePtr<QualType> TypeTy;
OpenCLOptions OpenCLFeatures;
FPOptions FPFeatures;
const LangOptions &LangOpts;
Preprocessor &PP;
ASTContext &Context;
ASTConsumer &Consumer;
DiagnosticsEngine &Diags;
SourceManager &SourceMgr;
/// \brief Flag indicating whether or not to collect detailed statistics.
bool CollectStats;
/// \brief Code-completion consumer.
CodeCompleteConsumer *CodeCompleter;
/// CurContext - This is the current declaration context of parsing.
DeclContext *CurContext;
/// \brief Generally null except when we temporarily switch decl contexts,
/// like in \see ActOnObjCTemporaryExitContainerContext.
DeclContext *OriginalLexicalContext;
/// VAListTagName - The declaration name corresponding to __va_list_tag.
/// This is used as part of a hack to omit that class from ADL results.
DeclarationName VAListTagName;
/// PackContext - Manages the stack for \#pragma pack. An alignment
/// of 0 indicates default alignment.
void *PackContext; // Really a "PragmaPackStack*"
bool MSStructPragmaOn; // True when \#pragma ms_struct on
/// VisContext - Manages the stack for \#pragma GCC visibility.
void *VisContext; // Really a "PragmaVisStack*"
/// \brief Flag indicating if Sema is building a recovery call expression.
///
/// This flag is used to avoid building recovery call expressions
/// if Sema is already doing so, which would cause infinite recursions.
bool IsBuildingRecoveryCallExpr;
/// ExprNeedsCleanups - True if the current evaluation context
/// requires cleanups to be run at its conclusion.
bool ExprNeedsCleanups;
/// ExprCleanupObjects - This is the stack of objects requiring
/// cleanup that are created by the current full expression. The
/// element type here is ExprWithCleanups::Object.
SmallVector<BlockDecl*, 8> ExprCleanupObjects;
llvm::SmallPtrSet<Expr*, 2> MaybeODRUseExprs;
/// \brief Stack containing information about each of the nested
/// function, block, and method scopes that are currently active.
///
/// This array is never empty. Clients should ignore the first
/// element, which is used to cache a single FunctionScopeInfo
/// that's used to parse every top-level function.
SmallVector<sema::FunctionScopeInfo *, 4> FunctionScopes;
typedef LazyVector<TypedefNameDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadExtVectorDecls, 2, 2>
ExtVectorDeclsType;
/// ExtVectorDecls - This is a list all the extended vector types. This allows
/// us to associate a raw vector type with one of the ext_vector type names.
/// This is only necessary for issuing pretty diagnostics.
ExtVectorDeclsType ExtVectorDecls;
/// FieldCollector - Collects CXXFieldDecls during parsing of C++ classes.
OwningPtr<CXXFieldCollector> FieldCollector;
typedef llvm::SmallSetVector<const NamedDecl*, 16> NamedDeclSetType;
/// \brief Set containing all declared private fields that are not used.
NamedDeclSetType UnusedPrivateFields;
typedef llvm::SmallPtrSet<const CXXRecordDecl*, 8> RecordDeclSetTy;
/// PureVirtualClassDiagSet - a set of class declarations which we have
/// emitted a list of pure virtual functions. Used to prevent emitting the
/// same list more than once.
OwningPtr<RecordDeclSetTy> PureVirtualClassDiagSet;
/// ParsingInitForAutoVars - a set of declarations with auto types for which
/// we are currently parsing the initializer.
llvm::SmallPtrSet<const Decl*, 4> ParsingInitForAutoVars;
/// \brief A mapping from external names to the most recent
/// locally-scoped extern "C" declaration with that name.
///
/// This map contains external declarations introduced in local
/// scopes, e.g.,
///
/// \code
/// extern "C" void f() {
/// void foo(int, int);
/// }
/// \endcode
///
/// Here, the name "foo" will be associated with the declaration of
/// "foo" within f. This name is not visible outside of
/// "f". However, we still find it in two cases:
///
/// - If we are declaring another global or extern "C" entity with
/// the name "foo", we can find "foo" as a previous declaration,
/// so that the types of this external declaration can be checked
/// for compatibility.
///
/// - If we would implicitly declare "foo" (e.g., due to a call to
/// "foo" in C when no prototype or definition is visible), then
/// we find this declaration of "foo" and complain that it is
/// not visible.
llvm::DenseMap<DeclarationName, NamedDecl *> LocallyScopedExternCDecls;
/// \brief Look for a locally scoped extern "C" declaration by the given name.
llvm::DenseMap<DeclarationName, NamedDecl *>::iterator
findLocallyScopedExternCDecl(DeclarationName Name);
typedef LazyVector<VarDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadTentativeDefinitions, 2, 2>
TentativeDefinitionsType;
/// \brief All the tentative definitions encountered in the TU.
TentativeDefinitionsType TentativeDefinitions;
typedef LazyVector<const DeclaratorDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadUnusedFileScopedDecls, 2, 2>
UnusedFileScopedDeclsType;
/// \brief The set of file scoped decls seen so far that have not been used
/// and must warn if not used. Only contains the first declaration.
UnusedFileScopedDeclsType UnusedFileScopedDecls;
typedef LazyVector<CXXConstructorDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadDelegatingConstructors, 2, 2>
DelegatingCtorDeclsType;
/// \brief All the delegating constructors seen so far in the file, used for
/// cycle detection at the end of the TU.
DelegatingCtorDeclsType DelegatingCtorDecls;
/// \brief All the destructors seen during a class definition that had their
/// exception spec computation delayed because it depended on an unparsed
/// exception spec.
SmallVector<CXXDestructorDecl*, 2> DelayedDestructorExceptionSpecs;
/// \brief All the overriding destructors seen during a class definition
/// (there could be multiple due to nested classes) that had their exception
/// spec checks delayed, plus the overridden destructor.
SmallVector<std::pair<const CXXDestructorDecl*,
const CXXDestructorDecl*>, 2>
DelayedDestructorExceptionSpecChecks;
/// \brief All the members seen during a class definition which were both
/// explicitly defaulted and had explicitly-specified exception
/// specifications, along with the function type containing their
/// user-specified exception specification. Those exception specifications
/// were overridden with the default specifications, but we still need to
/// check whether they are compatible with the default specification, and
/// we can't do that until the nesting set of class definitions is complete.
SmallVector<std::pair<CXXMethodDecl*, const FunctionProtoType*>, 2>
DelayedDefaultedMemberExceptionSpecs;
/// \brief Callback to the parser to parse templated functions when needed.
typedef void LateTemplateParserCB(void *P, const FunctionDecl *FD);
LateTemplateParserCB *LateTemplateParser;
void *OpaqueParser;
void SetLateTemplateParser(LateTemplateParserCB *LTP, void *P) {
LateTemplateParser = LTP;
OpaqueParser = P;
}
class DelayedDiagnostics;

/// \brief Saved delayed-diagnostics state: remembers the pool that was
/// current before a DelayedDiagnostics::push/pushUndelayed so the
/// matching pop can restore it.  Opaque to everything except
/// Sema::DelayedDiagnostics.
class DelayedDiagnosticsState {
  sema::DelayedDiagnosticPool *SavedPool;
  friend class Sema::DelayedDiagnostics;
};

/// Alias used for the state saved around parsing a declaration.
typedef DelayedDiagnosticsState ParsingDeclState;

/// Alias used for the state saved around a processing context.
typedef DelayedDiagnosticsState ProcessingContextState;
/// A class which encapsulates the logic for delaying diagnostics
/// during parsing and other processing.
class DelayedDiagnostics {
  /// \brief The current pool of diagnostics into which delayed
  /// diagnostics should go.  Null when diagnostics are not delayed.
  sema::DelayedDiagnosticPool *CurPool;

public:
  DelayedDiagnostics() : CurPool(0) {}

  /// Adds a delayed diagnostic.
  void add(const sema::DelayedDiagnostic &diag); // in DelayedDiagnostic.h

  /// Determines whether diagnostics should be delayed.
  bool shouldDelayDiagnostics() { return CurPool != 0; }

  /// Returns the current delayed-diagnostics pool.
  sema::DelayedDiagnosticPool *getCurrentPool() const {
    return CurPool;
  }

  /// Enter a new scope.  Access and deprecation diagnostics will be
  /// collected in this pool.
  DelayedDiagnosticsState push(sema::DelayedDiagnosticPool &pool) {
    DelayedDiagnosticsState state;
    state.SavedPool = CurPool;
    CurPool = &pool;
    return state;
  }

  /// Leave a delayed-diagnostic state that was previously pushed.
  /// Do not emit any of the diagnostics.  This is performed as part
  /// of the bookkeeping of popping a pool "properly".
  void popWithoutEmitting(DelayedDiagnosticsState state) {
    CurPool = state.SavedPool;
  }

  /// Enter a new scope where access and deprecation diagnostics are
  /// not delayed.
  DelayedDiagnosticsState pushUndelayed() {
    DelayedDiagnosticsState state;
    state.SavedPool = CurPool;
    CurPool = 0;
    return state;
  }

  /// Undo a previous pushUndelayed().
  void popUndelayed(DelayedDiagnosticsState state) {
    assert(CurPool == NULL);
    CurPool = state.SavedPool;
  }
  // Member deliberately shares the class's name: references to
  // `DelayedDiagnostics` within Sema (e.g. S.DelayedDiagnostics.push*)
  // resolve to this member, not the type.
} DelayedDiagnostics;
/// A RAII object to temporarily push a declaration context.
class ContextRAII {
private:
Sema &S;
DeclContext *SavedContext;
ProcessingContextState SavedContextState;
QualType SavedCXXThisTypeOverride;
public:
ContextRAII(Sema &S, DeclContext *ContextToPush)
: S(S), SavedContext(S.CurContext),
SavedContextState(S.DelayedDiagnostics.pushUndelayed()),
SavedCXXThisTypeOverride(S.CXXThisTypeOverride)
{
assert(ContextToPush && "pushing null context");
S.CurContext = ContextToPush;
}
void pop() {
if (!SavedContext) return;
S.CurContext = SavedContext;
S.DelayedDiagnostics.popUndelayed(SavedContextState);
S.CXXThisTypeOverride = SavedCXXThisTypeOverride;
SavedContext = 0;
}
~ContextRAII() {
pop();
}
};
/// \brief RAII object to handle the state changes required to synthesize
/// a function body.
class SynthesizedFunctionScope {
Sema &S;
Sema::ContextRAII SavedContext;
public:
SynthesizedFunctionScope(Sema &S, DeclContext *DC)
: S(S), SavedContext(S, DC)
{
S.PushFunctionScope();
S.PushExpressionEvaluationContext(Sema::PotentiallyEvaluated);
}
~SynthesizedFunctionScope() {
S.PopExpressionEvaluationContext();
S.PopFunctionScopeInfo();
}
};
/// WeakUndeclaredIdentifiers - Identifiers contained in
/// \#pragma weak before declared. rare. may alias another
/// identifier, declared or undeclared
llvm::DenseMap<IdentifierInfo*,WeakInfo> WeakUndeclaredIdentifiers;
/// ExtnameUndeclaredIdentifiers - Identifiers contained in
/// \#pragma redefine_extname before declared. Used in Solaris system headers
/// to define functions that occur in multiple standards to call the version
/// in the currently selected standard.
llvm::DenseMap<IdentifierInfo*,AsmLabelAttr*> ExtnameUndeclaredIdentifiers;
/// \brief Load weak undeclared identifiers from the external source.
void LoadExternalWeakUndeclaredIdentifiers();
/// WeakTopLevelDecl - Translation-unit scoped declarations generated by
/// \#pragma weak during processing of other Decls.
/// I couldn't figure out a clean way to generate these in-line, so
/// we store them here and handle separately -- which is a hack.
/// It would be best to refactor this.
SmallVector<Decl*,2> WeakTopLevelDecl;
IdentifierResolver IdResolver;
/// Translation Unit Scope - useful to Objective-C actions that need
/// to lookup file scope declarations in the "ordinary" C decl namespace.
/// For example, user-defined classes, built-in "id" type, etc.
Scope *TUScope;
/// \brief The C++ "std" namespace, where the standard library resides.
LazyDeclPtr StdNamespace;
/// \brief The C++ "std::bad_alloc" class, which is defined by the C++
/// standard library.
LazyDeclPtr StdBadAlloc;
/// \brief The C++ "std::initializer_list" template, which is defined in
/// \<initializer_list>.
ClassTemplateDecl *StdInitializerList;
/// \brief The C++ "type_info" declaration, which is defined in \<typeinfo>.
RecordDecl *CXXTypeInfoDecl;
/// \brief The MSVC "_GUID" struct, which is defined in MSVC header files.
RecordDecl *MSVCGuidDecl;
/// \brief Caches identifiers/selectors for NSFoundation APIs.
OwningPtr<NSAPI> NSAPIObj;
/// \brief The declaration of the Objective-C NSNumber class.
ObjCInterfaceDecl *NSNumberDecl;
/// \brief Pointer to NSNumber type (NSNumber *).
QualType NSNumberPointer;
/// \brief The Objective-C NSNumber methods used to create NSNumber literals.
ObjCMethodDecl *NSNumberLiteralMethods[NSAPI::NumNSNumberLiteralMethods];
/// \brief The declaration of the Objective-C NSString class.
ObjCInterfaceDecl *NSStringDecl;
/// \brief Pointer to NSString type (NSString *).
QualType NSStringPointer;
/// \brief The declaration of the stringWithUTF8String: method.
ObjCMethodDecl *StringWithUTF8StringMethod;
/// \brief The declaration of the Objective-C NSArray class.
ObjCInterfaceDecl *NSArrayDecl;
/// \brief The declaration of the arrayWithObjects:count: method.
ObjCMethodDecl *ArrayWithObjectsMethod;
/// \brief The declaration of the Objective-C NSDictionary class.
ObjCInterfaceDecl *NSDictionaryDecl;
/// \brief The declaration of the dictionaryWithObjects:forKeys:count: method.
ObjCMethodDecl *DictionaryWithObjectsMethod;
/// \brief id<NSCopying> type.
QualType QIDNSCopying;
/// \brief will hold 'respondsToSelector:'
Selector RespondsToSelectorSel;
/// A flag to remember whether the implicit forms of operator new and delete
/// have been declared.
bool GlobalNewDeleteDeclared;
/// A flag to indicate that we're in a context that permits abstract
/// references to fields. This is really a
bool AllowAbstractFieldReference;
/// \brief Describes how the expressions currently being parsed are
/// evaluated at run-time, if at all.
enum ExpressionEvaluationContext {
  /// \brief The current expression and its subexpressions occur within an
  /// unevaluated operand (C++11 [expr]p7), such as the subexpression of
  /// \c sizeof, where the type of the expression may be significant but
  /// no code will be generated to evaluate the value of the expression at
  /// run time.
  Unevaluated,

  /// \brief The current expression occurs within an unevaluated
  /// operand that unconditionally permits abstract references to
  /// fields, such as a SIZE operator in MS-style inline assembly.
  UnevaluatedAbstract,

  /// \brief The current context is "potentially evaluated" in C++11 terms,
  /// but the expression is evaluated at compile-time (like the values of
  /// cases in a switch statement).
  ConstantEvaluated,

  /// \brief The current expression is potentially evaluated at run time,
  /// which means that code may be generated to evaluate the value of the
  /// expression at run time.
  PotentiallyEvaluated,

  /// \brief The current expression is potentially evaluated, but any
  /// declarations referenced inside that expression are only used if
  /// in fact the current expression is used.
  ///
  /// This value is used when parsing default function arguments, for which
  /// we would like to provide diagnostics (e.g., passing non-POD arguments
  /// through varargs) but do not want to mark declarations as "referenced"
  /// until the default argument is used.
  PotentiallyEvaluatedIfUsed
};
/// \brief Data structure used to record current or nested
/// expression evaluation contexts.
struct ExpressionEvaluationContextRecord {
  /// \brief The expression evaluation context.
  ExpressionEvaluationContext Context;

  /// \brief Whether the enclosing context needed a cleanup.
  bool ParentNeedsCleanups;

  /// \brief Whether we are in a decltype expression.
  bool IsDecltype;

  /// \brief The number of active cleanup objects when we entered
  /// this expression evaluation context.
  unsigned NumCleanupObjects;

  // NOTE(review): appears to be a snapshot of Sema::MaybeODRUseExprs
  // taken for this context -- confirm against the push/pop code.
  llvm::SmallPtrSet<Expr*, 2> SavedMaybeODRUseExprs;

  /// \brief The lambdas that are present within this context, if it
  /// is indeed an unevaluated context.
  SmallVector<LambdaExpr *, 2> Lambdas;

  /// \brief The declaration that provides context for the lambda expression
  /// if the normal declaration context does not suffice, e.g., in a
  /// default function argument.
  Decl *LambdaContextDecl;

  /// \brief The context information used to mangle lambda expressions
  /// within this context.
  ///
  /// This mangling information is allocated lazily, since most contexts
  /// do not have lambda expressions.
  IntrusiveRefCntPtr<LambdaMangleContext> LambdaMangle;

  /// \brief If we are processing a decltype type, a set of call expressions
  /// for which we have deferred checking the completeness of the return type.
  SmallVector<CallExpr *, 8> DelayedDecltypeCalls;

  /// \brief If we are processing a decltype type, a set of temporary binding
  /// expressions for which we have deferred checking the destructor.
  SmallVector<CXXBindTemporaryExpr *, 8> DelayedDecltypeBinds;

  /// Record a new context; LambdaMangle stays null until first requested.
  ExpressionEvaluationContextRecord(ExpressionEvaluationContext Context,
                                    unsigned NumCleanupObjects,
                                    bool ParentNeedsCleanups,
                                    Decl *LambdaContextDecl,
                                    bool IsDecltype)
    : Context(Context), ParentNeedsCleanups(ParentNeedsCleanups),
      IsDecltype(IsDecltype), NumCleanupObjects(NumCleanupObjects),
      LambdaContextDecl(LambdaContextDecl), LambdaMangle() { }

  /// \brief Retrieve the mangling context for lambdas, creating it on
  /// first use.  Requires LambdaContextDecl to be set.
  LambdaMangleContext &getLambdaMangleContext() {
    assert(LambdaContextDecl && "Need to have a lambda context declaration");
    if (!LambdaMangle)
      LambdaMangle = new LambdaMangleContext;
    return *LambdaMangle;
  }

  /// True for either of the two unevaluated context kinds.
  bool isUnevaluated() const {
    return Context == Unevaluated || Context == UnevaluatedAbstract;
  }
};
/// A stack of expression evaluation contexts.
SmallVector<ExpressionEvaluationContextRecord, 8> ExprEvalContexts;
/// SpecialMemberOverloadResult - The overloading result for a special member
/// function.
///
/// This is basically a wrapper around PointerIntPair. The lowest bits of the
/// integer are used to determine whether overload resolution succeeded.
class SpecialMemberOverloadResult : public llvm::FastFoldingSetNode {
public:
  /// Outcome of overload resolution for the special member.
  enum Kind {
    NoMemberOrDeleted,
    Ambiguous,
    Success
  };

private:
  /// Pointer: the selected method, if any; integer bits: the Kind.
  llvm::PointerIntPair<CXXMethodDecl*, 2> Pair;

public:
  SpecialMemberOverloadResult(const llvm::FoldingSetNodeID &ID)
    : FastFoldingSetNode(ID)
  {}

  /// The method chosen by overload resolution, if any.
  CXXMethodDecl *getMethod() const { return Pair.getPointer(); }
  void setMethod(CXXMethodDecl *MD) { Pair.setPointer(MD); }

  /// The resolution outcome, decoded from the pair's integer bits.
  Kind getKind() const { return static_cast<Kind>(Pair.getInt()); }
  void setKind(Kind K) { Pair.setInt(K); }
};
/// \brief A cache of special member function overload resolution results
/// for C++ records.
llvm::FoldingSet<SpecialMemberOverloadResult> SpecialMemberCache;
/// \brief The kind of translation unit we are processing.
///
/// When we're processing a complete translation unit, Sema will perform
/// end-of-translation-unit semantic tasks (such as creating
/// initializers for tentative definitions in C) once parsing has
/// completed. Modules and precompiled headers perform different kinds of
/// checks.
TranslationUnitKind TUKind;
llvm::BumpPtrAllocator BumpAlloc;
/// \brief The number of SFINAE diagnostics that have been trapped.
unsigned NumSFINAEErrors;
typedef llvm::DenseMap<ParmVarDecl *, SmallVector<ParmVarDecl *, 1> >
UnparsedDefaultArgInstantiationsMap;
/// \brief A mapping from parameters with unparsed default arguments to the
/// set of instantiations of each parameter.
///
/// This mapping is a temporary data structure used when parsing
/// nested class templates or nested classes of class templates,
/// where we might end up instantiating an inner class before the
/// default arguments of its methods have been parsed.
UnparsedDefaultArgInstantiationsMap UnparsedDefaultArgInstantiations;
// Contains the locations of the beginning of unparsed default
// argument locations.
llvm::DenseMap<ParmVarDecl *, SourceLocation> UnparsedDefaultArgLocs;
/// UndefinedInternals - all the used, undefined objects which require a
/// definition in this translation unit.
llvm::DenseMap<NamedDecl *, SourceLocation> UndefinedButUsed;
/// Obtain a sorted list of functions that are undefined but ODR-used.
void getUndefinedButUsed(
llvm::SmallVectorImpl<std::pair<NamedDecl *, SourceLocation> > &Undefined);
typedef std::pair<ObjCMethodList, ObjCMethodList> GlobalMethods;
typedef llvm::DenseMap<Selector, GlobalMethods> GlobalMethodPool;
/// Method Pool - allows efficient lookup when typechecking messages to "id".
/// We need to maintain a list, since selectors can have differing signatures
/// across classes. In Cocoa, this happens to be extremely uncommon (only 1%
/// of selectors are "overloaded").
/// At the head of the list it is recorded whether there were 0, 1, or >= 2
/// methods inside categories with a particular selector.
GlobalMethodPool MethodPool;
/// Method selectors used in a \@selector expression. Used for implementation
/// of -Wselector.
llvm::DenseMap<Selector, SourceLocation> ReferencedSelectors;
/// Kinds of C++ special members.
enum CXXSpecialMember {
CXXDefaultConstructor, // Default constructor.
CXXCopyConstructor, // Copy constructor.
CXXMoveConstructor, // Move constructor.
CXXCopyAssignment, // Copy assignment operator.
CXXMoveAssignment, // Move assignment operator.
CXXDestructor, // Destructor.
CXXInvalid // Not a special member; used as a sentinel.
};
typedef std::pair<CXXRecordDecl*, CXXSpecialMember> SpecialMemberDecl;
/// The C++ special members which we are currently in the process of
/// declaring. If this process recursively triggers the declaration of the
/// same special member, we should act as if it is not yet declared.
llvm::SmallSet<SpecialMemberDecl, 4> SpecialMembersBeingDeclared;
void ReadMethodPool(Selector Sel);
/// Private Helper predicate to check for 'self'.
bool isSelfExpr(Expr *RExpr);
/// \brief Cause the active diagnostic on the DiagosticsEngine to be
/// emitted. This is closely coupled to the SemaDiagnosticBuilder class and
/// should not be used elsewhere.
void EmitCurrentDiagnostic(unsigned DiagID);
/// Records and restores the FP_CONTRACT state on entry/exit of compound
/// statements.
class FPContractStateRAII {
public:
FPContractStateRAII(Sema& S)
: S(S), OldFPContractState(S.FPFeatures.fp_contract) {}
~FPContractStateRAII() {
S.FPFeatures.fp_contract = OldFPContractState;
}
private:
Sema& S;
bool OldFPContractState : 1;
};
typedef llvm::MCAsmParserSemaCallback::InlineAsmIdentifierInfo
InlineAsmIdentifierInfo;
public:
Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer,
TranslationUnitKind TUKind = TU_Complete,
CodeCompleteConsumer *CompletionConsumer = 0);
~Sema();
/// \brief Perform initialization that occurs after the parser has been
/// initialized but before it parses anything.
void Initialize();
const LangOptions &getLangOpts() const { return LangOpts; }
OpenCLOptions &getOpenCLOptions() { return OpenCLFeatures; }
FPOptions &getFPOptions() { return FPFeatures; }
DiagnosticsEngine &getDiagnostics() const { return Diags; }
SourceManager &getSourceManager() const { return SourceMgr; }
const TargetAttributesSema &getTargetAttributesSema() const;
Preprocessor &getPreprocessor() const { return PP; }
ASTContext &getASTContext() const { return Context; }
ASTConsumer &getASTConsumer() const { return Consumer; }
ASTMutationListener *getASTMutationListener() const;
ExternalSemaSource* getExternalSource() const { return ExternalSource; }
///\brief Registers an external source. If an external source already exists,
/// creates a multiplex external source and appends to it.
///
///\param[in] E - A non-null external sema source.
///
void addExternalSource(ExternalSemaSource *E);
void PrintStats() const;
/// \brief Helper class that creates diagnostics with optional
/// template instantiation stacks.
///
/// This class provides a wrapper around the basic DiagnosticBuilder
/// class that emits diagnostics. SemaDiagnosticBuilder is
/// responsible for emitting the diagnostic (as DiagnosticBuilder
/// does) and, if the diagnostic comes from inside a template
/// instantiation, printing the template instantiation stack as
/// well.
class SemaDiagnosticBuilder : public DiagnosticBuilder {
// The Sema that will ultimately emit the diagnostic (via EmitCurrentDiagnostic).
Sema &SemaRef;
// ID of the diagnostic being built; forwarded to Sema on destruction.
unsigned DiagID;
public:
// Wraps an already-started DiagnosticBuilder; actual emission is deferred to
// this object's destructor so Sema can add template-instantiation context.
SemaDiagnosticBuilder(DiagnosticBuilder &DB, Sema &SemaRef, unsigned DiagID)
: DiagnosticBuilder(DB), SemaRef(SemaRef), DiagID(DiagID) { }
~SemaDiagnosticBuilder() {
// If we aren't active, there is nothing to do.
if (!isActive()) return;
// Otherwise, we need to emit the diagnostic. First flush the underlying
// DiagnosticBuilder data, and clear the diagnostic builder itself so it
// won't emit the diagnostic in its own destructor.
//
// This seems wasteful, in that as written the DiagnosticBuilder dtor will
// do its own needless checks to see if the diagnostic needs to be
// emitted. However, because we take care to ensure that the builder
// objects never escape, a sufficiently smart compiler will be able to
// eliminate that code.
FlushCounts();
Clear();
// Dispatch to Sema to emit the diagnostic.
SemaRef.EmitCurrentDiagnostic(DiagID);
}
};
/// \brief Emit a diagnostic.
///
/// Wraps DiagnosticsEngine::Report in a SemaDiagnosticBuilder so that the
/// template instantiation stack can be printed when the diagnostic is flushed.
SemaDiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID) {
  DiagnosticBuilder Builder = Diags.Report(Loc, DiagID);
  return SemaDiagnosticBuilder(Builder, *this, DiagID);
}
/// \brief Emit a partial diagnostic.
SemaDiagnosticBuilder Diag(SourceLocation Loc, const PartialDiagnostic& PD);
/// \brief Build a partial diagnostic.
PartialDiagnostic PDiag(unsigned DiagID = 0); // in SemaInternal.h
bool findMacroSpelling(SourceLocation &loc, StringRef name);
/// \brief Get a string to suggest for zero-initialization of a type.
std::string getFixItZeroInitializerForType(QualType T) const;
std::string getFixItZeroLiteralForType(QualType T) const;
/// Legacy ownership wrappers: ownership is implicit in the result types, so
/// these simply pass their argument through unchanged.
ExprResult Owned(Expr *E) { return ExprResult(E); }
ExprResult Owned(ExprResult R) { return R; }
StmtResult Owned(Stmt *S) { return StmtResult(S); }
void ActOnEndOfTranslationUnit();
void CheckDelegatingCtorCycles();
Scope *getScopeForContext(DeclContext *Ctx);
void PushFunctionScope();
void PushBlockScope(Scope *BlockScope, BlockDecl *Block);
void PushLambdaScope(CXXRecordDecl *Lambda, CXXMethodDecl *CallOperator);
void PushCapturedRegionScope(Scope *RegionScope, CapturedDecl *CD,
RecordDecl *RD,
CapturedRegionKind K);
void PopFunctionScopeInfo(const sema::AnalysisBasedWarnings::Policy *WP =0,
const Decl *D = 0, const BlockExpr *blkExpr = 0);
// Returns the innermost function scope. Calls back() on FunctionScopes, so
// a scope must already have been pushed (see PushFunctionScope).
sema::FunctionScopeInfo *getCurFunction() const {
return FunctionScopes.back();
}
void PushCompoundScope();
void PopCompoundScope();
sema::CompoundScopeInfo &getCurCompoundScope() const;
bool hasAnyUnrecoverableErrorsInThisFunction() const;
/// \brief Retrieve the current block, if any.
sema::BlockScopeInfo *getCurBlock();
/// \brief Retrieve the current lambda expression, if any.
sema::LambdaScopeInfo *getCurLambda();
/// \brief Retrieve the current captured region, if any.
sema::CapturedRegionScopeInfo *getCurCapturedRegion();
/// WeakTopLevelDeclDecls - access to \#pragma weak-generated Decls
SmallVector<Decl*,2> &WeakTopLevelDecls() { return WeakTopLevelDecl; }
void ActOnComment(SourceRange Comment);
//===--------------------------------------------------------------------===//
// Type Analysis / Processing: SemaType.cpp.
//
QualType BuildQualifiedType(QualType T, SourceLocation Loc, Qualifiers Qs,
const DeclSpec *DS = 0);
QualType BuildQualifiedType(QualType T, SourceLocation Loc, unsigned CVRA,
const DeclSpec *DS = 0);
QualType BuildPointerType(QualType T,
SourceLocation Loc, DeclarationName Entity);
QualType BuildReferenceType(QualType T, bool LValueRef,
SourceLocation Loc, DeclarationName Entity);
QualType BuildArrayType(QualType T, ArrayType::ArraySizeModifier ASM,
Expr *ArraySize, unsigned Quals,
SourceRange Brackets, DeclarationName Entity);
QualType BuildExtVectorType(QualType T, Expr *ArraySize,
SourceLocation AttrLoc);
/// \brief Build a function type.
///
/// This routine checks the function type according to C++ rules and
/// under the assumption that the result type and parameter types have
/// just been instantiated from a template. It therefore duplicates
/// some of the behavior of GetTypeForDeclarator, but in a much
/// simpler form that is only suitable for this narrow use case.
///
/// \param T The return type of the function.
///
/// \param ParamTypes The parameter types of the function. This array
/// will be modified to account for adjustments to the types of the
/// function parameters.
///
/// \param Loc The location of the entity whose type involves this
/// function type or, if there is no such entity, the location of the
/// type that will have function type.
///
/// \param Entity The name of the entity that involves the function
/// type, if known.
///
/// \param EPI Extra information about the function type. Usually this will
/// be taken from an existing function with the same prototype.
///
/// \returns A suitable function type, if there are no errors. The
/// unqualified type will always be a FunctionProtoType.
/// Otherwise, returns a NULL type.
QualType BuildFunctionType(QualType T,
llvm::MutableArrayRef<QualType> ParamTypes,
SourceLocation Loc, DeclarationName Entity,
const FunctionProtoType::ExtProtoInfo &EPI);
QualType BuildMemberPointerType(QualType T, QualType Class,
SourceLocation Loc,
DeclarationName Entity);
QualType BuildBlockPointerType(QualType T,
SourceLocation Loc, DeclarationName Entity);
QualType BuildParenType(QualType T);
QualType BuildAtomicType(QualType T, SourceLocation Loc);
TypeSourceInfo *GetTypeForDeclarator(Declarator &D, Scope *S);
TypeSourceInfo *GetTypeForDeclaratorCast(Declarator &D, QualType FromTy);
TypeSourceInfo *GetTypeSourceInfoForDeclarator(Declarator &D, QualType T,
TypeSourceInfo *ReturnTypeInfo);
/// \brief Package the given type and TSI into a ParsedType.
ParsedType CreateParsedType(QualType T, TypeSourceInfo *TInfo);
DeclarationNameInfo GetNameForDeclarator(Declarator &D);
DeclarationNameInfo GetNameFromUnqualifiedId(const UnqualifiedId &Name);
static QualType GetTypeFromParser(ParsedType Ty, TypeSourceInfo **TInfo = 0);
CanThrowResult canThrow(const Expr *E);
const FunctionProtoType *ResolveExceptionSpec(SourceLocation Loc,
const FunctionProtoType *FPT);
bool CheckSpecifiedExceptionType(QualType &T, const SourceRange &Range);
bool CheckDistantExceptionSpec(QualType T);
bool CheckEquivalentExceptionSpec(FunctionDecl *Old, FunctionDecl *New);
bool CheckEquivalentExceptionSpec(
const FunctionProtoType *Old, SourceLocation OldLoc,
const FunctionProtoType *New, SourceLocation NewLoc);
bool CheckEquivalentExceptionSpec(
const PartialDiagnostic &DiagID, const PartialDiagnostic & NoteID,
const FunctionProtoType *Old, SourceLocation OldLoc,
const FunctionProtoType *New, SourceLocation NewLoc,
bool *MissingExceptionSpecification = 0,
bool *MissingEmptyExceptionSpecification = 0,
bool AllowNoexceptAllMatchWithNoSpec = false,
bool IsOperatorNew = false);
bool CheckExceptionSpecSubset(
const PartialDiagnostic &DiagID, const PartialDiagnostic & NoteID,
const FunctionProtoType *Superset, SourceLocation SuperLoc,
const FunctionProtoType *Subset, SourceLocation SubLoc);
bool CheckParamExceptionSpec(const PartialDiagnostic & NoteID,
const FunctionProtoType *Target, SourceLocation TargetLoc,
const FunctionProtoType *Source, SourceLocation SourceLoc);
TypeResult ActOnTypeName(Scope *S, Declarator &D);
/// \brief The parser has parsed the context-sensitive type 'instancetype'
/// in an Objective-C message declaration. Return the appropriate type.
ParsedType ActOnObjCInstanceType(SourceLocation Loc);
/// \brief Abstract class used to diagnose incomplete types.
struct TypeDiagnoser {
// When true, subclasses' diagnose() implementations are expected to emit
// nothing (see the BoundTypeDiagnoserN classes, which check this flag).
bool Suppressed;

TypeDiagnoser(bool Suppressed = false) : Suppressed(Suppressed) { }

// Emit the diagnostic for the incomplete type T at Loc.
virtual void diagnose(Sema &S, SourceLocation Loc, QualType T) = 0;
virtual ~TypeDiagnoser() {}
};
// getPrintable - Overload set that normalizes a value into a form that can be
// streamed into a diagnostic. Most overloads are identity functions; Expr and
// TypeLoc are reduced to their source ranges, and a SourceLocation is
// converted to a SourceRange.
static int getPrintable(int I) { return I; }
static unsigned getPrintable(unsigned I) { return I; }
static bool getPrintable(bool B) { return B; }
static const char * getPrintable(const char *S) { return S; }
static StringRef getPrintable(StringRef S) { return S; }
static const std::string &getPrintable(const std::string &S) { return S; }
static const IdentifierInfo *getPrintable(const IdentifierInfo *II) {
return II;
}
static DeclarationName getPrintable(DeclarationName N) { return N; }
static QualType getPrintable(QualType T) { return T; }
static SourceRange getPrintable(SourceRange R) { return R; }
// A single location converts to a (degenerate) range.
static SourceRange getPrintable(SourceLocation L) { return L; }
static SourceRange getPrintable(Expr *E) { return E->getSourceRange(); }
static SourceRange getPrintable(TypeLoc TL) { return TL.getSourceRange();}
/// \brief A TypeDiagnoser that binds one additional diagnostic argument.
///
/// A DiagID of zero suppresses the diagnostic (via TypeDiagnoser::Suppressed).
template<typename T1>
class BoundTypeDiagnoser1 : public TypeDiagnoser {
  unsigned DiagID;  // Diagnostic to emit; 0 means "suppressed".
  const T1 &Arg1;   // Held by reference; must outlive this object.

public:
  BoundTypeDiagnoser1(unsigned DiagID, const T1 &Arg1)
      : TypeDiagnoser(DiagID == 0), DiagID(DiagID), Arg1(Arg1) {}

  virtual void diagnose(Sema &S, SourceLocation Loc, QualType T) {
    if (Suppressed)
      return;
    S.Diag(Loc, DiagID) << getPrintable(Arg1) << T;
  }

  virtual ~BoundTypeDiagnoser1() {}
};
/// \brief A TypeDiagnoser that binds two additional diagnostic arguments.
///
/// A DiagID of zero suppresses the diagnostic (via TypeDiagnoser::Suppressed).
template<typename T1, typename T2>
class BoundTypeDiagnoser2 : public TypeDiagnoser {
  unsigned DiagID;  // Diagnostic to emit; 0 means "suppressed".
  const T1 &Arg1;   // Held by reference; must outlive this object.
  const T2 &Arg2;   // Held by reference; must outlive this object.

public:
  BoundTypeDiagnoser2(unsigned DiagID, const T1 &Arg1, const T2 &Arg2)
      : TypeDiagnoser(DiagID == 0), DiagID(DiagID), Arg1(Arg1), Arg2(Arg2) {}

  virtual void diagnose(Sema &S, SourceLocation Loc, QualType T) {
    if (Suppressed)
      return;
    S.Diag(Loc, DiagID) << getPrintable(Arg1) << getPrintable(Arg2) << T;
  }

  virtual ~BoundTypeDiagnoser2() {}
};
/// \brief A TypeDiagnoser that binds three additional diagnostic arguments.
///
/// A DiagID of zero suppresses the diagnostic (via TypeDiagnoser::Suppressed).
template<typename T1, typename T2, typename T3>
class BoundTypeDiagnoser3 : public TypeDiagnoser {
  unsigned DiagID;  // Diagnostic to emit; 0 means "suppressed".
  const T1 &Arg1;   // Held by reference; must outlive this object.
  const T2 &Arg2;   // Held by reference; must outlive this object.
  const T3 &Arg3;   // Held by reference; must outlive this object.

public:
  BoundTypeDiagnoser3(unsigned DiagID, const T1 &Arg1, const T2 &Arg2,
                      const T3 &Arg3)
      : TypeDiagnoser(DiagID == 0), DiagID(DiagID), Arg1(Arg1), Arg2(Arg2),
        Arg3(Arg3) {}

  virtual void diagnose(Sema &S, SourceLocation Loc, QualType T) {
    if (Suppressed)
      return;
    S.Diag(Loc, DiagID)
        << getPrintable(Arg1) << getPrintable(Arg2) << getPrintable(Arg3) << T;
  }

  virtual ~BoundTypeDiagnoser3() {}
};
bool RequireCompleteType(SourceLocation Loc, QualType T,
TypeDiagnoser &Diagnoser);
bool RequireCompleteType(SourceLocation Loc, QualType T,
unsigned DiagID);
/// \brief Require a complete type, reporting DiagID with one bound extra
/// argument on failure.
template<typename T1>
bool RequireCompleteType(SourceLocation Loc, QualType T,
                         unsigned DiagID, const T1 &Arg1) {
  BoundTypeDiagnoser1<T1> Bound(DiagID, Arg1);
  return RequireCompleteType(Loc, T, Bound);
}

/// \brief Require a complete type, reporting DiagID with two bound extra
/// arguments on failure.
template<typename T1, typename T2>
bool RequireCompleteType(SourceLocation Loc, QualType T,
                         unsigned DiagID, const T1 &Arg1, const T2 &Arg2) {
  BoundTypeDiagnoser2<T1, T2> Bound(DiagID, Arg1, Arg2);
  return RequireCompleteType(Loc, T, Bound);
}

/// \brief Require a complete type, reporting DiagID with three bound extra
/// arguments on failure.
template<typename T1, typename T2, typename T3>
bool RequireCompleteType(SourceLocation Loc, QualType T,
                         unsigned DiagID, const T1 &Arg1, const T2 &Arg2,
                         const T3 &Arg3) {
  BoundTypeDiagnoser3<T1, T2, T3> Bound(DiagID, Arg1, Arg2, Arg3);
  return RequireCompleteType(Loc, T, Bound);
}
bool RequireCompleteExprType(Expr *E, TypeDiagnoser &Diagnoser);
bool RequireCompleteExprType(Expr *E, unsigned DiagID);
/// \brief Require the expression's type to be complete, reporting DiagID with
/// one bound extra argument on failure.
template<typename T1>
bool RequireCompleteExprType(Expr *E, unsigned DiagID, const T1 &Arg1) {
  BoundTypeDiagnoser1<T1> Bound(DiagID, Arg1);
  return RequireCompleteExprType(E, Bound);
}

/// \brief Require the expression's type to be complete, reporting DiagID with
/// two bound extra arguments on failure.
template<typename T1, typename T2>
bool RequireCompleteExprType(Expr *E, unsigned DiagID, const T1 &Arg1,
                             const T2 &Arg2) {
  BoundTypeDiagnoser2<T1, T2> Bound(DiagID, Arg1, Arg2);
  return RequireCompleteExprType(E, Bound);
}

/// \brief Require the expression's type to be complete, reporting DiagID with
/// three bound extra arguments on failure.
template<typename T1, typename T2, typename T3>
bool RequireCompleteExprType(Expr *E, unsigned DiagID, const T1 &Arg1,
                             const T2 &Arg2, const T3 &Arg3) {
  BoundTypeDiagnoser3<T1, T2, T3> Bound(DiagID, Arg1, Arg2, Arg3);
  return RequireCompleteExprType(E, Bound);
}
bool RequireLiteralType(SourceLocation Loc, QualType T,
TypeDiagnoser &Diagnoser);
bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID);
/// \brief Require a literal type, reporting DiagID with one bound extra
/// argument on failure.
template<typename T1>
bool RequireLiteralType(SourceLocation Loc, QualType T,
                        unsigned DiagID, const T1 &Arg1) {
  BoundTypeDiagnoser1<T1> Bound(DiagID, Arg1);
  return RequireLiteralType(Loc, T, Bound);
}

/// \brief Require a literal type, reporting DiagID with two bound extra
/// arguments on failure.
template<typename T1, typename T2>
bool RequireLiteralType(SourceLocation Loc, QualType T,
                        unsigned DiagID, const T1 &Arg1, const T2 &Arg2) {
  BoundTypeDiagnoser2<T1, T2> Bound(DiagID, Arg1, Arg2);
  return RequireLiteralType(Loc, T, Bound);
}

/// \brief Require a literal type, reporting DiagID with three bound extra
/// arguments on failure.
template<typename T1, typename T2, typename T3>
bool RequireLiteralType(SourceLocation Loc, QualType T,
                        unsigned DiagID, const T1 &Arg1, const T2 &Arg2,
                        const T3 &Arg3) {
  BoundTypeDiagnoser3<T1, T2, T3> Bound(DiagID, Arg1, Arg2, Arg3);
  return RequireLiteralType(Loc, T, Bound);
}
QualType getElaboratedType(ElaboratedTypeKeyword Keyword,
const CXXScopeSpec &SS, QualType T);
QualType BuildTypeofExprType(Expr *E, SourceLocation Loc);
QualType BuildDecltypeType(Expr *E, SourceLocation Loc);
QualType BuildUnaryTransformType(QualType BaseType,
UnaryTransformType::UTTKind UKind,
SourceLocation Loc);
//===--------------------------------------------------------------------===//
// Symbol table / Decl tracking callbacks: SemaDecl.cpp.
//
/// List of decls defined in a function prototype. This contains EnumConstants
/// that incorrectly end up in translation unit scope because there is no
/// function to pin them on. ActOnFunctionDeclarator reads this list and patches
/// them into the FunctionDecl.
std::vector<NamedDecl*> DeclsInPrototypeScope;
/// Nonzero if we are currently parsing a function declarator. This is a counter
/// as opposed to a boolean so we can deal with nested function declarators
/// such as:
/// void f(void (*g)(), ...)
unsigned InFunctionDeclarator;
DeclGroupPtrTy ConvertDeclToDeclGroup(Decl *Ptr, Decl *OwnedType = 0);
void DiagnoseUseOfUnimplementedSelectors();
bool isSimpleTypeSpecifier(tok::TokenKind Kind) const;
ParsedType getTypeName(const IdentifierInfo &II, SourceLocation NameLoc,
Scope *S, CXXScopeSpec *SS = 0,
bool isClassName = false,
bool HasTrailingDot = false,
ParsedType ObjectType = ParsedType(),
bool IsCtorOrDtorName = false,
bool WantNontrivialTypeSourceInfo = false,
IdentifierInfo **CorrectedII = 0);
TypeSpecifierType isTagName(IdentifierInfo &II, Scope *S);
bool isMicrosoftMissingTypename(const CXXScopeSpec *SS, Scope *S);
bool DiagnoseUnknownTypeName(IdentifierInfo *&II,
SourceLocation IILoc,
Scope *S,
CXXScopeSpec *SS,
ParsedType &SuggestedType);
/// \brief Describes the result of the name lookup and resolution performed
/// by \c ClassifyName().
enum NameClassificationKind {
NC_Unknown, // The name could not be classified.
NC_Error, // Classification failed with an error.
NC_Keyword, // The name is a keyword (IdentifierInfo payload).
NC_Type, // The name denotes a type (ParsedType payload).
NC_Expression, // The name forms an expression (ExprResult payload).
NC_NestedNameSpecifier, // The name is a nested-name-specifier.
NC_TypeTemplate, // The name is a type template (TemplateName payload).
NC_FunctionTemplate // The name is a function template (TemplateName payload).
};
class NameClassification {
NameClassificationKind Kind;
ExprResult Expr;
TemplateName Template;
ParsedType Type;
const IdentifierInfo *Keyword;
explicit NameClassification(NameClassificationKind Kind) : Kind(Kind) {}
public:
NameClassification(ExprResult Expr) : Kind(NC_Expression), Expr(Expr) {}
NameClassification(ParsedType Type) : Kind(NC_Type), Type(Type) {}
NameClassification(const IdentifierInfo *Keyword)
: Kind(NC_Keyword), Keyword(Keyword) { }
static NameClassification Error() {
return NameClassification(NC_Error);
}
static NameClassification Unknown() {
return NameClassification(NC_Unknown);
}
static NameClassification NestedNameSpecifier() {
return NameClassification(NC_NestedNameSpecifier);
}
static NameClassification TypeTemplate(TemplateName Name) {
NameClassification Result(NC_TypeTemplate);
Result.Template = Name;
return Result;
}
static NameClassification FunctionTemplate(TemplateName Name) {
NameClassification Result(NC_FunctionTemplate);
Result.Template = Name;
return Result;
}
NameClassificationKind getKind() const { return Kind; }
ParsedType getType() const {
assert(Kind == NC_Type);
return Type;
}
ExprResult getExpression() const {
assert(Kind == NC_Expression);
return Expr;
}
TemplateName getTemplateName() const {
assert(Kind == NC_TypeTemplate || Kind == NC_FunctionTemplate);
return Template;
}
TemplateNameKind getTemplateNameKind() const {
assert(Kind == NC_TypeTemplate || Kind == NC_FunctionTemplate);
return Kind == NC_TypeTemplate? TNK_Type_template : TNK_Function_template;
}
};
/// \brief Perform name lookup on the given name, classifying it based on
/// the results of name lookup and the following token.
///
/// This routine is used by the parser to resolve identifiers and help direct
/// parsing. When the identifier cannot be found, this routine will attempt
/// to correct the typo and classify based on the resulting name.
///
/// \param S The scope in which we're performing name lookup.
///
/// \param SS The nested-name-specifier that precedes the name.
///
/// \param Name The identifier. If typo correction finds an alternative name,
/// this pointer parameter will be updated accordingly.
///
/// \param NameLoc The location of the identifier.
///
/// \param NextToken The token following the identifier. Used to help
/// disambiguate the name.
///
/// \param IsAddressOfOperand True if this name is the operand of a unary
/// address of ('&') expression, assuming it is classified as an
/// expression.
///
/// \param CCC The correction callback, if typo correction is desired.
NameClassification ClassifyName(Scope *S,
CXXScopeSpec &SS,
IdentifierInfo *&Name,
SourceLocation NameLoc,
const Token &NextToken,
bool IsAddressOfOperand,
CorrectionCandidateCallback *CCC = 0);
Decl *ActOnDeclarator(Scope *S, Declarator &D);
NamedDecl *HandleDeclarator(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParameterLists);
void RegisterLocallyScopedExternCDecl(NamedDecl *ND,
const LookupResult &Previous,
Scope *S);
bool DiagnoseClassNameShadow(DeclContext *DC, DeclarationNameInfo Info);
bool diagnoseQualifiedDeclaration(CXXScopeSpec &SS, DeclContext *DC,
DeclarationName Name,
SourceLocation Loc);
void DiagnoseFunctionSpecifiers(const DeclSpec &DS);
void CheckShadow(Scope *S, VarDecl *D, const LookupResult& R);
void CheckShadow(Scope *S, VarDecl *D);
void CheckCastAlign(Expr *Op, QualType T, SourceRange TRange);
void CheckTypedefForVariablyModifiedType(Scope *S, TypedefNameDecl *D);
NamedDecl* ActOnTypedefDeclarator(Scope* S, Declarator& D, DeclContext* DC,
TypeSourceInfo *TInfo,
LookupResult &Previous);
NamedDecl* ActOnTypedefNameDecl(Scope* S, DeclContext* DC, TypedefNameDecl *D,
LookupResult &Previous, bool &Redeclaration);
NamedDecl* ActOnVariableDeclarator(Scope* S, Declarator& D, DeclContext* DC,
TypeSourceInfo *TInfo,
LookupResult &Previous,
MultiTemplateParamsArg TemplateParamLists);
// Returns true if the variable declaration is a redeclaration
bool CheckVariableDeclaration(VarDecl *NewVD, LookupResult &Previous);
void CheckVariableDeclarationType(VarDecl *NewVD);
void CheckCompleteVariableDeclaration(VarDecl *var);
void MaybeSuggestAddingStaticToDecl(const FunctionDecl *D);
void ActOnStartFunctionDeclarator();
void ActOnEndFunctionDeclarator();
NamedDecl* ActOnFunctionDeclarator(Scope* S, Declarator& D, DeclContext* DC,
TypeSourceInfo *TInfo,
LookupResult &Previous,
MultiTemplateParamsArg TemplateParamLists,
bool &AddToScope);
bool AddOverriddenMethods(CXXRecordDecl *DC, CXXMethodDecl *MD);
void checkVoidParamDecl(ParmVarDecl *Param);
bool CheckConstexprFunctionDecl(const FunctionDecl *FD);
bool CheckConstexprFunctionBody(const FunctionDecl *FD, Stmt *Body);
void DiagnoseHiddenVirtualMethods(CXXRecordDecl *DC, CXXMethodDecl *MD);
// Returns true if the function declaration is a redeclaration
bool CheckFunctionDeclaration(Scope *S,
FunctionDecl *NewFD, LookupResult &Previous,
bool IsExplicitSpecialization);
void CheckMain(FunctionDecl *FD, const DeclSpec &D);
Decl *ActOnParamDeclarator(Scope *S, Declarator &D);
ParmVarDecl *BuildParmVarDeclForTypedef(DeclContext *DC,
SourceLocation Loc,
QualType T);
ParmVarDecl *CheckParameter(DeclContext *DC, SourceLocation StartLoc,
SourceLocation NameLoc, IdentifierInfo *Name,
QualType T, TypeSourceInfo *TSInfo,
StorageClass SC);
void ActOnParamDefaultArgument(Decl *param,
SourceLocation EqualLoc,
Expr *defarg);
void ActOnParamUnparsedDefaultArgument(Decl *param,
SourceLocation EqualLoc,
SourceLocation ArgLoc);
void ActOnParamDefaultArgumentError(Decl *param);
bool SetParamDefaultArgument(ParmVarDecl *Param, Expr *DefaultArg,
SourceLocation EqualLoc);
void AddInitializerToDecl(Decl *dcl, Expr *init, bool DirectInit,
bool TypeMayContainAuto);
void ActOnUninitializedDecl(Decl *dcl, bool TypeMayContainAuto);
void ActOnInitializerError(Decl *Dcl);
void ActOnCXXForRangeDecl(Decl *D);
void SetDeclDeleted(Decl *dcl, SourceLocation DelLoc);
void SetDeclDefaulted(Decl *dcl, SourceLocation DefaultLoc);
void FinalizeDeclaration(Decl *D);
DeclGroupPtrTy FinalizeDeclaratorGroup(Scope *S, const DeclSpec &DS,
Decl **Group,
unsigned NumDecls);
DeclGroupPtrTy BuildDeclaratorGroup(Decl **Group, unsigned NumDecls,
bool TypeMayContainAuto = true);
/// Should be called on all declarations that might have attached
/// documentation comments.
void ActOnDocumentableDecl(Decl *D);
void ActOnDocumentableDecls(Decl **Group, unsigned NumDecls);
void ActOnFinishKNRParamDeclarations(Scope *S, Declarator &D,
SourceLocation LocAfterDecls);
void CheckForFunctionRedefinition(FunctionDecl *FD);
Decl *ActOnStartOfFunctionDef(Scope *S, Declarator &D);
Decl *ActOnStartOfFunctionDef(Scope *S, Decl *D);
void ActOnStartOfObjCMethodDef(Scope *S, Decl *D);
/// \brief True when \p D is a non-null Objective-C method declaration.
bool isObjCMethodDecl(Decl *D) {
  if (!D)
    return false;
  return isa<ObjCMethodDecl>(D);
}
/// \brief Determine whether we can skip parsing the body of a function
/// definition, assuming we don't care about analyzing its body or emitting
/// code for that function.
///
/// This will be \c false only if we may need the body of the function in
/// order to parse the rest of the program (for instance, if it is
/// \c constexpr in C++11 or has an 'auto' return type in C++14).
bool canSkipFunctionBody(Decl *D);
void computeNRVO(Stmt *Body, sema::FunctionScopeInfo *Scope);
Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body);
Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body, bool IsInstantiation);
Decl *ActOnSkippedFunctionBody(Decl *Decl);
/// ActOnFinishDelayedAttribute - Invoked when we have finished parsing an
/// attribute for which parsing is delayed.
void ActOnFinishDelayedAttribute(Scope *S, Decl *D, ParsedAttributes &Attrs);
/// \brief Diagnose any unused parameters in the given sequence of
/// ParmVarDecl pointers.
void DiagnoseUnusedParameters(ParmVarDecl * const *Begin,
ParmVarDecl * const *End);
/// \brief Diagnose whether the size of parameters or return value of a
/// function or obj-c method definition is pass-by-value and larger than a
/// specified threshold.
void DiagnoseSizeOfParametersAndReturnValue(ParmVarDecl * const *Begin,
ParmVarDecl * const *End,
QualType ReturnTy,
NamedDecl *D);
void DiagnoseInvalidJumps(Stmt *Body);
Decl *ActOnFileScopeAsmDecl(Expr *expr,
SourceLocation AsmLoc,
SourceLocation RParenLoc);
/// \brief Handle a C++11 empty-declaration and attribute-declaration.
Decl *ActOnEmptyDeclaration(Scope *S,
AttributeList *AttrList,
SourceLocation SemiLoc);
/// \brief The parser has processed a module import declaration.
///
/// \param AtLoc The location of the '@' symbol, if any.
///
/// \param ImportLoc The location of the 'import' keyword.
///
/// \param Path The module access path.
DeclResult ActOnModuleImport(SourceLocation AtLoc, SourceLocation ImportLoc,
ModuleIdPath Path);
/// \brief Create an implicit import of the given module at the given
/// source location.
///
/// This routine is typically used for error recovery, when the entity found
/// by name lookup is actually hidden within a module that we know about but
/// the user has forgotten to import.
void createImplicitModuleImport(SourceLocation Loc, Module *Mod);
/// \brief Retrieve a suitable printing policy.
PrintingPolicy getPrintingPolicy() const {
// Delegates to the static overload below, using this Sema's ASTContext
// and Preprocessor.
return getPrintingPolicy(Context, PP);
}
/// \brief Retrieve a suitable printing policy.
static PrintingPolicy getPrintingPolicy(const ASTContext &Ctx,
const Preprocessor &PP);
/// Scope actions.
void ActOnPopScope(SourceLocation Loc, Scope *S);
void ActOnTranslationUnitScope(Scope *S);
Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS,
DeclSpec &DS);
Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS,
DeclSpec &DS,
MultiTemplateParamsArg TemplateParams,
bool IsExplicitInstantiation = false);
Decl *BuildAnonymousStructOrUnion(Scope *S, DeclSpec &DS,
AccessSpecifier AS,
RecordDecl *Record);
Decl *BuildMicrosoftCAnonymousStruct(Scope *S, DeclSpec &DS,
RecordDecl *Record);
bool isAcceptableTagRedeclaration(const TagDecl *Previous,
TagTypeKind NewTag, bool isDefinition,
SourceLocation NewTagLoc,
const IdentifierInfo &Name);
/// \brief How a tag (struct/union/class/enum) name is being used, as
/// illustrated by the example on each enumerator.
enum TagUseKind {
TUK_Reference, // Reference to a tag: 'struct foo *X;'
TUK_Declaration, // Fwd decl of a tag: 'struct foo;'
TUK_Definition, // Definition of a tag: 'struct foo { int X; } Y;'
TUK_Friend // Friend declaration: 'friend struct foo;'
};
Decl *ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK,
SourceLocation KWLoc, CXXScopeSpec &SS,
IdentifierInfo *Name, SourceLocation NameLoc,
AttributeList *Attr, AccessSpecifier AS,
SourceLocation ModulePrivateLoc,
MultiTemplateParamsArg TemplateParameterLists,
bool &OwnedDecl, bool &IsDependent,
SourceLocation ScopedEnumKWLoc,
bool ScopedEnumUsesClassTag, TypeResult UnderlyingType);
Decl *ActOnTemplatedFriendTag(Scope *S, SourceLocation FriendLoc,
unsigned TagSpec, SourceLocation TagLoc,
CXXScopeSpec &SS,
IdentifierInfo *Name, SourceLocation NameLoc,
AttributeList *Attr,
MultiTemplateParamsArg TempParamLists);
TypeResult ActOnDependentTag(Scope *S,
unsigned TagSpec,
TagUseKind TUK,
const CXXScopeSpec &SS,
IdentifierInfo *Name,
SourceLocation TagLoc,
SourceLocation NameLoc);
void ActOnDefs(Scope *S, Decl *TagD, SourceLocation DeclStart,
IdentifierInfo *ClassName,
SmallVectorImpl<Decl *> &Decls);
Decl *ActOnField(Scope *S, Decl *TagD, SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth);
FieldDecl *HandleField(Scope *S, RecordDecl *TagD, SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth,
InClassInitStyle InitStyle,
AccessSpecifier AS);
MSPropertyDecl *HandleMSProperty(Scope *S, RecordDecl *TagD,
SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth,
InClassInitStyle InitStyle,
AccessSpecifier AS,
AttributeList *MSPropertyAttr);
FieldDecl *CheckFieldDecl(DeclarationName Name, QualType T,
TypeSourceInfo *TInfo,
RecordDecl *Record, SourceLocation Loc,
bool Mutable, Expr *BitfieldWidth,
InClassInitStyle InitStyle,
SourceLocation TSSL,
AccessSpecifier AS, NamedDecl *PrevDecl,
Declarator *D = 0);
bool CheckNontrivialField(FieldDecl *FD);
void DiagnoseNontrivial(const CXXRecordDecl *Record, CXXSpecialMember CSM);
bool SpecialMemberIsTrivial(CXXMethodDecl *MD, CXXSpecialMember CSM,
bool Diagnose = false);
CXXSpecialMember getSpecialMember(const CXXMethodDecl *MD);
void ActOnLastBitfield(SourceLocation DeclStart,
SmallVectorImpl<Decl *> &AllIvarDecls);
Decl *ActOnIvar(Scope *S, SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth,
tok::ObjCKeywordKind visibility);
// This is used for both record definitions and ObjC interface declarations.
void ActOnFields(Scope* S, SourceLocation RecLoc, Decl *TagDecl,
ArrayRef<Decl *> Fields,
SourceLocation LBrac, SourceLocation RBrac,
AttributeList *AttrList);
/// ActOnTagStartDefinition - Invoked when we have entered the
/// scope of a tag's definition (e.g., for an enumeration, class,
/// struct, or union).
void ActOnTagStartDefinition(Scope *S, Decl *TagDecl);
/// Invoked when entering the body of an Objective-C container
/// (\@interface, \@implementation, etc.); pairs with
/// ActOnObjCContainerFinishDefinition.
Decl *ActOnObjCContainerStartDefinition(Decl *IDecl);
/// ActOnStartCXXMemberDeclarations - Invoked when we have parsed a
/// C++ record definition's base-specifiers clause and are starting its
/// member declarations.
void ActOnStartCXXMemberDeclarations(Scope *S, Decl *TagDecl,
SourceLocation FinalLoc,
SourceLocation LBraceLoc);
/// ActOnTagFinishDefinition - Invoked once we have finished parsing
/// the definition of a tag (enumeration, class, struct, or union).
void ActOnTagFinishDefinition(Scope *S, Decl *TagDecl,
SourceLocation RBraceLoc);
void ActOnObjCContainerFinishDefinition();
/// \brief Invoked when we must temporarily exit the objective-c container
/// scope for parsing/looking-up C constructs.
///
/// Must be followed by a call to \see ActOnObjCReenterContainerContext
void ActOnObjCTemporaryExitContainerContext(DeclContext *DC);
void ActOnObjCReenterContainerContext(DeclContext *DC);
/// ActOnTagDefinitionError - Invoked when there was an unrecoverable
/// error parsing the definition of a tag.
void ActOnTagDefinitionError(Scope *S, Decl *TagDecl);
/// Semantic check for a single enumerator; \p LastEnumConst is the previous
/// enumerator (used when \p val is absent), and \p val is the explicit
/// value expression, if any.
EnumConstantDecl *CheckEnumConstant(EnumDecl *Enum,
EnumConstantDecl *LastEnumConst,
SourceLocation IdLoc,
IdentifierInfo *Id,
Expr *val);
bool CheckEnumUnderlyingType(TypeSourceInfo *TI);
bool CheckEnumRedeclaration(SourceLocation EnumLoc, bool IsScoped,
QualType EnumUnderlyingTy, const EnumDecl *Prev);
Decl *ActOnEnumConstant(Scope *S, Decl *EnumDecl, Decl *LastEnumConstant,
SourceLocation IdLoc, IdentifierInfo *Id,
AttributeList *Attrs,
SourceLocation EqualLoc, Expr *Val);
/// Invoked after all enumerators of an enumeration have been parsed.
void ActOnEnumBody(SourceLocation EnumLoc, SourceLocation LBraceLoc,
SourceLocation RBraceLoc, Decl *EnumDecl,
ArrayRef<Decl *> Elements,
Scope *S, AttributeList *Attr);
DeclContext *getContainingDC(DeclContext *DC);
/// Set the current declaration context until it gets popped.
void PushDeclContext(Scope *S, DeclContext *DC);
/// Pop the declaration context pushed by PushDeclContext.
void PopDeclContext();
/// EnterDeclaratorContext - Used when we must lookup names in the context
/// of a declarator's nested name specifier.
void EnterDeclaratorContext(Scope *S, DeclContext *DC);
void ExitDeclaratorContext(Scope *S);
/// Push the parameters of D, which must be a function, into scope.
void ActOnReenterFunctionContext(Scope* S, Decl* D);
void ActOnExitFunctionContext();
DeclContext *getFunctionLevelDeclContext();
/// getCurFunctionDecl - If inside of a function body, this returns a pointer
/// to the function decl for the function being parsed. If we're currently
/// in a 'block', this returns the containing context.
FunctionDecl *getCurFunctionDecl();
/// getCurMethodDecl - If inside of a method body, this returns a pointer to
/// the method decl for the method being parsed. If we're currently
/// in a 'block', this returns the containing context.
ObjCMethodDecl *getCurMethodDecl();
/// getCurFunctionOrMethodDecl - Return the Decl for the current ObjC method
/// or C function we're in, otherwise return null. If we're currently
/// in a 'block', this returns the containing context.
NamedDecl *getCurFunctionOrMethodDecl();
/// Add this decl to the scope shadowed decl chains.
void PushOnScopeChains(NamedDecl *D, Scope *S, bool AddToContext = true);
/// \brief Make the given externally-produced declaration visible at the
/// top level scope.
///
/// \param D The externally-produced declaration to push.
///
/// \param Name The name of the externally-produced declaration.
void pushExternalDeclIntoScope(NamedDecl *D, DeclarationName Name);
/// isDeclInScope - If 'Ctx' is a function/method, isDeclInScope returns true
/// if 'D' is in Scope 'S', otherwise 'S' is ignored and isDeclInScope returns
/// true if 'D' belongs to the given declaration context.
///
/// \param ExplicitInstantiationOrSpecialization When true, we are checking
/// whether the declaration is in scope for the purposes of explicit template
/// instantiation or specialization. The default is false.
bool isDeclInScope(NamedDecl *&D, DeclContext *Ctx, Scope *S = 0,
bool ExplicitInstantiationOrSpecialization = false);
/// Finds the scope corresponding to the given decl context, if it
/// happens to be an enclosing scope. Otherwise return NULL.
static Scope *getScopeForDeclContext(Scope *S, DeclContext *DC);
/// Subroutines of ActOnDeclarator().
TypedefDecl *ParseTypedefDecl(Scope *S, Declarator &D, QualType T,
TypeSourceInfo *TInfo);
bool isIncompatibleTypedef(TypeDecl *Old, TypedefNameDecl *New);
/// Attribute merging methods. Return true if a new attribute was added.
AvailabilityAttr *mergeAvailabilityAttr(NamedDecl *D, SourceRange Range,
IdentifierInfo *Platform,
VersionTuple Introduced,
VersionTuple Deprecated,
VersionTuple Obsoleted,
bool IsUnavailable,
StringRef Message,
bool Override,
unsigned AttrSpellingListIndex);
TypeVisibilityAttr *mergeTypeVisibilityAttr(Decl *D, SourceRange Range,
TypeVisibilityAttr::VisibilityType Vis,
unsigned AttrSpellingListIndex);
VisibilityAttr *mergeVisibilityAttr(Decl *D, SourceRange Range,
VisibilityAttr::VisibilityType Vis,
unsigned AttrSpellingListIndex);
DLLImportAttr *mergeDLLImportAttr(Decl *D, SourceRange Range,
unsigned AttrSpellingListIndex);
DLLExportAttr *mergeDLLExportAttr(Decl *D, SourceRange Range,
unsigned AttrSpellingListIndex);
FormatAttr *mergeFormatAttr(Decl *D, SourceRange Range, StringRef Format,
int FormatIdx, int FirstArg,
unsigned AttrSpellingListIndex);
SectionAttr *mergeSectionAttr(Decl *D, SourceRange Range, StringRef Name,
unsigned AttrSpellingListIndex);
/// \brief Describes the kind of merge to perform for availability
/// attributes (including "deprecated", "unavailable", and "availability").
enum AvailabilityMergeKind {
/// \brief Don't merge availability attributes at all.
AMK_None,
/// \brief Merge availability attributes for a redeclaration, which requires
/// an exact match.
AMK_Redeclaration,
/// \brief Merge availability attributes for an override, which requires
/// an exact match or a weakening of constraints.
AMK_Override
};
/// Merge the attributes of \p Old into the redeclaration/override \p New.
void mergeDeclAttributes(NamedDecl *New, Decl *Old,
AvailabilityMergeKind AMK = AMK_Redeclaration);
void MergeTypedefNameDecl(TypedefNameDecl *New, LookupResult &OldDecls);
bool MergeFunctionDecl(FunctionDecl *New, Decl *Old, Scope *S);
bool MergeCompatibleFunctionDecls(FunctionDecl *New, FunctionDecl *Old,
Scope *S);
void mergeObjCMethodDecls(ObjCMethodDecl *New, ObjCMethodDecl *Old);
void MergeVarDecl(VarDecl *New, LookupResult &OldDecls,
bool OldDeclsWereHidden);
void MergeVarDeclTypes(VarDecl *New, VarDecl *Old, bool OldIsHidden);
void MergeVarDeclExceptionSpecs(VarDecl *New, VarDecl *Old);
bool MergeCXXFunctionDecl(FunctionDecl *New, FunctionDecl *Old, Scope *S);
// AssignmentAction - This is used by all the assignment diagnostic functions
// to represent what is actually causing the operation
enum AssignmentAction {
AA_Assigning,
AA_Passing,
AA_Returning,
AA_Converting,
AA_Initializing,
AA_Sending,
AA_Casting
};
/// C++ Overloading.
enum OverloadKind {
/// This is a legitimate overload: the existing declarations are
/// functions or function templates with different signatures.
Ovl_Overload,
/// This is not an overload because the signature exactly matches
/// an existing declaration.
Ovl_Match,
/// This is not an overload because the lookup results contain a
/// non-function.
Ovl_NonFunction
};
/// Determine whether \p New is a valid overload of the declarations in
/// \p OldDecls; on a match, \p OldDecl receives the matched declaration.
OverloadKind CheckOverload(Scope *S,
FunctionDecl *New,
const LookupResult &OldDecls,
NamedDecl *&OldDecl,
bool IsForUsingDecl);
bool IsOverload(FunctionDecl *New, FunctionDecl *Old, bool IsForUsingDecl);
/// \brief Checks availability of the function depending on the current
/// function context. Inside an unavailable function, unavailability is
/// ignored.
///
/// \returns true if \p FD is unavailable and current context is inside
/// an available function, false otherwise.
bool isFunctionConsideredUnavailable(FunctionDecl *FD);
// Implicit/standard conversion checking helpers. The Is*/is* predicates
// test whether a particular conversion applies; several also compute the
// converted type through an out-parameter.
ImplicitConversionSequence
TryImplicitConversion(Expr *From, QualType ToType,
bool SuppressUserConversions,
bool AllowExplicit,
bool InOverloadResolution,
bool CStyle,
bool AllowObjCWritebackConversion);
bool IsIntegralPromotion(Expr *From, QualType FromType, QualType ToType);
bool IsFloatingPointPromotion(QualType FromType, QualType ToType);
bool IsComplexPromotion(QualType FromType, QualType ToType);
bool IsPointerConversion(Expr *From, QualType FromType, QualType ToType,
bool InOverloadResolution,
QualType& ConvertedType, bool &IncompatibleObjC);
bool isObjCPointerConversion(QualType FromType, QualType ToType,
QualType& ConvertedType, bool &IncompatibleObjC);
bool isObjCWritebackConversion(QualType FromType, QualType ToType,
QualType &ConvertedType);
bool IsBlockPointerConversion(QualType FromType, QualType ToType,
QualType& ConvertedType);
/// Compare two function prototypes' parameter types; on mismatch, \p ArgPos
/// (if non-null) receives the index of the first differing parameter.
bool FunctionArgTypesAreEqual(const FunctionProtoType *OldType,
const FunctionProtoType *NewType,
unsigned *ArgPos = 0);
void HandleFunctionTypeMismatch(PartialDiagnostic &PDiag,
QualType FromType, QualType ToType);
CastKind PrepareCastToObjCObjectPointer(ExprResult &E);
bool CheckPointerConversion(Expr *From, QualType ToType,
CastKind &Kind,
CXXCastPath& BasePath,
bool IgnoreBaseAccess);
bool IsMemberPointerConversion(Expr *From, QualType FromType, QualType ToType,
bool InOverloadResolution,
QualType &ConvertedType);
bool CheckMemberPointerConversion(Expr *From, QualType ToType,
CastKind &Kind,
CXXCastPath &BasePath,
bool IgnoreBaseAccess);
bool IsQualificationConversion(QualType FromType, QualType ToType,
bool CStyle, bool &ObjCLifetimeConversion);
bool IsNoReturnConversion(QualType FromType, QualType ToType,
QualType &ResultTy);
bool DiagnoseMultipleUserDefinedConversion(Expr *From, QualType ToType);
bool isSameOrCompatibleFunctionType(CanQualType Param, CanQualType Arg);
ExprResult PerformMoveOrCopyInitialization(const InitializedEntity &Entity,
const VarDecl *NRVOCandidate,
QualType ResultType,
Expr *Value,
bool AllowNRVO = true);
bool CanPerformCopyInitialization(const InitializedEntity &Entity,
ExprResult Init);
ExprResult PerformCopyInitialization(const InitializedEntity &Entity,
SourceLocation EqualLoc,
ExprResult Init,
bool TopLevelOfInitList = false,
bool AllowExplicit = false);
ExprResult PerformObjectArgumentInitialization(Expr *From,
NestedNameSpecifier *Qualifier,
NamedDecl *FoundDecl,
CXXMethodDecl *Method);
ExprResult PerformContextuallyConvertToBool(Expr *From);
ExprResult PerformContextuallyConvertToObjCPointer(Expr *From);
/// Contexts in which a converted constant expression is required.
enum CCEKind {
CCEK_CaseValue, ///< Expression in a case label.
CCEK_Enumerator, ///< Enumerator value with fixed underlying type.
CCEK_TemplateArg ///< Value of a non-type template parameter.
};
/// Check that \p From is a converted constant expression of type \p T in
/// context \p CCE; on success the evaluated value is stored in \p Value.
ExprResult CheckConvertedConstantExpression(Expr *From, QualType T,
llvm::APSInt &Value, CCEKind CCE);
/// \brief Abstract base class used to diagnose problems that occur while
/// trying to convert an expression to integral or enumeration type.
class ICEConvertDiagnoser {
public:
// Diagnostic suppression flags; their precise effect is defined by the
// routine that consumes this diagnoser
// (ConvertToIntegralOrEnumerationType).
bool Suppress;
bool SuppressConversion;
// 'explicit' added: this is an abstract interface type, so an implicit
// conversion from bool could never construct it; derived classes delegate
// to this constructor directly (direct-initialization), which is
// unaffected by 'explicit'.
explicit ICEConvertDiagnoser(bool Suppress = false,
bool SuppressConversion = false)
: Suppress(Suppress), SuppressConversion(SuppressConversion) { }
/// \brief Emits a diagnostic complaining that the expression does not have
/// integral or enumeration type.
virtual DiagnosticBuilder diagnoseNotInt(Sema &S, SourceLocation Loc,
QualType T) = 0;
/// \brief Emits a diagnostic when the expression has incomplete class type.
virtual DiagnosticBuilder diagnoseIncomplete(Sema &S, SourceLocation Loc,
QualType T) = 0;
/// \brief Emits a diagnostic when the only matching conversion function
/// is explicit.
virtual DiagnosticBuilder diagnoseExplicitConv(Sema &S, SourceLocation Loc,
QualType T,
QualType ConvTy) = 0;
/// \brief Emits a note for the explicit conversion function.
virtual DiagnosticBuilder
noteExplicitConv(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0;
/// \brief Emits a diagnostic when there are multiple possible conversion
/// functions.
virtual DiagnosticBuilder diagnoseAmbiguous(Sema &S, SourceLocation Loc,
QualType T) = 0;
/// \brief Emits a note for one of the candidate conversions.
virtual DiagnosticBuilder noteAmbiguous(Sema &S, CXXConversionDecl *Conv,
QualType ConvTy) = 0;
/// \brief Emits a diagnostic when we picked a conversion function
/// (for cases when we are not allowed to pick a conversion function).
virtual DiagnosticBuilder diagnoseConversion(Sema &S, SourceLocation Loc,
QualType T,
QualType ConvTy) = 0;
virtual ~ICEConvertDiagnoser() {}
};
/// Convert \p FromE to an integral or enumeration type, reporting problems
/// through \p Diagnoser.
ExprResult
ConvertToIntegralOrEnumerationType(SourceLocation Loc, Expr *FromE,
ICEConvertDiagnoser &Diagnoser,
bool AllowScopedEnumerations);
enum ObjCSubscriptKind {
OS_Array,
OS_Dictionary,
OS_Error
};
ObjCSubscriptKind CheckSubscriptingKind(Expr *FromE);
// Note that LK_String is intentionally after the other literals, as
// this is used for diagnostics logic.
enum ObjCLiteralKind {
LK_Array,
LK_Dictionary,
LK_Numeric,
LK_Boxed,
LK_String,
LK_Block,
LK_None
};
ObjCLiteralKind CheckLiteralKind(Expr *FromE);
ExprResult PerformObjectMemberConversion(Expr *From,
NestedNameSpecifier *Qualifier,
NamedDecl *FoundDecl,
NamedDecl *Member);
// Members have to be NamespaceDecl* or TranslationUnitDecl*.
// TODO: make this a typesafe union.
typedef llvm::SmallPtrSet<DeclContext *, 16> AssociatedNamespaceSet;
typedef llvm::SmallPtrSet<CXXRecordDecl *, 16> AssociatedClassSet;
// Overload candidate set construction: each Add* routine below appends
// candidates of a particular flavor (plain function, method, template,
// conversion, surrogate, built-in, ADL) to the given OverloadCandidateSet.
void AddOverloadCandidate(FunctionDecl *Function,
DeclAccessPair FoundDecl,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversions = false,
bool PartialOverloading = false,
bool AllowExplicit = false);
void AddFunctionCandidates(const UnresolvedSetImpl &Functions,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversions = false,
TemplateArgumentListInfo *ExplicitTemplateArgs = 0);
void AddMethodCandidate(DeclAccessPair FoundDecl,
QualType ObjectType,
Expr::Classification ObjectClassification,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversion = false);
void AddMethodCandidate(CXXMethodDecl *Method,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext, QualType ObjectType,
Expr::Classification ObjectClassification,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversions = false);
void AddMethodTemplateCandidate(FunctionTemplateDecl *MethodTmpl,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext,
TemplateArgumentListInfo *ExplicitTemplateArgs,
QualType ObjectType,
Expr::Classification ObjectClassification,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversions = false);
void AddTemplateOverloadCandidate(FunctionTemplateDecl *FunctionTemplate,
DeclAccessPair FoundDecl,
TemplateArgumentListInfo *ExplicitTemplateArgs,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversions = false);
void AddConversionCandidate(CXXConversionDecl *Conversion,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext,
Expr *From, QualType ToType,
OverloadCandidateSet& CandidateSet);
void AddTemplateConversionCandidate(FunctionTemplateDecl *FunctionTemplate,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext,
Expr *From, QualType ToType,
OverloadCandidateSet &CandidateSet);
void AddSurrogateCandidate(CXXConversionDecl *Conversion,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext,
const FunctionProtoType *Proto,
Expr *Object, ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet);
void AddMemberOperatorCandidates(OverloadedOperatorKind Op,
SourceLocation OpLoc, ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
SourceRange OpRange = SourceRange());
void AddBuiltinCandidate(QualType ResultTy, QualType *ParamTys,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool IsAssignmentOperator = false,
unsigned NumContextualBoolArguments = 0);
void AddBuiltinOperatorCandidates(OverloadedOperatorKind Op,
SourceLocation OpLoc, ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet);
void AddArgumentDependentLookupCandidates(DeclarationName Name,
bool Operator, SourceLocation Loc,
ArrayRef<Expr *> Args,
TemplateArgumentListInfo *ExplicitTemplateArgs,
OverloadCandidateSet& CandidateSet,
bool PartialOverloading = false);
// Emit as a 'note' the specific overload candidate
void NoteOverloadCandidate(FunctionDecl *Fn, QualType DestType = QualType());
// Emit as a series of 'note's all template and non-templates
// identified by the expression Expr
void NoteAllOverloadCandidates(Expr* E, QualType DestType = QualType());
// [PossiblyAFunctionType] --> [Return]
// NonFunctionType --> NonFunctionType
// R (A) --> R(A)
// R (*)(A) --> R (A)
// R (&)(A) --> R (A)
// R (S::*)(A) --> R (A)
QualType ExtractUnqualifiedFunctionType(QualType PossiblyAFunctionType);
/// Resolve '&f' (or plain 'f') naming an overload set to the single
/// function matching \p TargetType; \p Found receives the chosen
/// declaration and its access.
FunctionDecl *
ResolveAddressOfOverloadedFunction(Expr *AddressOfExpr,
QualType TargetType,
bool Complain,
DeclAccessPair &Found,
bool *pHadMultipleCandidates = 0);
FunctionDecl *ResolveSingleFunctionTemplateSpecialization(OverloadExpr *ovl,
bool Complain = false,
DeclAccessPair* Found = 0);
/// Resolve \p SrcExpr to a single function template specialization and
/// rewrite the expression in place on success; optionally performs the
/// function-to-pointer conversion and emits diagnostics when \p Complain
/// is set. (Parameter name typo "Converion" corrected; declaration-only
/// parameter names do not affect callers.)
bool ResolveAndFixSingleFunctionTemplateSpecialization(
ExprResult &SrcExpr,
bool DoFunctionPointerConversion = false,
bool Complain = false,
const SourceRange& OpRangeForComplaining = SourceRange(),
QualType DestTypeForComplaining = QualType(),
unsigned DiagIDForComplaining = 0);
/// Rewrite an expression that referred to an overload set so that it refers
/// to the single chosen function \p Fn.
Expr *FixOverloadedFunctionReference(Expr *E,
DeclAccessPair FoundDecl,
FunctionDecl *Fn);
ExprResult FixOverloadedFunctionReference(ExprResult,
DeclAccessPair FoundDecl,
FunctionDecl *Fn);
void AddOverloadedCallCandidates(UnresolvedLookupExpr *ULE,
ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
bool PartialOverloading = false);
// An enum used to represent the different possible results of building a
// range-based for loop.
enum ForRangeStatus {
FRS_Success,
FRS_NoViableFunction,
FRS_DiagnosticIssued
};
// An enum to represent whether something is dealing with a call to begin()
// or a call to end() in a range-based for loop.
enum BeginEndFunction {
BEF_begin,
BEF_end
};
/// Build the begin()/end() call (selected by \p BEF) for a range-based for
/// loop over \p Range; the resulting call is stored in *CallExpr.
ForRangeStatus BuildForRangeBeginEndCall(Scope *S, SourceLocation Loc,
SourceLocation RangeLoc,
VarDecl *Decl,
BeginEndFunction BEF,
const DeclarationNameInfo &NameInfo,
LookupResult &MemberLookup,
OverloadCandidateSet *CandidateSet,
Expr *Range, ExprResult *CallExpr);
// Builders for calls and operators whose callee required overload
// resolution.
ExprResult BuildOverloadedCallExpr(Scope *S, Expr *Fn,
UnresolvedLookupExpr *ULE,
SourceLocation LParenLoc,
Expr **Args, unsigned NumArgs,
SourceLocation RParenLoc,
Expr *ExecConfig,
bool AllowTypoCorrection=true);
bool buildOverloadedCallSet(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE,
Expr **Args, unsigned NumArgs,
SourceLocation RParenLoc,
OverloadCandidateSet *CandidateSet,
ExprResult *Result);
ExprResult CreateOverloadedUnaryOp(SourceLocation OpLoc,
unsigned Opc,
const UnresolvedSetImpl &Fns,
Expr *input);
ExprResult CreateOverloadedBinOp(SourceLocation OpLoc,
unsigned Opc,
const UnresolvedSetImpl &Fns,
Expr *LHS, Expr *RHS);
ExprResult CreateOverloadedArraySubscriptExpr(SourceLocation LLoc,
SourceLocation RLoc,
Expr *Base,Expr *Idx);
ExprResult
BuildCallToMemberFunction(Scope *S, Expr *MemExpr,
SourceLocation LParenLoc, Expr **Args,
unsigned NumArgs, SourceLocation RParenLoc);
ExprResult
BuildCallToObjectOfClassType(Scope *S, Expr *Object, SourceLocation LParenLoc,
Expr **Args, unsigned NumArgs,
SourceLocation RParenLoc);
ExprResult BuildOverloadedArrowExpr(Scope *S, Expr *Base,
SourceLocation OpLoc);
/// CheckCallReturnType - Checks that a call expression's return type is
/// complete. Returns true on failure. The location passed in is the location
/// that best represents the call.
bool CheckCallReturnType(QualType ReturnType, SourceLocation Loc,
CallExpr *CE, FunctionDecl *FD);
/// Helpers for dealing with blocks and functions.
bool CheckParmsForFunctionDef(ParmVarDecl **Param, ParmVarDecl **ParamEnd,
bool CheckParameterNames);
void CheckCXXDefaultArguments(FunctionDecl *FD);
void CheckExtraCXXDefaultArguments(Declarator &D);
Scope *getNonFieldDeclScope(Scope *S);
/// \name Name lookup
///
/// These routines provide name lookup that is used during semantic
/// analysis to resolve the various kinds of names (identifiers,
/// overloaded operator names, constructor names, etc.) into zero or
/// more declarations within a particular scope. The major entry
/// points are LookupName, which performs unqualified name lookup,
/// and LookupQualifiedName, which performs qualified name lookup.
///
/// All name lookup is performed based on some specific criteria,
/// which specify what names will be visible to name lookup and how
/// far name lookup should work. These criteria are important both
/// for capturing language semantics (certain lookups will ignore
/// certain names, for example) and for performance, since name
/// lookup is often a bottleneck in the compilation of C++. Name
/// lookup criteria is specified via the LookupCriteria enumeration.
///
/// The results of name lookup can vary based on the kind of name
/// lookup performed, the current language, and the translation
/// unit. In C, for example, name lookup will either return nothing
/// (no entity found) or a single declaration. In C++, name lookup
/// can additionally refer to a set of overloaded functions or
/// result in an ambiguity. All of the possible results of name
/// lookup are captured by the LookupResult class, which provides
/// the ability to distinguish among them.
//@{
/// @brief Describes the kind of name lookup to perform.
enum LookupNameKind {
/// Ordinary name lookup, which finds ordinary names (functions,
/// variables, typedefs, etc.) in C and most kinds of names
/// (functions, variables, members, types, etc.) in C++.
LookupOrdinaryName = 0,
/// Tag name lookup, which finds the names of enums, classes,
/// structs, and unions.
LookupTagName,
/// Label name lookup.
LookupLabel,
/// Member name lookup, which finds the names of
/// class/struct/union members.
LookupMemberName,
/// Look up of an operator name (e.g., operator+) for use with
/// operator overloading. This lookup is similar to ordinary name
/// lookup, but will ignore any declarations that are class members.
LookupOperatorName,
/// Look up of a name that precedes the '::' scope resolution
/// operator in C++. This lookup completely ignores operator, object,
/// function, and enumerator names (C++ [basic.lookup.qual]p1).
LookupNestedNameSpecifierName,
/// Look up a namespace name within a C++ using directive or
/// namespace alias definition, ignoring non-namespace names (C++
/// [basic.lookup.udir]p1).
LookupNamespaceName,
/// Look up all declarations in a scope with the given name,
/// including resolved using declarations. This is appropriate
/// for checking redeclarations for a using declaration.
LookupUsingDeclName,
/// Look up an ordinary name that is going to be redeclared as a
/// name with linkage. This lookup ignores any declarations that
/// are outside of the current scope unless they have linkage. See
/// C99 6.2.2p4-5 and C++ [basic.link]p6.
LookupRedeclarationWithLinkage,
/// Look up the name of an Objective-C protocol.
LookupObjCProtocolName,
/// Look up implicit 'self' parameter of an objective-c method.
LookupObjCImplicitSelfParam,
/// \brief Look up any declaration with any name.
LookupAnyName
};
/// \brief Specifies whether (or how) name lookup is being performed for a
/// redeclaration (vs. a reference).
enum RedeclarationKind {
/// \brief The lookup is a reference to this name that is not for the
/// purpose of redeclaring the name.
NotForRedeclaration = 0,
/// \brief The lookup results will be used for redeclaration of a name,
/// if an entity by that name already exists.
ForRedeclaration
};
/// \brief The possible outcomes of name lookup for a literal operator.
enum LiteralOperatorLookupResult {
/// \brief The lookup resulted in an error.
LOLR_Error,
/// \brief The lookup found a single 'cooked' literal operator, which
/// expects a normal literal to be built and passed to it.
LOLR_Cooked,
/// \brief The lookup found a single 'raw' literal operator, which expects
/// a string literal containing the spelling of the literal token.
LOLR_Raw,
/// \brief The lookup found an overload set of literal operator templates,
/// which expect the characters of the spelling of the literal token to be
/// passed as a non-type template argument pack.
LOLR_Template
};
/// Look up the special member function \p SM of class \p D with the given
/// argument/object qualifiers.
SpecialMemberOverloadResult *LookupSpecialMember(CXXRecordDecl *D,
CXXSpecialMember SM,
bool ConstArg,
bool VolatileArg,
bool RValueThis,
bool ConstThis,
bool VolatileThis);
private:
/// C++-specific unqualified name lookup, used internally by LookupName.
bool CppLookupName(LookupResult &R, Scope *S);
/// \brief The set of known/encountered (unique, canonicalized) NamespaceDecls.
///
/// The boolean value will be true to indicate that the namespace was loaded
/// from an AST/PCH file, or false otherwise.
llvm::MapVector<NamespaceDecl*, bool> KnownNamespaces;
/// \brief Whether we have already loaded known namespaces from an external
/// source.
bool LoadedExternalKnownNamespaces;
public:
/// \brief Look up a name, looking for a single declaration. Return
/// null if the results were absent, ambiguous, or overloaded.
///
/// It is preferable to use the elaborated form and explicitly handle
/// ambiguity and overloaded.
NamedDecl *LookupSingleName(Scope *S, DeclarationName Name,
SourceLocation Loc,
LookupNameKind NameKind,
RedeclarationKind Redecl
= NotForRedeclaration);
bool LookupName(LookupResult &R, Scope *S,
bool AllowBuiltinCreation = false);
bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
bool InUnqualifiedLookup = false);
bool LookupParsedName(LookupResult &R, Scope *S, CXXScopeSpec *SS,
bool AllowBuiltinCreation = false,
bool EnteringContext = false);
ObjCProtocolDecl *LookupProtocol(IdentifierInfo *II, SourceLocation IdLoc,
RedeclarationKind Redecl
= NotForRedeclaration);
void LookupOverloadedOperatorName(OverloadedOperatorKind Op, Scope *S,
QualType T1, QualType T2,
UnresolvedSetImpl &Functions);
LabelDecl *LookupOrCreateLabel(IdentifierInfo *II, SourceLocation IdentLoc,
SourceLocation GnuLabelLoc = SourceLocation());
// Lookup helpers for the C++ special member functions of a class.
DeclContextLookupResult LookupConstructors(CXXRecordDecl *Class);
CXXConstructorDecl *LookupDefaultConstructor(CXXRecordDecl *Class);
CXXConstructorDecl *LookupCopyingConstructor(CXXRecordDecl *Class,
unsigned Quals);
CXXMethodDecl *LookupCopyingAssignment(CXXRecordDecl *Class, unsigned Quals,
bool RValueThis, unsigned ThisQuals);
CXXConstructorDecl *LookupMovingConstructor(CXXRecordDecl *Class,
unsigned Quals);
CXXMethodDecl *LookupMovingAssignment(CXXRecordDecl *Class, unsigned Quals,
bool RValueThis, unsigned ThisQuals);
CXXDestructorDecl *LookupDestructor(CXXRecordDecl *Class);
LiteralOperatorLookupResult LookupLiteralOperator(Scope *S, LookupResult &R,
ArrayRef<QualType> ArgTys,
bool AllowRawAndTemplate);
bool isKnownName(StringRef name);
void ArgumentDependentLookup(DeclarationName Name, bool Operator,
SourceLocation Loc,
ArrayRef<Expr *> Args,
ADLResult &Functions);
void LookupVisibleDecls(Scope *S, LookupNameKind Kind,
VisibleDeclConsumer &Consumer,
bool IncludeGlobalScope = true);
void LookupVisibleDecls(DeclContext *Ctx, LookupNameKind Kind,
VisibleDeclConsumer &Consumer,
bool IncludeGlobalScope = true);
/// Try to correct the typo'd name \p Typo using \p CCC to filter candidates.
TypoCorrection CorrectTypo(const DeclarationNameInfo &Typo,
Sema::LookupNameKind LookupKind,
Scope *S, CXXScopeSpec *SS,
CorrectionCandidateCallback &CCC,
DeclContext *MemberContext = 0,
bool EnteringContext = false,
const ObjCObjectPointerType *OPT = 0);
void FindAssociatedClassesAndNamespaces(SourceLocation InstantiationLoc,
ArrayRef<Expr *> Args,
AssociatedNamespaceSet &AssociatedNamespaces,
AssociatedClassSet &AssociatedClasses);
void FilterLookupForScope(LookupResult &R, DeclContext *Ctx, Scope *S,
bool ConsiderLinkage,
bool ExplicitInstantiationOrSpecialization);
bool DiagnoseAmbiguousLookup(LookupResult &Result);
//@}
ObjCInterfaceDecl *getObjCInterfaceDecl(IdentifierInfo *&Id,
SourceLocation IdLoc,
bool TypoCorrection = false);
NamedDecl *LazilyCreateBuiltin(IdentifierInfo *II, unsigned ID,
Scope *S, bool ForRedeclaration,
SourceLocation Loc);
NamedDecl *ImplicitlyDefineFunction(SourceLocation Loc, IdentifierInfo &II,
Scope *S);
void AddKnownFunctionAttributes(FunctionDecl *FD);
// More parsing and symbol table subroutines.
void ProcessPragmaWeak(Scope *S, Decl *D);
// Decl attributes - this routine is the top level dispatcher.
void ProcessDeclAttributes(Scope *S, Decl *D, const Declarator &PD,
bool NonInheritable = true,
bool Inheritable = true);
void ProcessDeclAttributeList(Scope *S, Decl *D, const AttributeList *AL,
bool NonInheritable = true,
bool Inheritable = true,
bool IncludeCXX11Attributes = true);
bool ProcessAccessDeclAttributeList(AccessSpecDecl *ASDecl,
const AttributeList *AttrList);
void checkUnusedDeclAttributes(Declarator &D);
bool CheckRegparmAttr(const AttributeList &attr, unsigned &value);
bool CheckCallingConvAttr(const AttributeList &attr, CallingConv &CC,
const FunctionDecl *FD = 0);
bool CheckNoReturnAttr(const AttributeList &attr);
void CheckAlignasUnderalignment(Decl *D);
/// \brief Stmt attributes - this routine is the top level dispatcher.
StmtResult ProcessStmtAttributes(Stmt *Stmt, AttributeList *Attrs,
SourceRange Range);
// Objective-C method/property implementation checking.
void WarnUndefinedMethod(SourceLocation ImpLoc, ObjCMethodDecl *method,
bool &IncompleteImpl, unsigned DiagID);
void WarnConflictingTypedMethods(ObjCMethodDecl *Method,
ObjCMethodDecl *MethodDecl,
bool IsProtocolMethodDecl);
void CheckConflictingOverridingMethod(ObjCMethodDecl *Method,
ObjCMethodDecl *Overridden,
bool IsProtocolMethodDecl);
/// WarnExactTypedMethods - This routine issues a warning if method
/// implementation declaration matches exactly that of its declaration.
void WarnExactTypedMethods(ObjCMethodDecl *Method,
ObjCMethodDecl *MethodDecl,
bool IsProtocolMethodDecl);
bool isPropertyReadonly(ObjCPropertyDecl *PropertyDecl,
ObjCInterfaceDecl *IDecl);
typedef llvm::SmallPtrSet<Selector, 8> SelectorSet;
typedef llvm::DenseMap<Selector, ObjCMethodDecl*> ProtocolsMethodsMap;
/// CheckProtocolMethodDefs - This routine checks unimplemented
/// methods declared in protocol, and those referenced by it.
void CheckProtocolMethodDefs(SourceLocation ImpLoc,
ObjCProtocolDecl *PDecl,
bool& IncompleteImpl,
const SelectorSet &InsMap,
const SelectorSet &ClsMap,
ObjCContainerDecl *CDecl);
/// CheckImplementationIvars - This routine checks if the instance variables
/// listed in the implementation match those listed in the interface.
void CheckImplementationIvars(ObjCImplementationDecl *ImpDecl,
ObjCIvarDecl **Fields, unsigned nIvars,
SourceLocation Loc);
/// ImplMethodsVsClassMethods - This is main routine to warn if any method
/// remains unimplemented in the class or category \@implementation.
void ImplMethodsVsClassMethods(Scope *S, ObjCImplDecl* IMPDecl,
ObjCContainerDecl* IDecl,
bool IncompleteImpl = false);
/// DiagnoseUnimplementedProperties - This routine warns on those properties
/// which must be implemented by this implementation.
void DiagnoseUnimplementedProperties(Scope *S, ObjCImplDecl* IMPDecl,
ObjCContainerDecl *CDecl);
/// DefaultSynthesizeProperties - This routine default synthesizes all
/// properties which must be synthesized in the class's \@implementation.
void DefaultSynthesizeProperties (Scope *S, ObjCImplDecl* IMPDecl,
ObjCInterfaceDecl *IDecl);
void DefaultSynthesizeProperties(Scope *S, Decl *D);
/// CollectImmediateProperties - This routine collects all properties in
/// the class and its conforming protocols; but not those in its super class.
void CollectImmediateProperties(ObjCContainerDecl *CDecl,
llvm::DenseMap<IdentifierInfo *, ObjCPropertyDecl*>& PropMap,
llvm::DenseMap<IdentifierInfo *, ObjCPropertyDecl*>& SuperPropMap);
/// IvarBacksCurrentMethodAccessor - This routine returns 'true' if 'IV' is
/// an ivar synthesized for 'Method' and 'Method' is a property accessor
/// declared in class 'IFace'.
bool IvarBacksCurrentMethodAccessor(ObjCInterfaceDecl *IFace,
ObjCMethodDecl *Method, ObjCIvarDecl *IV);
/// Called by ActOnProperty to handle \@property declarations in
/// class extensions.
ObjCPropertyDecl *HandlePropertyInClassExtension(Scope *S,
SourceLocation AtLoc,
SourceLocation LParenLoc,
FieldDeclarator &FD,
Selector GetterSel,
Selector SetterSel,
const bool isAssign,
const bool isReadWrite,
const unsigned Attributes,
const unsigned AttributesAsWritten,
bool *isOverridingProperty,
TypeSourceInfo *T,
tok::ObjCKeywordKind MethodImplKind);
/// Called by ActOnProperty and HandlePropertyInClassExtension to
/// handle creating the ObjcPropertyDecl for a category or \@interface.
ObjCPropertyDecl *CreatePropertyDecl(Scope *S,
ObjCContainerDecl *CDecl,
SourceLocation AtLoc,
SourceLocation LParenLoc,
FieldDeclarator &FD,
Selector GetterSel,
Selector SetterSel,
const bool isAssign,
const bool isReadWrite,
const unsigned Attributes,
const unsigned AttributesAsWritten,
TypeSourceInfo *T,
tok::ObjCKeywordKind MethodImplKind,
DeclContext *lexicalDC = 0);
/// AtomicPropertySetterGetterRules - This routine enforces the rule (via
/// warning) when atomic property has one but not the other user-declared
/// setter or getter.
void AtomicPropertySetterGetterRules(ObjCImplDecl* IMPDecl,
ObjCContainerDecl* IDecl);
void DiagnoseOwningPropertyGetterSynthesis(const ObjCImplementationDecl *D);
void DiagnoseDuplicateIvars(ObjCInterfaceDecl *ID, ObjCInterfaceDecl *SID);
enum MethodMatchStrategy {
MMS_loose,
MMS_strict
};
/// MatchTwoMethodDeclarations - Checks if two methods' type match and returns
/// true, or false, accordingly.
bool MatchTwoMethodDeclarations(const ObjCMethodDecl *Method,
const ObjCMethodDecl *PrevMethod,
MethodMatchStrategy strategy = MMS_strict);
/// MatchAllMethodDeclarations - Check methods declared in interface
/// or protocol against those declared in their implementations.
void MatchAllMethodDeclarations(const SelectorSet &InsMap,
const SelectorSet &ClsMap,
SelectorSet &InsMapSeen,
SelectorSet &ClsMapSeen,
ObjCImplDecl* IMPDecl,
ObjCContainerDecl* IDecl,
bool &IncompleteImpl,
bool ImmediateClass,
bool WarnCategoryMethodImpl=false);
/// CheckCategoryVsClassMethodMatches - Checks that methods implemented in
/// category matches with those implemented in its primary class and
/// warns each time an exact match is found.
void CheckCategoryVsClassMethodMatches(ObjCCategoryImplDecl *CatIMP);
/// \brief Add the given method to the list of globally-known methods.
void addMethodToGlobalList(ObjCMethodList *List, ObjCMethodDecl *Method);
private:
/// AddMethodToGlobalPool - Add an instance or factory method to the global
/// pool. See descriptoin of AddInstanceMethodToGlobalPool.
void AddMethodToGlobalPool(ObjCMethodDecl *Method, bool impl, bool instance);
/// LookupMethodInGlobalPool - Returns the instance or factory method and
/// optionally warns if there are multiple signatures.
ObjCMethodDecl *LookupMethodInGlobalPool(Selector Sel, SourceRange R,
bool receiverIdOrClass,
bool warn, bool instance);
public:
/// AddInstanceMethodToGlobalPool - All instance methods in a translation
/// unit are added to a global pool. This allows us to efficiently associate
/// a selector with a method declaration for purposes of typechecking
/// messages sent to "id" (where the class of the object is unknown).
void AddInstanceMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) {
AddMethodToGlobalPool(Method, impl, /*instance*/true);
}
/// AddFactoryMethodToGlobalPool - Same as above, but for factory methods.
void AddFactoryMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) {
AddMethodToGlobalPool(Method, impl, /*instance*/false);
}
/// AddAnyMethodToGlobalPool - Add any method, instance or factory to global
/// pool.
void AddAnyMethodToGlobalPool(Decl *D);
/// LookupInstanceMethodInGlobalPool - Returns the method and warns if
/// there are multiple signatures.
ObjCMethodDecl *LookupInstanceMethodInGlobalPool(Selector Sel, SourceRange R,
bool receiverIdOrClass=false,
bool warn=true) {
return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass,
warn, /*instance*/true);
}
/// LookupFactoryMethodInGlobalPool - Returns the method and warns if
/// there are multiple signatures.
ObjCMethodDecl *LookupFactoryMethodInGlobalPool(Selector Sel, SourceRange R,
bool receiverIdOrClass=false,
bool warn=true) {
return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass,
warn, /*instance*/false);
}
/// LookupImplementedMethodInGlobalPool - Returns the method which has an
/// implementation.
ObjCMethodDecl *LookupImplementedMethodInGlobalPool(Selector Sel);
/// CollectIvarsToConstructOrDestruct - Collect those ivars which require
/// initialization.
void CollectIvarsToConstructOrDestruct(ObjCInterfaceDecl *OI,
SmallVectorImpl<ObjCIvarDecl*> &Ivars);
//===--------------------------------------------------------------------===//
// Statement Parsing Callbacks: SemaStmt.cpp.
public:
/// A thin, copyable wrapper around an Expr* representing a fully-checked
/// ("full") expression; built via Sema::MakeFullExpr below.
class FullExprArg {
public:
// NOTE(review): the 'actions' parameter is unused here.
FullExprArg(Sema &actions) : E(0) { }
// FIXME: The const_cast here is ugly. RValue references would make this
// much nicer (or we could duplicate a bunch of the move semantics
// emulation code from Ownership.h).
FullExprArg(const FullExprArg& Other) : E(Other.E) {}
ExprResult release() {
return E;
}
Expr *get() const { return E; }
Expr *operator->() {
return E;
}
private:
// FIXME: No need to make the entire Sema class a friend when it's just
// Sema::MakeFullExpr that needs access to the constructor below.
friend class Sema;
explicit FullExprArg(Expr *expr) : E(expr) {}
Expr *E;
};
/// Wrap \p Arg as a FullExprArg, using the expression's own location.
FullExprArg MakeFullExpr(Expr *Arg) {
return MakeFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation());
}
/// Wrap \p Arg as a FullExprArg after finishing the full-expression at \p CC.
FullExprArg MakeFullExpr(Expr *Arg, SourceLocation CC) {
return FullExprArg(ActOnFinishFullExpr(Arg, CC).release());
}
/// Same as MakeFullExpr, but marks the expression as a discarded value.
FullExprArg MakeFullDiscardedValueExpr(Expr *Arg) {
ExprResult FE =
ActOnFinishFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation(),
/*DiscardedValue*/ true);
return FullExprArg(FE.release());
}
StmtResult ActOnExprStmt(ExprResult Arg);
StmtResult ActOnExprStmtError();
StmtResult ActOnNullStmt(SourceLocation SemiLoc,
bool HasLeadingEmptyMacro = false);
void ActOnStartOfCompoundStmt();
void ActOnFinishOfCompoundStmt();
StmtResult ActOnCompoundStmt(SourceLocation L, SourceLocation R,
MultiStmtArg Elts,
bool isStmtExpr);
/// \brief A RAII object to enter scope of a compound statement.
/// Calls ActOnStartOfCompoundStmt on construction and
/// ActOnFinishOfCompoundStmt on destruction.
class CompoundScopeRAII {
public:
CompoundScopeRAII(Sema &S): S(S) {
S.ActOnStartOfCompoundStmt();
}
~CompoundScopeRAII() {
S.ActOnFinishOfCompoundStmt();
}
private:
Sema &S;
};
StmtResult ActOnDeclStmt(DeclGroupPtrTy Decl,
SourceLocation StartLoc,
SourceLocation EndLoc);
void ActOnForEachDeclStmt(DeclGroupPtrTy Decl);
StmtResult ActOnForEachLValueExpr(Expr *E);
StmtResult ActOnCaseStmt(SourceLocation CaseLoc, Expr *LHSVal,
SourceLocation DotDotDotLoc, Expr *RHSVal,
SourceLocation ColonLoc);
void ActOnCaseStmtBody(Stmt *CaseStmt, Stmt *SubStmt);
StmtResult ActOnDefaultStmt(SourceLocation DefaultLoc,
SourceLocation ColonLoc,
Stmt *SubStmt, Scope *CurScope);
StmtResult ActOnLabelStmt(SourceLocation IdentLoc, LabelDecl *TheDecl,
SourceLocation ColonLoc, Stmt *SubStmt);
StmtResult ActOnAttributedStmt(SourceLocation AttrLoc,
ArrayRef<const Attr*> Attrs,
Stmt *SubStmt);
StmtResult ActOnIfStmt(SourceLocation IfLoc,
FullExprArg CondVal, Decl *CondVar,
Stmt *ThenVal,
SourceLocation ElseLoc, Stmt *ElseVal);
StmtResult ActOnStartOfSwitchStmt(SourceLocation SwitchLoc,
Expr *Cond,
Decl *CondVar);
StmtResult ActOnFinishSwitchStmt(SourceLocation SwitchLoc,
Stmt *Switch, Stmt *Body);
StmtResult ActOnWhileStmt(SourceLocation WhileLoc,
FullExprArg Cond,
Decl *CondVar, Stmt *Body);
StmtResult ActOnDoStmt(SourceLocation DoLoc, Stmt *Body,
SourceLocation WhileLoc,
SourceLocation CondLParen, Expr *Cond,
SourceLocation CondRParen);
StmtResult ActOnForStmt(SourceLocation ForLoc,
SourceLocation LParenLoc,
Stmt *First, FullExprArg Second,
Decl *SecondVar,
FullExprArg Third,
SourceLocation RParenLoc,
Stmt *Body);
ExprResult CheckObjCForCollectionOperand(SourceLocation forLoc,
Expr *collection);
StmtResult ActOnObjCForCollectionStmt(SourceLocation ForColLoc,
Stmt *First, Expr *collection,
SourceLocation RParenLoc);
StmtResult FinishObjCForCollectionStmt(Stmt *ForCollection, Stmt *Body);
/// The context in which a C++11 range-based for statement is being built.
enum BuildForRangeKind {
/// Initial building of a for-range statement.
BFRK_Build,
/// Instantiation or recovery rebuild of a for-range statement. Don't
/// attempt any typo-correction.
BFRK_Rebuild,
/// Determining whether a for-range statement could be built. Avoid any
/// unnecessary or irreversible actions.
BFRK_Check
};
StmtResult ActOnCXXForRangeStmt(SourceLocation ForLoc, Stmt *LoopVar,
SourceLocation ColonLoc, Expr *Collection,
SourceLocation RParenLoc,
BuildForRangeKind Kind);
StmtResult BuildCXXForRangeStmt(SourceLocation ForLoc,
SourceLocation ColonLoc,
Stmt *RangeDecl, Stmt *BeginEndDecl,
Expr *Cond, Expr *Inc,
Stmt *LoopVarDecl,
SourceLocation RParenLoc,
BuildForRangeKind Kind);
StmtResult FinishCXXForRangeStmt(Stmt *ForRange, Stmt *Body);
StmtResult ActOnGotoStmt(SourceLocation GotoLoc,
SourceLocation LabelLoc,
LabelDecl *TheDecl);
StmtResult ActOnIndirectGotoStmt(SourceLocation GotoLoc,
SourceLocation StarLoc,
Expr *DestExp);
StmtResult ActOnContinueStmt(SourceLocation ContinueLoc, Scope *CurScope);
StmtResult ActOnBreakStmt(SourceLocation BreakLoc, Scope *CurScope);
void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope,
CapturedRegionKind Kind, unsigned NumParams);
StmtResult ActOnCapturedRegionEnd(Stmt *S);
void ActOnCapturedRegionError();
RecordDecl *CreateCapturedStmtRecordDecl(CapturedDecl *&CD,
SourceLocation Loc,
unsigned NumParams);
const VarDecl *getCopyElisionCandidate(QualType ReturnType, Expr *E,
bool AllowFunctionParameters);
StmtResult ActOnReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp);
StmtResult ActOnCapScopeReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp);
StmtResult ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple,
bool IsVolatile, unsigned NumOutputs,
unsigned NumInputs, IdentifierInfo **Names,
MultiExprArg Constraints, MultiExprArg Exprs,
Expr *AsmString, MultiExprArg Clobbers,
SourceLocation RParenLoc);
ExprResult LookupInlineAsmIdentifier(CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
UnqualifiedId &Id,
InlineAsmIdentifierInfo &Info,
bool IsUnevaluatedContext);
bool LookupInlineAsmField(StringRef Base, StringRef Member,
unsigned &Offset, SourceLocation AsmLoc);
StmtResult ActOnMSAsmStmt(SourceLocation AsmLoc, SourceLocation LBraceLoc,
ArrayRef<Token> AsmToks,
StringRef AsmString,
unsigned NumOutputs, unsigned NumInputs,
ArrayRef<StringRef> Constraints,
ArrayRef<StringRef> Clobbers,
ArrayRef<Expr*> Exprs,
SourceLocation EndLoc);
VarDecl *BuildObjCExceptionDecl(TypeSourceInfo *TInfo, QualType ExceptionType,
SourceLocation StartLoc,
SourceLocation IdLoc, IdentifierInfo *Id,
bool Invalid = false);
Decl *ActOnObjCExceptionDecl(Scope *S, Declarator &D);
StmtResult ActOnObjCAtCatchStmt(SourceLocation AtLoc, SourceLocation RParen,
Decl *Parm, Stmt *Body);
StmtResult ActOnObjCAtFinallyStmt(SourceLocation AtLoc, Stmt *Body);
StmtResult ActOnObjCAtTryStmt(SourceLocation AtLoc, Stmt *Try,
MultiStmtArg Catch, Stmt *Finally);
StmtResult BuildObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw);
StmtResult ActOnObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw,
Scope *CurScope);
ExprResult ActOnObjCAtSynchronizedOperand(SourceLocation atLoc,
Expr *operand);
StmtResult ActOnObjCAtSynchronizedStmt(SourceLocation AtLoc,
Expr *SynchExpr,
Stmt *SynchBody);
StmtResult ActOnObjCAutoreleasePoolStmt(SourceLocation AtLoc, Stmt *Body);
VarDecl *BuildExceptionDeclaration(Scope *S, TypeSourceInfo *TInfo,
SourceLocation StartLoc,
SourceLocation IdLoc,
IdentifierInfo *Id);
Decl *ActOnExceptionDeclarator(Scope *S, Declarator &D);
StmtResult ActOnCXXCatchBlock(SourceLocation CatchLoc,
Decl *ExDecl, Stmt *HandlerBlock);
StmtResult ActOnCXXTryBlock(SourceLocation TryLoc, Stmt *TryBlock,
MultiStmtArg Handlers);
StmtResult ActOnSEHTryBlock(bool IsCXXTry, // try (true) or __try (false) ?
SourceLocation TryLoc,
Stmt *TryBlock,
Stmt *Handler);
StmtResult ActOnSEHExceptBlock(SourceLocation Loc,
Expr *FilterExpr,
Stmt *Block);
StmtResult ActOnSEHFinallyBlock(SourceLocation Loc,
Stmt *Block);
void DiagnoseReturnInConstructorExceptionHandler(CXXTryStmt *TryBlock);
bool ShouldWarnIfUnusedFileScopedDecl(const DeclaratorDecl *D) const;
/// \brief If it's a file scoped decl that must warn if not used, keep track
/// of it.
void MarkUnusedFileScopedDecl(const DeclaratorDecl *D);
/// DiagnoseUnusedExprResult - If the statement passed in is an expression
/// whose result is unused, warn.
void DiagnoseUnusedExprResult(const Stmt *S);
void DiagnoseUnusedDecl(const NamedDecl *ND);
/// Emit \p DiagID if statement located on \p StmtLoc has a suspicious null
/// statement as a \p Body, and it is located on the same line.
///
/// This helps prevent bugs due to typos, such as:
/// if (condition);
/// do_stuff();
void DiagnoseEmptyStmtBody(SourceLocation StmtLoc,
const Stmt *Body,
unsigned DiagID);
/// Warn if a for/while loop statement \p S, which is followed by
/// \p PossibleBody, has a suspicious null statement as a body.
void DiagnoseEmptyLoopBody(const Stmt *S,
const Stmt *PossibleBody);
/// Push a pool for diagnostics delayed while parsing a declaration.
ParsingDeclState PushParsingDeclaration(sema::DelayedDiagnosticPool &pool) {
return DelayedDiagnostics.push(pool);
}
void PopParsingDeclaration(ParsingDeclState state, Decl *decl);
typedef ProcessingContextState ParsingClassState;
/// Suspend diagnostic delaying while parsing a class.
ParsingClassState PushParsingClass() {
return DelayedDiagnostics.pushUndelayed();
}
void PopParsingClass(ParsingClassState state) {
DelayedDiagnostics.popUndelayed(state);
}
void redelayDiagnostics(sema::DelayedDiagnosticPool &pool);
void EmitDeprecationWarning(NamedDecl *D, StringRef Message,
SourceLocation Loc,
const ObjCInterfaceDecl *UnknownObjCClass,
const ObjCPropertyDecl *ObjCProperty);
void HandleDelayedDeprecationCheck(sema::DelayedDiagnostic &DD, Decl *Ctx);
bool makeUnavailableInSystemHeader(SourceLocation loc,
StringRef message);
//===--------------------------------------------------------------------===//
// Expression Parsing Callbacks: SemaExpr.cpp.
bool CanUseDecl(NamedDecl *D);
bool DiagnoseUseOfDecl(NamedDecl *D, SourceLocation Loc,
const ObjCInterfaceDecl *UnknownObjCClass=0);
void NoteDeletedFunction(FunctionDecl *FD);
std::string getDeletedOrUnavailableSuffix(const FunctionDecl *FD);
bool DiagnosePropertyAccessorMismatch(ObjCPropertyDecl *PD,
ObjCMethodDecl *Getter,
SourceLocation Loc);
void DiagnoseSentinelCalls(NamedDecl *D, SourceLocation Loc,
Expr **Args, unsigned NumArgs);
void PushExpressionEvaluationContext(ExpressionEvaluationContext NewContext,
Decl *LambdaContextDecl = 0,
bool IsDecltype = false);
enum ReuseLambdaContextDecl_t { ReuseLambdaContextDecl };
void PushExpressionEvaluationContext(ExpressionEvaluationContext NewContext,
ReuseLambdaContextDecl_t,
bool IsDecltype = false);
void PopExpressionEvaluationContext();
void DiscardCleanupsInEvaluationContext();
ExprResult TransformToPotentiallyEvaluated(Expr *E);
ExprResult HandleExprEvaluationContextForTypeof(Expr *E);
ExprResult ActOnConstantExpression(ExprResult Res);
// Functions for marking a declaration referenced. These functions also
// contain the relevant logic for marking if a reference to a function or
// variable is an odr-use (in the C++11 sense). There are separate variants
// for expressions referring to a decl; these exist because odr-use marking
// needs to be delayed for some constant variables when we build one of the
// named expressions.
void MarkAnyDeclReferenced(SourceLocation Loc, Decl *D, bool OdrUse);
void MarkFunctionReferenced(SourceLocation Loc, FunctionDecl *Func);
void MarkVariableReferenced(SourceLocation Loc, VarDecl *Var);
void MarkDeclRefReferenced(DeclRefExpr *E);
void MarkMemberReferenced(MemberExpr *E);
void UpdateMarkingForLValueToRValue(Expr *E);
void CleanupVarDeclMarking();
/// The kind of capture requested for tryCaptureVariable below.
enum TryCaptureKind {
TryCapture_Implicit, TryCapture_ExplicitByVal, TryCapture_ExplicitByRef
};
/// \brief Try to capture the given variable.
///
/// \param Var The variable to capture.
///
/// \param Loc The location at which the capture occurs.
///
/// \param Kind The kind of capture, which may be implicit (for either a
/// block or a lambda), or explicit by-value or by-reference (for a lambda).
///
/// \param EllipsisLoc The location of the ellipsis, if one is provided in
/// an explicit lambda capture.
///
/// \param BuildAndDiagnose Whether we are actually supposed to add the
/// captures or diagnose errors. If false, this routine merely checks whether
/// the capture can occur without performing the capture itself or complaining
/// if the variable cannot be captured.
///
/// \param CaptureType Will be set to the type of the field used to capture
/// this variable in the innermost block or lambda. Only valid when the
/// variable can be captured.
///
/// \param DeclRefType Will be set to the type of a reference to the capture
/// from within the current scope. Only valid when the variable can be
/// captured.
///
/// \returns true if an error occurred (i.e., the variable cannot be
/// captured) and false if the capture succeeded.
bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc, TryCaptureKind Kind,
SourceLocation EllipsisLoc, bool BuildAndDiagnose,
QualType &CaptureType,
QualType &DeclRefType);
/// \brief Try to capture the given variable.
bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc,
TryCaptureKind Kind = TryCapture_Implicit,
SourceLocation EllipsisLoc = SourceLocation());
/// \brief Given a variable, determine the type that a reference to that
/// variable will have in the given scope.
QualType getCapturedDeclRefType(VarDecl *Var, SourceLocation Loc);
void MarkDeclarationsReferencedInType(SourceLocation Loc, QualType T);
void MarkDeclarationsReferencedInExpr(Expr *E,
bool SkipLocalVariables = false);
/// \brief Try to recover by turning the given expression into a
/// call. Returns true if recovery was attempted or an error was
/// emitted; this may also leave the ExprResult invalid.
bool tryToRecoverWithCall(ExprResult &E, const PartialDiagnostic &PD,
bool ForceComplain = false,
bool (*IsPlausibleResult)(QualType) = 0);
/// \brief Figure out if an expression could be turned into a call.
bool isExprCallable(const Expr &E, QualType &ZeroArgCallReturnTy,
UnresolvedSetImpl &NonTemplateOverloads);
/// \brief Conditionally issue a diagnostic based on the current
/// evaluation context.
///
/// \param Statement If Statement is non-null, delay reporting the
/// diagnostic until the function body is parsed, and then do a basic
/// reachability analysis to determine if the statement is reachable.
/// If it is unreachable, the diagnostic will not be emitted.
bool DiagRuntimeBehavior(SourceLocation Loc, const Stmt *Statement,
const PartialDiagnostic &PD);
SourceRange getExprRange(Expr *E) const;
ExprResult ActOnIdExpression(Scope *S, CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
UnqualifiedId &Id,
bool HasTrailingLParen, bool IsAddressOfOperand,
CorrectionCandidateCallback *CCC = 0);
void DecomposeUnqualifiedId(const UnqualifiedId &Id,
TemplateArgumentListInfo &Buffer,
DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *&TemplateArgs);
bool DiagnoseEmptyLookup(Scope *S, CXXScopeSpec &SS, LookupResult &R,
CorrectionCandidateCallback &CCC,
TemplateArgumentListInfo *ExplicitTemplateArgs = 0,
ArrayRef<Expr *> Args = None);
ExprResult LookupInObjCMethod(LookupResult &LookUp, Scope *S,
IdentifierInfo *II,
bool AllowBuiltinCreation=false);
ExprResult ActOnDependentIdExpression(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
bool isAddressOfOperand,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult BuildDeclRefExpr(ValueDecl *D, QualType Ty,
ExprValueKind VK,
SourceLocation Loc,
const CXXScopeSpec *SS = 0);
ExprResult BuildDeclRefExpr(ValueDecl *D, QualType Ty,
ExprValueKind VK,
const DeclarationNameInfo &NameInfo,
const CXXScopeSpec *SS = 0,
NamedDecl *FoundD = 0);
ExprResult
BuildAnonymousStructUnionMemberReference(const CXXScopeSpec &SS,
SourceLocation nameLoc,
IndirectFieldDecl *indirectField,
Expr *baseObjectExpr = 0,
SourceLocation opLoc = SourceLocation());
ExprResult BuildPossibleImplicitMemberExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
LookupResult &R,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult BuildImplicitMemberExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
LookupResult &R,
const TemplateArgumentListInfo *TemplateArgs,
bool IsDefiniteInstance);
bool UseArgumentDependentLookup(const CXXScopeSpec &SS,
const LookupResult &R,
bool HasTrailingLParen);
ExprResult BuildQualifiedDeclarationNameExpr(CXXScopeSpec &SS,
const DeclarationNameInfo &NameInfo,
bool IsAddressOfOperand);
ExprResult BuildDependentDeclRefExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult BuildDeclarationNameExpr(const CXXScopeSpec &SS,
LookupResult &R,
bool NeedsADL);
ExprResult BuildDeclarationNameExpr(const CXXScopeSpec &SS,
const DeclarationNameInfo &NameInfo,
NamedDecl *D, NamedDecl *FoundD = 0);
ExprResult BuildLiteralOperatorCall(LookupResult &R,
DeclarationNameInfo &SuffixInfo,
ArrayRef<Expr*> Args,
SourceLocation LitEndLoc,
TemplateArgumentListInfo *ExplicitTemplateArgs = 0);
ExprResult ActOnPredefinedExpr(SourceLocation Loc, tok::TokenKind Kind);
ExprResult ActOnIntegerConstant(SourceLocation Loc, uint64_t Val);
ExprResult ActOnNumericConstant(const Token &Tok, Scope *UDLScope = 0);
ExprResult ActOnCharacterConstant(const Token &Tok, Scope *UDLScope = 0);
ExprResult ActOnParenExpr(SourceLocation L, SourceLocation R, Expr *E);
ExprResult ActOnParenListExpr(SourceLocation L,
SourceLocation R,
MultiExprArg Val);
/// ActOnStringLiteral - The specified tokens were lexed as pasted string
/// fragments (e.g. "foo" "bar" L"baz").
ExprResult ActOnStringLiteral(const Token *StringToks, unsigned NumStringToks,
Scope *UDLScope = 0);
ExprResult ActOnGenericSelectionExpr(SourceLocation KeyLoc,
SourceLocation DefaultLoc,
SourceLocation RParenLoc,
Expr *ControllingExpr,
MultiTypeArg ArgTypes,
MultiExprArg ArgExprs);
ExprResult CreateGenericSelectionExpr(SourceLocation KeyLoc,
SourceLocation DefaultLoc,
SourceLocation RParenLoc,
Expr *ControllingExpr,
TypeSourceInfo **Types,
Expr **Exprs,
unsigned NumAssocs);
// Binary/Unary Operators. 'Tok' is the token for the operator.
ExprResult CreateBuiltinUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc,
Expr *InputExpr);
ExprResult BuildUnaryOp(Scope *S, SourceLocation OpLoc,
UnaryOperatorKind Opc, Expr *Input);
ExprResult ActOnUnaryOp(Scope *S, SourceLocation OpLoc,
tok::TokenKind Op, Expr *Input);
ExprResult CreateUnaryExprOrTypeTraitExpr(TypeSourceInfo *TInfo,
SourceLocation OpLoc,
UnaryExprOrTypeTrait ExprKind,
SourceRange R);
ExprResult CreateUnaryExprOrTypeTraitExpr(Expr *E, SourceLocation OpLoc,
UnaryExprOrTypeTrait ExprKind);
ExprResult
ActOnUnaryExprOrTypeTraitExpr(SourceLocation OpLoc,
UnaryExprOrTypeTrait ExprKind,
bool IsType, void *TyOrEx,
const SourceRange &ArgRange);
ExprResult CheckPlaceholderExpr(Expr *E);
bool CheckVecStepExpr(Expr *E);
bool CheckUnaryExprOrTypeTraitOperand(Expr *E, UnaryExprOrTypeTrait ExprKind);
bool CheckUnaryExprOrTypeTraitOperand(QualType ExprType, SourceLocation OpLoc,
SourceRange ExprRange,
UnaryExprOrTypeTrait ExprKind);
ExprResult ActOnSizeofParameterPackExpr(Scope *S,
SourceLocation OpLoc,
IdentifierInfo &Name,
SourceLocation NameLoc,
SourceLocation RParenLoc);
ExprResult ActOnPostfixUnaryOp(Scope *S, SourceLocation OpLoc,
tok::TokenKind Kind, Expr *Input);
ExprResult ActOnArraySubscriptExpr(Scope *S, Expr *Base, SourceLocation LLoc,
Expr *Idx, SourceLocation RLoc);
ExprResult CreateBuiltinArraySubscriptExpr(Expr *Base, SourceLocation LLoc,
Expr *Idx, SourceLocation RLoc);
ExprResult BuildMemberReferenceExpr(Expr *Base, QualType BaseType,
SourceLocation OpLoc, bool IsArrow,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
NamedDecl *FirstQualifierInScope,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs);
// This struct is for use by ActOnMemberAccess to allow
// BuildMemberReferenceExpr to be able to reinvoke ActOnMemberAccess after
// changing the access operator from a '.' to a '->' (to see if that is the
// change needed to fix an error about an unknown member, e.g. when the class
// defines a custom operator->).
struct ActOnMemberAccessExtraArgs {
Scope *S;
UnqualifiedId &Id;
Decl *ObjCImpDecl;
bool HasTrailingLParen;
};
ExprResult BuildMemberReferenceExpr(Expr *Base, QualType BaseType,
SourceLocation OpLoc, bool IsArrow,
const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
NamedDecl *FirstQualifierInScope,
LookupResult &R,
const TemplateArgumentListInfo *TemplateArgs,
bool SuppressQualifierCheck = false,
ActOnMemberAccessExtraArgs *ExtraArgs = 0);
ExprResult PerformMemberExprBaseConversion(Expr *Base, bool IsArrow);
ExprResult LookupMemberExpr(LookupResult &R, ExprResult &Base,
bool &IsArrow, SourceLocation OpLoc,
CXXScopeSpec &SS,
Decl *ObjCImpDecl,
bool HasTemplateArgs);
bool CheckQualifiedMemberReference(Expr *BaseExpr, QualType BaseType,
const CXXScopeSpec &SS,
const LookupResult &R);
ExprResult ActOnDependentMemberExpr(Expr *Base, QualType BaseType,
bool IsArrow, SourceLocation OpLoc,
const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
NamedDecl *FirstQualifierInScope,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult ActOnMemberAccessExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
UnqualifiedId &Member,
Decl *ObjCImpDecl,
bool HasTrailingLParen);
void ActOnDefaultCtorInitializers(Decl *CDtorDecl);
bool ConvertArgumentsForCall(CallExpr *Call, Expr *Fn,
FunctionDecl *FDecl,
const FunctionProtoType *Proto,
Expr **Args, unsigned NumArgs,
SourceLocation RParenLoc,
bool ExecConfig = false);
void CheckStaticArrayArgument(SourceLocation CallLoc,
ParmVarDecl *Param,
const Expr *ArgExpr);
/// ActOnCallExpr - Handle a call to Fn with the specified array of arguments.
/// This provides the location of the left/right parens and a list of comma
/// locations.
ExprResult ActOnCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc,
MultiExprArg ArgExprs, SourceLocation RParenLoc,
Expr *ExecConfig = 0, bool IsExecConfig = false);
ExprResult BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl,
SourceLocation LParenLoc,
Expr **Args, unsigned NumArgs,
SourceLocation RParenLoc,
Expr *Config = 0,
bool IsExecConfig = false);
ExprResult ActOnCUDAExecConfigExpr(Scope *S, SourceLocation LLLLoc,
MultiExprArg ExecConfig,
SourceLocation GGGLoc);
ExprResult ActOnCastExpr(Scope *S, SourceLocation LParenLoc,
Declarator &D, ParsedType &Ty,
SourceLocation RParenLoc, Expr *CastExpr);
ExprResult BuildCStyleCastExpr(SourceLocation LParenLoc,
TypeSourceInfo *Ty,
SourceLocation RParenLoc,
Expr *Op);
CastKind PrepareScalarCast(ExprResult &src, QualType destType);
/// \brief Build an altivec or OpenCL literal.
ExprResult BuildVectorLiteral(SourceLocation LParenLoc,
SourceLocation RParenLoc, Expr *E,
TypeSourceInfo *TInfo);
ExprResult MaybeConvertParenListExprToParenExpr(Scope *S, Expr *ME);
ExprResult ActOnCompoundLiteral(SourceLocation LParenLoc,
ParsedType Ty,
SourceLocation RParenLoc,
Expr *InitExpr);
ExprResult BuildCompoundLiteralExpr(SourceLocation LParenLoc,
TypeSourceInfo *TInfo,
SourceLocation RParenLoc,
Expr *LiteralExpr);
ExprResult ActOnInitList(SourceLocation LBraceLoc,
MultiExprArg InitArgList,
SourceLocation RBraceLoc);
ExprResult ActOnDesignatedInitializer(Designation &Desig,
SourceLocation Loc,
bool GNUSyntax,
ExprResult Init);
ExprResult ActOnBinOp(Scope *S, SourceLocation TokLoc,
tok::TokenKind Kind, Expr *LHSExpr, Expr *RHSExpr);
ExprResult BuildBinOp(Scope *S, SourceLocation OpLoc,
BinaryOperatorKind Opc, Expr *LHSExpr, Expr *RHSExpr);
ExprResult CreateBuiltinBinOp(SourceLocation OpLoc, BinaryOperatorKind Opc,
Expr *LHSExpr, Expr *RHSExpr);
/// ActOnConditionalOp - Parse a ?: operation. Note that 'LHS' may be null
/// in the case of the GNU conditional expr extension.
ExprResult ActOnConditionalOp(SourceLocation QuestionLoc,
SourceLocation ColonLoc,
Expr *CondExpr, Expr *LHSExpr, Expr *RHSExpr);
/// ActOnAddrLabel - Parse the GNU address of label extension: "&&foo".
ExprResult ActOnAddrLabel(SourceLocation OpLoc, SourceLocation LabLoc,
LabelDecl *TheDecl);
void ActOnStartStmtExpr();
ExprResult ActOnStmtExpr(SourceLocation LPLoc, Stmt *SubStmt,
SourceLocation RPLoc); // "({..})"
void ActOnStmtExprError();
// __builtin_offsetof(type, identifier(.identifier|[expr])*)
struct OffsetOfComponent {
SourceLocation LocStart, LocEnd;
bool isBrackets; // true if [expr], false if .ident
union {
IdentifierInfo *IdentInfo;
Expr *E;
} U;
};
/// __builtin_offsetof(type, a.b[123][456].c)
ExprResult BuildBuiltinOffsetOf(SourceLocation BuiltinLoc,
TypeSourceInfo *TInfo,
OffsetOfComponent *CompPtr,
unsigned NumComponents,
SourceLocation RParenLoc);
ExprResult ActOnBuiltinOffsetOf(Scope *S,
SourceLocation BuiltinLoc,
SourceLocation TypeLoc,
ParsedType ParsedArgTy,
OffsetOfComponent *CompPtr,
unsigned NumComponents,
SourceLocation RParenLoc);
// __builtin_choose_expr(constExpr, expr1, expr2)
ExprResult ActOnChooseExpr(SourceLocation BuiltinLoc,
Expr *CondExpr, Expr *LHSExpr,
Expr *RHSExpr, SourceLocation RPLoc);
// __builtin_va_arg(expr, type)
ExprResult ActOnVAArg(SourceLocation BuiltinLoc, Expr *E, ParsedType Ty,
SourceLocation RPLoc);
ExprResult BuildVAArgExpr(SourceLocation BuiltinLoc, Expr *E,
TypeSourceInfo *TInfo, SourceLocation RPLoc);
// __null
ExprResult ActOnGNUNullExpr(SourceLocation TokenLoc);
bool CheckCaseExpression(Expr *E);
/// \brief Describes the result of an "if-exists" condition check.
enum IfExistsResult {
/// \brief The symbol exists.
IER_Exists,
/// \brief The symbol does not exist.
IER_DoesNotExist,
/// \brief The name is a dependent name, so the results will differ
/// from one instantiation to the next.
IER_Dependent,
/// \brief An error occurred.
IER_Error
};
IfExistsResult
CheckMicrosoftIfExistsSymbol(Scope *S, CXXScopeSpec &SS,
const DeclarationNameInfo &TargetNameInfo);
IfExistsResult
CheckMicrosoftIfExistsSymbol(Scope *S, SourceLocation KeywordLoc,
bool IsIfExists, CXXScopeSpec &SS,
UnqualifiedId &Name);
StmtResult BuildMSDependentExistsStmt(SourceLocation KeywordLoc,
bool IsIfExists,
NestedNameSpecifierLoc QualifierLoc,
DeclarationNameInfo NameInfo,
Stmt *Nested);
StmtResult ActOnMSDependentExistsStmt(SourceLocation KeywordLoc,
bool IsIfExists,
CXXScopeSpec &SS, UnqualifiedId &Name,
Stmt *Nested);
//===------------------------- "Block" Extension ------------------------===//
/// ActOnBlockStart - This callback is invoked when a block literal is
/// started.
void ActOnBlockStart(SourceLocation CaretLoc, Scope *CurScope);
/// ActOnBlockArguments - This callback allows processing of block arguments.
/// If there are no arguments, this is still invoked.
void ActOnBlockArguments(SourceLocation CaretLoc, Declarator &ParamInfo,
Scope *CurScope);
/// ActOnBlockError - If there is an error parsing a block, this callback
/// is invoked to pop the information about the block from the action impl.
void ActOnBlockError(SourceLocation CaretLoc, Scope *CurScope);
/// ActOnBlockStmtExpr - This is called when the body of a block statement
/// literal was successfully completed. ^(int x){...}
ExprResult ActOnBlockStmtExpr(SourceLocation CaretLoc, Stmt *Body,
Scope *CurScope);
//===---------------------------- OpenCL Features -----------------------===//
/// __builtin_astype(...)
ExprResult ActOnAsTypeExpr(Expr *E, ParsedType ParsedDestTy,
SourceLocation BuiltinLoc,
SourceLocation RParenLoc);
//===---------------------------- C++ Features --------------------------===//
// Act on C++ namespaces
/// \brief Called when the parser has seen 'namespace [Ident] {'; creates the
/// NamespaceDecl and pushes it as the current declaration context.
Decl *ActOnStartNamespaceDef(Scope *S, SourceLocation InlineLoc,
SourceLocation NamespaceLoc,
SourceLocation IdentLoc,
IdentifierInfo *Ident,
SourceLocation LBrace,
AttributeList *AttrList);
/// \brief Called when the closing brace of a namespace definition is seen.
void ActOnFinishNamespaceDef(Decl *Dcl, SourceLocation RBrace);
NamespaceDecl *getStdNamespace() const;
NamespaceDecl *getOrCreateStdNamespace();
CXXRecordDecl *getStdBadAlloc() const;
/// \brief Tests whether Ty is an instance of std::initializer_list and, if
/// it is and Element is not NULL, assigns the element type to Element.
bool isStdInitializerList(QualType Ty, QualType *Element);
/// \brief Looks for the std::initializer_list template and instantiates it
/// with Element, or emits an error if it's not found.
///
/// \returns The instantiated template, or null on error.
QualType BuildStdInitializerList(QualType Element, SourceLocation Loc);
/// \brief Determine whether Ctor is an initializer-list constructor, as
/// defined in [dcl.init.list]p2.
bool isInitListConstructor(const CXXConstructorDecl *Ctor);
/// \brief Parser callback for a using-directive ('using namespace X;').
Decl *ActOnUsingDirective(Scope *CurScope,
SourceLocation UsingLoc,
SourceLocation NamespcLoc,
CXXScopeSpec &SS,
SourceLocation IdentLoc,
IdentifierInfo *NamespcName,
AttributeList *AttrList);
void PushUsingDirective(Scope *S, UsingDirectiveDecl *UDir);
/// \brief Parser callback for a namespace alias ('namespace A = B;').
Decl *ActOnNamespaceAliasDef(Scope *CurScope,
SourceLocation NamespaceLoc,
SourceLocation AliasLoc,
IdentifierInfo *Alias,
CXXScopeSpec &SS,
SourceLocation IdentLoc,
IdentifierInfo *Ident);
void HideUsingShadowDecl(Scope *S, UsingShadowDecl *Shadow);
/// \brief Check whether a shadow declaration introduced by a using
/// declaration would conflict with existing declarations.
bool CheckUsingShadowDecl(UsingDecl *UD, NamedDecl *Target,
const LookupResult &PreviousDecls);
UsingShadowDecl *BuildUsingShadowDecl(Scope *S, UsingDecl *UD,
NamedDecl *Target);
/// \brief Check whether a using declaration redeclares a previous one.
bool CheckUsingDeclRedeclaration(SourceLocation UsingLoc,
bool isTypeName,
const CXXScopeSpec &SS,
SourceLocation NameLoc,
const LookupResult &Previous);
/// \brief Check that the nested-name-specifier of a using declaration is
/// permitted in the current context.
bool CheckUsingDeclQualifier(SourceLocation UsingLoc,
const CXXScopeSpec &SS,
SourceLocation NameLoc);
/// \brief Build a using declaration (or using typename declaration) after
/// the name has been resolved.
NamedDecl *BuildUsingDeclaration(Scope *S, AccessSpecifier AS,
SourceLocation UsingLoc,
CXXScopeSpec &SS,
const DeclarationNameInfo &NameInfo,
AttributeList *AttrList,
bool IsInstantiation,
bool IsTypeName,
SourceLocation TypenameLoc);
bool CheckInheritingConstructorUsingDecl(UsingDecl *UD);
/// \brief Parser callback for a using declaration or (C++98) access
/// declaration.
Decl *ActOnUsingDeclaration(Scope *CurScope,
AccessSpecifier AS,
bool HasUsingKeyword,
SourceLocation UsingLoc,
CXXScopeSpec &SS,
UnqualifiedId &Name,
AttributeList *AttrList,
bool IsTypeName,
SourceLocation TypenameLoc);
/// \brief Parser callback for an alias declaration ('using A = B;'),
/// possibly templated.
Decl *ActOnAliasDeclaration(Scope *CurScope,
AccessSpecifier AS,
MultiTemplateParamsArg TemplateParams,
SourceLocation UsingLoc,
UnqualifiedId &Name,
AttributeList *AttrList,
TypeResult Type);
/// BuildCXXConstructExpr - Creates a complete call to a constructor,
/// including handling of its default argument expressions.
///
/// \param ConstructKind - a CXXConstructExpr::ConstructionKind
ExprResult
BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
CXXConstructorDecl *Constructor, MultiExprArg Exprs,
bool HadMultipleCandidates, bool IsListInitialization,
bool RequiresZeroInit, unsigned ConstructKind,
SourceRange ParenRange);
// FIXME: Can we remove this and have the above BuildCXXConstructExpr check if
// the constructor can be elidable?
ExprResult
BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
CXXConstructorDecl *Constructor, bool Elidable,
MultiExprArg Exprs, bool HadMultipleCandidates,
bool IsListInitialization, bool RequiresZeroInit,
unsigned ConstructKind, SourceRange ParenRange);
/// BuildCXXDefaultArgExpr - Creates a CXXDefaultArgExpr, instantiating
/// the default expr if needed.
ExprResult BuildCXXDefaultArgExpr(SourceLocation CallLoc,
FunctionDecl *FD,
ParmVarDecl *Param);
/// FinalizeVarWithDestructor - Prepare for calling destructor on the
/// constructed variable.
void FinalizeVarWithDestructor(VarDecl *VD, const RecordType *DeclInitType);
/// \brief Helper class that collects exception specifications for
/// implicitly-declared special member functions.
class ImplicitExceptionSpecification {
// Pointer to allow copying
Sema *Self;
// We order exception specifications thus:
// noexcept is the most restrictive, but is only used in C++11.
// throw() comes next.
// Then a throw(collected exceptions)
// Finally no specification, which is expressed as noexcept(false).
// throw(...) is used instead if any called function uses it.
ExceptionSpecificationType ComputedEST;
llvm::SmallPtrSet<CanQualType, 4> ExceptionsSeen;
SmallVector<QualType, 4> Exceptions;
// Reset the accumulated dynamic-exception set (used when widening to a
// specification that subsumes it).
void ClearExceptions() {
ExceptionsSeen.clear();
Exceptions.clear();
}
public:
// Start from the most restrictive specification available for the
// language mode; CalledDecl/CalledExpr can only widen it from there.
explicit ImplicitExceptionSpecification(Sema &Self)
: Self(&Self), ComputedEST(EST_BasicNoexcept) {
if (!Self.getLangOpts().CPlusPlus11)
ComputedEST = EST_DynamicNone;
}
/// \brief Get the computed exception specification type.
ExceptionSpecificationType getExceptionSpecType() const {
assert(ComputedEST != EST_ComputedNoexcept &&
"noexcept(expr) should not be a possible result");
return ComputedEST;
}
/// \brief The number of exceptions in the exception specification.
unsigned size() const { return Exceptions.size(); }
/// \brief The set of exceptions in the exception specification.
const QualType *data() const { return Exceptions.data(); }
/// \brief Integrate another called method into the collected data.
void CalledDecl(SourceLocation CallLoc, const CXXMethodDecl *Method);
/// \brief Integrate an invoked expression into the collected data.
void CalledExpr(Expr *E);
/// \brief Overwrite an EPI's exception specification with this
/// computed exception specification.
void getEPI(FunctionProtoType::ExtProtoInfo &EPI) const {
EPI.ExceptionSpecType = getExceptionSpecType();
if (EPI.ExceptionSpecType == EST_Dynamic) {
EPI.NumExceptions = size();
EPI.Exceptions = data();
} else if (EPI.ExceptionSpecType == EST_None) {
/// C++11 [except.spec]p14:
/// The exception-specification is noexcept(false) if the set of
/// potential exceptions of the special member function contains "any"
EPI.ExceptionSpecType = EST_ComputedNoexcept;
EPI.NoexceptExpr = Self->ActOnCXXBoolLiteral(SourceLocation(),
tok::kw_false).take();
}
}
/// \brief Build a fresh ExtProtoInfo carrying the computed exception
/// specification.
FunctionProtoType::ExtProtoInfo getEPI() const {
FunctionProtoType::ExtProtoInfo EPI;
getEPI(EPI);
return EPI;
}
};
/// \brief Determine what sort of exception specification a defaulted
/// default constructor of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedDefaultCtorExceptionSpec(SourceLocation Loc,
CXXMethodDecl *MD);
/// \brief Determine what sort of exception specification a defaulted
/// copy constructor of a class will have, and whether the parameter
/// will be const.
ImplicitExceptionSpecification
ComputeDefaultedCopyCtorExceptionSpec(CXXMethodDecl *MD);
/// \brief Determine what sort of exception specification a defaulted
/// copy assignment operator of a class will have, and whether the
/// parameter will be const.
ImplicitExceptionSpecification
ComputeDefaultedCopyAssignmentExceptionSpec(CXXMethodDecl *MD);
/// \brief Determine what sort of exception specification a defaulted move
/// constructor of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedMoveCtorExceptionSpec(CXXMethodDecl *MD);
/// \brief Determine what sort of exception specification a defaulted move
/// assignment operator of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedMoveAssignmentExceptionSpec(CXXMethodDecl *MD);
/// \brief Determine what sort of exception specification a defaulted
/// destructor of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedDtorExceptionSpec(CXXMethodDecl *MD);
/// \brief Determine what sort of exception specification an inheriting
/// constructor of a class will have.
ImplicitExceptionSpecification
ComputeInheritingCtorExceptionSpec(CXXConstructorDecl *CD);
/// \brief Evaluate the implicit exception specification for a defaulted
/// special member function.
void EvaluateImplicitExceptionSpec(SourceLocation Loc, CXXMethodDecl *MD);
/// \brief Check the given exception-specification and update the
/// extended prototype information with the results.
void checkExceptionSpecification(ExceptionSpecificationType EST,
ArrayRef<ParsedType> DynamicExceptions,
ArrayRef<SourceRange> DynamicExceptionRanges,
Expr *NoexceptExpr,
SmallVectorImpl<QualType> &Exceptions,
FunctionProtoType::ExtProtoInfo &EPI);
/// \brief Determine if a special member function should have a deleted
/// definition when it is defaulted.
bool ShouldDeleteSpecialMember(CXXMethodDecl *MD, CXXSpecialMember CSM,
bool Diagnose = false);
/// \brief Declare the implicit default constructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// default constructor will be added.
///
/// \returns The implicitly-declared default constructor.
CXXConstructorDecl *DeclareImplicitDefaultConstructor(
CXXRecordDecl *ClassDecl);
/// DefineImplicitDefaultConstructor - Checks for feasibility of
/// defining this constructor as the default constructor.
void DefineImplicitDefaultConstructor(SourceLocation CurrentLocation,
CXXConstructorDecl *Constructor);
/// \brief Declare the implicit destructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// destructor will be added.
///
/// \returns The implicitly-declared destructor.
CXXDestructorDecl *DeclareImplicitDestructor(CXXRecordDecl *ClassDecl);
/// DefineImplicitDestructor - Checks for feasibility of
/// defining this destructor as the default destructor.
void DefineImplicitDestructor(SourceLocation CurrentLocation,
CXXDestructorDecl *Destructor);
/// \brief Build an exception spec for destructors that don't have one.
///
/// C++11 says that user-defined destructors with no exception spec get one
/// that looks as if the destructor was implicitly declared.
void AdjustDestructorExceptionSpec(CXXRecordDecl *ClassDecl,
CXXDestructorDecl *Destructor);
/// \brief Declare all inheriting constructors for the given class.
///
/// \param ClassDecl The class declaration into which the inheriting
/// constructors will be added.
void DeclareInheritingConstructors(CXXRecordDecl *ClassDecl);
/// \brief Define the specified inheriting constructor.
void DefineInheritingConstructor(SourceLocation UseLoc,
CXXConstructorDecl *Constructor);
/// \brief Declare the implicit copy constructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// copy constructor will be added.
///
/// \returns The implicitly-declared copy constructor.
CXXConstructorDecl *DeclareImplicitCopyConstructor(CXXRecordDecl *ClassDecl);
/// DefineImplicitCopyConstructor - Checks for feasibility of
/// defining this constructor as the copy constructor.
void DefineImplicitCopyConstructor(SourceLocation CurrentLocation,
CXXConstructorDecl *Constructor);
/// \brief Declare the implicit move constructor for the given class.
///
/// \param ClassDecl The Class declaration into which the implicit
/// move constructor will be added.
///
/// \returns The implicitly-declared move constructor, or NULL if it wasn't
/// declared.
CXXConstructorDecl *DeclareImplicitMoveConstructor(CXXRecordDecl *ClassDecl);
/// DefineImplicitMoveConstructor - Checks for feasibility of
/// defining this constructor as the move constructor.
void DefineImplicitMoveConstructor(SourceLocation CurrentLocation,
CXXConstructorDecl *Constructor);
/// \brief Declare the implicit copy assignment operator for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// copy assignment operator will be added.
///
/// \returns The implicitly-declared copy assignment operator.
CXXMethodDecl *DeclareImplicitCopyAssignment(CXXRecordDecl *ClassDecl);
/// \brief Defines an implicitly-declared copy assignment operator.
void DefineImplicitCopyAssignment(SourceLocation CurrentLocation,
CXXMethodDecl *MethodDecl);
/// \brief Declare the implicit move assignment operator for the given class.
///
/// \param ClassDecl The Class declaration into which the implicit
/// move assignment operator will be added.
///
/// \returns The implicitly-declared move assignment operator, or NULL if it
/// wasn't declared.
CXXMethodDecl *DeclareImplicitMoveAssignment(CXXRecordDecl *ClassDecl);
/// \brief Defines an implicitly-declared move assignment operator.
void DefineImplicitMoveAssignment(SourceLocation CurrentLocation,
CXXMethodDecl *MethodDecl);
/// \brief Force the declaration of any implicitly-declared members of this
/// class.
void ForceDeclarationOfImplicitMembers(CXXRecordDecl *Class);
/// \brief Determine whether the given function is an implicitly-deleted
/// special member function.
bool isImplicitlyDeleted(FunctionDecl *FD);
/// \brief Check whether 'this' shows up in the type of a static member
/// function after the (naturally empty) cv-qualifier-seq would be.
///
/// \returns true if an error occurred.
bool checkThisInStaticMemberFunctionType(CXXMethodDecl *Method);
/// \brief Whether 'this' shows up in the exception specification of a static
/// member function.
bool checkThisInStaticMemberFunctionExceptionSpec(CXXMethodDecl *Method);
/// \brief Check whether 'this' shows up in the attributes of the given
/// static member function.
///
/// \returns true if an error occurred.
bool checkThisInStaticMemberFunctionAttributes(CXXMethodDecl *Method);
/// MaybeBindToTemporary - If the passed in expression has a record type with
/// a non-trivial destructor, this will return CXXBindTemporaryExpr. Otherwise
/// it simply returns the passed in expression.
ExprResult MaybeBindToTemporary(Expr *E);
/// \brief Convert the given arguments so they can be passed to the given
/// constructor, collecting the converted arguments in ConvertedArgs.
bool CompleteConstructorCall(CXXConstructorDecl *Constructor,
MultiExprArg ArgsPtr,
SourceLocation Loc,
SmallVectorImpl<Expr*> &ConvertedArgs,
bool AllowExplicit = false,
bool IsListInitialization = false);
ParsedType getInheritingConstructorName(CXXScopeSpec &SS,
SourceLocation NameLoc,
IdentifierInfo &Name);
/// \brief Resolve the type named in a destructor name ('~II').
ParsedType getDestructorName(SourceLocation TildeLoc,
IdentifierInfo &II, SourceLocation NameLoc,
Scope *S, CXXScopeSpec &SS,
ParsedType ObjectType,
bool EnteringContext);
ParsedType getDestructorType(const DeclSpec& DS, ParsedType ObjectType);
// Checks that reinterpret casts don't have undefined behavior.
void CheckCompatibleReinterpretCast(QualType SrcType, QualType DestType,
bool IsDereference, SourceRange Range);
/// ActOnCXXNamedCast - Parse {dynamic,static,reinterpret,const}_cast's.
ExprResult ActOnCXXNamedCast(SourceLocation OpLoc,
tok::TokenKind Kind,
SourceLocation LAngleBracketLoc,
Declarator &D,
SourceLocation RAngleBracketLoc,
SourceLocation LParenLoc,
Expr *E,
SourceLocation RParenLoc);
ExprResult BuildCXXNamedCast(SourceLocation OpLoc,
tok::TokenKind Kind,
TypeSourceInfo *Ty,
Expr *E,
SourceRange AngleBrackets,
SourceRange Parens);
/// \brief Build a typeid expression with a type operand.
ExprResult BuildCXXTypeId(QualType TypeInfoType,
SourceLocation TypeidLoc,
TypeSourceInfo *Operand,
SourceLocation RParenLoc);
/// \brief Build a typeid expression with an expression operand.
ExprResult BuildCXXTypeId(QualType TypeInfoType,
SourceLocation TypeidLoc,
Expr *Operand,
SourceLocation RParenLoc);
/// ActOnCXXTypeid - Parse typeid( something ).
ExprResult ActOnCXXTypeid(SourceLocation OpLoc,
SourceLocation LParenLoc, bool isType,
void *TyOrExpr,
SourceLocation RParenLoc);
/// \brief Build a Microsoft __uuidof expression with a type operand.
ExprResult BuildCXXUuidof(QualType TypeInfoType,
SourceLocation TypeidLoc,
TypeSourceInfo *Operand,
SourceLocation RParenLoc);
/// \brief Build a Microsoft __uuidof expression with an expression operand.
ExprResult BuildCXXUuidof(QualType TypeInfoType,
SourceLocation TypeidLoc,
Expr *Operand,
SourceLocation RParenLoc);
/// ActOnCXXUuidof - Parse __uuidof( something ).
ExprResult ActOnCXXUuidof(SourceLocation OpLoc,
SourceLocation LParenLoc, bool isType,
void *TyOrExpr,
SourceLocation RParenLoc);
/// ActOnCXXThis - Parse 'this' pointer.
ExprResult ActOnCXXThis(SourceLocation loc);
/// \brief Try to retrieve the type of the 'this' pointer.
///
/// \returns The type of 'this', if possible. Otherwise, returns a NULL type.
QualType getCurrentThisType();
/// \brief When non-NULL, the C++ 'this' expression is allowed despite the
/// current context not being a non-static member function. In such cases,
/// this provides the type used for 'this'.
QualType CXXThisTypeOverride;
/// \brief RAII object used to temporarily allow the C++ 'this' expression
/// to be used, with the given qualifiers on the current class type.
class CXXThisScopeRAII {
Sema &S;
// Saved value of S.CXXThisTypeOverride, restored by the destructor.
QualType OldCXXThisTypeOverride;
bool Enabled;
public:
/// \brief Introduce a new scope where 'this' may be allowed (when enabled),
/// using the given declaration (which is either a class template or a
/// class), along with the qualifiers placed on '*this'.
CXXThisScopeRAII(Sema &S, Decl *ContextDecl, unsigned CXXThisTypeQuals,
bool Enabled = true);
~CXXThisScopeRAII();
};
/// \brief Make sure the value of 'this' is actually available in the current
/// context, if it is a potentially evaluated context.
///
/// \param Loc The location at which the capture of 'this' occurs.
///
/// \param Explicit Whether 'this' is explicitly captured in a lambda
/// capture list.
void CheckCXXThisCapture(SourceLocation Loc, bool Explicit = false);
/// \brief Determine whether the given type is the type of *this that is used
/// outside of the body of a member function for a type that is currently
/// being defined.
bool isThisOutsideMemberFunctionBody(QualType BaseType);
/// ActOnCXXBoolLiteral - Parse {true,false} literals.
ExprResult ActOnCXXBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind);
/// ActOnObjCBoolLiteral - Parse {__objc_yes,__objc_no} literals.
ExprResult ActOnObjCBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind);
/// ActOnCXXNullPtrLiteral - Parse 'nullptr'.
ExprResult ActOnCXXNullPtrLiteral(SourceLocation Loc);
/// ActOnCXXThrow - Parse throw expressions.
ExprResult ActOnCXXThrow(Scope *S, SourceLocation OpLoc, Expr *expr);
ExprResult BuildCXXThrow(SourceLocation OpLoc, Expr *Ex,
bool IsThrownVarInScope);
/// \brief Check the operand of a throw expression and perform the required
/// conversions.
ExprResult CheckCXXThrowOperand(SourceLocation ThrowLoc, Expr *E,
bool IsThrownVarInScope);
/// ActOnCXXTypeConstructExpr - Parse construction of a specified type.
/// Can be interpreted either as function-style casting ("int(x)")
/// or class type construction ("ClassType(x,y,z)")
/// or creation of a value-initialized type ("int()").
ExprResult ActOnCXXTypeConstructExpr(ParsedType TypeRep,
SourceLocation LParenLoc,
MultiExprArg Exprs,
SourceLocation RParenLoc);
ExprResult BuildCXXTypeConstructExpr(TypeSourceInfo *Type,
SourceLocation LParenLoc,
MultiExprArg Exprs,
SourceLocation RParenLoc);
/// ActOnCXXNew - Parsed a C++ 'new' expression.
ExprResult ActOnCXXNew(SourceLocation StartLoc, bool UseGlobal,
SourceLocation PlacementLParen,
MultiExprArg PlacementArgs,
SourceLocation PlacementRParen,
SourceRange TypeIdParens, Declarator &D,
Expr *Initializer);
/// \brief Build a new-expression after semantic analysis of the allocated
/// type, placement arguments, and initializer.
ExprResult BuildCXXNew(SourceRange Range, bool UseGlobal,
SourceLocation PlacementLParen,
MultiExprArg PlacementArgs,
SourceLocation PlacementRParen,
SourceRange TypeIdParens,
QualType AllocType,
TypeSourceInfo *AllocTypeInfo,
Expr *ArraySize,
SourceRange DirectInitRange,
Expr *Initializer,
bool TypeMayContainAuto = true);
/// \brief Check that a type is allowed as the allocated type of a
/// new-expression.
bool CheckAllocatedType(QualType AllocType, SourceLocation Loc,
SourceRange R);
/// \brief Find the operator new and operator delete functions for a
/// new-expression.
bool FindAllocationFunctions(SourceLocation StartLoc, SourceRange Range,
bool UseGlobal, QualType AllocType, bool IsArray,
Expr **PlaceArgs, unsigned NumPlaceArgs,
FunctionDecl *&OperatorNew,
FunctionDecl *&OperatorDelete);
bool FindAllocationOverload(SourceLocation StartLoc, SourceRange Range,
DeclarationName Name, Expr** Args,
unsigned NumArgs, DeclContext *Ctx,
bool AllowMissing, FunctionDecl *&Operator,
bool Diagnose = true);
/// \brief Declare the global forms of operator new/delete if not already
/// declared.
void DeclareGlobalNewDelete();
void DeclareGlobalAllocationFunction(DeclarationName Name, QualType Return,
QualType Argument,
bool addMallocAttr = false);
bool FindDeallocationFunction(SourceLocation StartLoc, CXXRecordDecl *RD,
DeclarationName Name, FunctionDecl* &Operator,
bool Diagnose = true);
/// ActOnCXXDelete - Parsed a C++ 'delete' expression
ExprResult ActOnCXXDelete(SourceLocation StartLoc,
bool UseGlobal, bool ArrayForm,
Expr *Operand);
/// \brief Parsed a condition that declares a variable (e.g. 'if (int x = f())').
DeclResult ActOnCXXConditionDeclaration(Scope *S, Declarator &D);
ExprResult CheckConditionVariable(VarDecl *ConditionVar,
SourceLocation StmtLoc,
bool ConvertToBoolean);
/// \brief Parsed a noexcept(expr) operator.
ExprResult ActOnNoexceptExpr(SourceLocation KeyLoc, SourceLocation LParen,
Expr *Operand, SourceLocation RParen);
ExprResult BuildCXXNoexceptExpr(SourceLocation KeyLoc, Expr *Operand,
SourceLocation RParen);
/// ActOnUnaryTypeTrait - Parsed one of the unary type trait support
/// pseudo-functions.
ExprResult ActOnUnaryTypeTrait(UnaryTypeTrait OTT,
SourceLocation KWLoc,
ParsedType Ty,
SourceLocation RParen);
ExprResult BuildUnaryTypeTrait(UnaryTypeTrait OTT,
SourceLocation KWLoc,
TypeSourceInfo *T,
SourceLocation RParen);
/// ActOnBinaryTypeTrait - Parsed one of the binary type trait support
/// pseudo-functions.
ExprResult ActOnBinaryTypeTrait(BinaryTypeTrait OTT,
SourceLocation KWLoc,
ParsedType LhsTy,
ParsedType RhsTy,
SourceLocation RParen);
ExprResult BuildBinaryTypeTrait(BinaryTypeTrait BTT,
SourceLocation KWLoc,
TypeSourceInfo *LhsT,
TypeSourceInfo *RhsT,
SourceLocation RParen);
/// \brief Parsed one of the type trait support pseudo-functions.
ExprResult ActOnTypeTrait(TypeTrait Kind, SourceLocation KWLoc,
ArrayRef<ParsedType> Args,
SourceLocation RParenLoc);
ExprResult BuildTypeTrait(TypeTrait Kind, SourceLocation KWLoc,
ArrayRef<TypeSourceInfo *> Args,
SourceLocation RParenLoc);
/// ActOnArrayTypeTrait - Parsed one of the array type trait support
/// pseudo-functions.
ExprResult ActOnArrayTypeTrait(ArrayTypeTrait ATT,
SourceLocation KWLoc,
ParsedType LhsTy,
Expr *DimExpr,
SourceLocation RParen);
ExprResult BuildArrayTypeTrait(ArrayTypeTrait ATT,
SourceLocation KWLoc,
TypeSourceInfo *TSInfo,
Expr *DimExpr,
SourceLocation RParen);
/// ActOnExpressionTrait - Parsed one of the expression trait support
/// pseudo-functions.
ExprResult ActOnExpressionTrait(ExpressionTrait OET,
SourceLocation KWLoc,
Expr *Queried,
SourceLocation RParen);
ExprResult BuildExpressionTrait(ExpressionTrait OET,
SourceLocation KWLoc,
Expr *Queried,
SourceLocation RParen);
/// \brief Called when the parser sees '.' or '->' after an expression, to
/// determine the object type and whether this may be a pseudo-destructor.
ExprResult ActOnStartCXXMemberReference(Scope *S,
Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
ParsedType &ObjectType,
bool &MayBePseudoDestructor);
ExprResult DiagnoseDtorReference(SourceLocation NameLoc, Expr *MemExpr);
/// \brief Build a pseudo-destructor expression (e.g. 'p->~int()').
ExprResult BuildPseudoDestructorExpr(Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
const CXXScopeSpec &SS,
TypeSourceInfo *ScopeType,
SourceLocation CCLoc,
SourceLocation TildeLoc,
PseudoDestructorTypeStorage DestroyedType,
bool HasTrailingLParen);
ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
CXXScopeSpec &SS,
UnqualifiedId &FirstTypeName,
SourceLocation CCLoc,
SourceLocation TildeLoc,
UnqualifiedId &SecondTypeName,
bool HasTrailingLParen);
ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
SourceLocation TildeLoc,
const DeclSpec& DS,
bool HasTrailingLParen);
/// MaybeCreateExprWithCleanups - If the current full-expression
/// requires any cleanups, surround it with a ExprWithCleanups node.
/// Otherwise, just returns the passed-in expression.
Expr *MaybeCreateExprWithCleanups(Expr *SubExpr);
Stmt *MaybeCreateStmtWithCleanups(Stmt *SubStmt);
ExprResult MaybeCreateExprWithCleanups(ExprResult SubExpr);
/// \brief Finish a full-expression, using the expression's own location
/// (or an invalid location for a null expression) as the point at which
/// cleanups are attached.
ExprResult ActOnFinishFullExpr(Expr *Expr) {
  SourceLocation CleanupLoc;
  if (Expr)
    CleanupLoc = Expr->getExprLoc();
  return ActOnFinishFullExpr(Expr, CleanupLoc);
}
/// \brief Finish a full-expression at the given location, optionally
/// diagnosing discarded-value and checking constant expressions.
ExprResult ActOnFinishFullExpr(Expr *Expr, SourceLocation CC,
bool DiscardedValue = false,
bool IsConstexpr = false);
StmtResult ActOnFinishFullStmt(Stmt *Stmt);
// Marks SS invalid if it represents an incomplete type.
bool RequireCompleteDeclContext(CXXScopeSpec &SS, DeclContext *DC);
/// \brief Compute the DeclContext named by a type or scope specifier.
DeclContext *computeDeclContext(QualType T);
DeclContext *computeDeclContext(const CXXScopeSpec &SS,
bool EnteringContext = false);
bool isDependentScopeSpecifier(const CXXScopeSpec &SS);
CXXRecordDecl *getCurrentInstantiationOf(NestedNameSpecifier *NNS);
bool isUnknownSpecialization(const CXXScopeSpec &SS);
/// \brief The parser has parsed a global nested-name-specifier '::'.
///
/// \param S The scope in which this nested-name-specifier occurs.
///
/// \param CCLoc The location of the '::'.
///
/// \param SS The nested-name-specifier, which will be updated in-place
/// to reflect the parsed nested-name-specifier.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnCXXGlobalScopeSpecifier(Scope *S, SourceLocation CCLoc,
CXXScopeSpec &SS);
bool isAcceptableNestedNameSpecifier(const NamedDecl *SD);
NamedDecl *FindFirstQualifierInScope(Scope *S, NestedNameSpecifier *NNS);
/// \brief Determine whether 'identifier::' names a non-type entity (used
/// for error recovery).
bool isNonTypeNestedNameSpecifier(Scope *S, CXXScopeSpec &SS,
SourceLocation IdLoc,
IdentifierInfo &II,
ParsedType ObjectType);
/// \brief Build a nested-name-specifier component from an identifier,
/// optionally performing only error-recovery lookup.
bool BuildCXXNestedNameSpecifier(Scope *S,
IdentifierInfo &Identifier,
SourceLocation IdentifierLoc,
SourceLocation CCLoc,
QualType ObjectType,
bool EnteringContext,
CXXScopeSpec &SS,
NamedDecl *ScopeLookupResult,
bool ErrorRecoveryLookup);
/// \brief The parser has parsed a nested-name-specifier 'identifier::'.
///
/// \param S The scope in which this nested-name-specifier occurs.
///
/// \param Identifier The identifier preceding the '::'.
///
/// \param IdentifierLoc The location of the identifier.
///
/// \param CCLoc The location of the '::'.
///
/// \param ObjectType The type of the object, if we're parsing
/// nested-name-specifier in a member access expression.
///
/// \param EnteringContext Whether we're entering the context nominated by
/// this nested-name-specifier.
///
/// \param SS The nested-name-specifier, which is both an input
/// parameter (the nested-name-specifier before this type) and an
/// output parameter (containing the full nested-name-specifier,
/// including this new type).
///
/// \returns true if an error occurred, false otherwise.
bool ActOnCXXNestedNameSpecifier(Scope *S,
IdentifierInfo &Identifier,
SourceLocation IdentifierLoc,
SourceLocation CCLoc,
ParsedType ObjectType,
bool EnteringContext,
CXXScopeSpec &SS);
/// \brief Finish analysis of an expression used as a decltype() operand.
ExprResult ActOnDecltypeExpression(Expr *E);
/// \brief The parser has parsed a nested-name-specifier 'decltype(expr)::'.
bool ActOnCXXNestedNameSpecifierDecltype(CXXScopeSpec &SS,
const DeclSpec &DS,
SourceLocation ColonColonLoc);
/// \brief Determine whether 'identifier' would only be valid as the start of
/// a nested-name-specifier (used to produce better diagnostics).
bool IsInvalidUnlessNestedName(Scope *S, CXXScopeSpec &SS,
IdentifierInfo &Identifier,
SourceLocation IdentifierLoc,
SourceLocation ColonLoc,
ParsedType ObjectType,
bool EnteringContext);
/// \brief The parser has parsed a nested-name-specifier
/// 'template[opt] template-name < template-args >::'.
///
/// \param S The scope in which this nested-name-specifier occurs.
///
/// \param SS The nested-name-specifier, which is both an input
/// parameter (the nested-name-specifier before this type) and an
/// output parameter (containing the full nested-name-specifier,
/// including this new type).
///
/// \param TemplateKWLoc the location of the 'template' keyword, if any.
/// \param TemplateName the template name.
/// \param TemplateNameLoc The location of the template name.
/// \param LAngleLoc The location of the opening angle bracket ('<').
/// \param TemplateArgs The template arguments.
/// \param RAngleLoc The location of the closing angle bracket ('>').
/// \param CCLoc The location of the '::'.
///
/// \param EnteringContext Whether we're entering the context of the
/// nested-name-specifier.
///
///
/// \returns true if an error occurred, false otherwise.
bool ActOnCXXNestedNameSpecifier(Scope *S,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
TemplateTy TemplateName,
SourceLocation TemplateNameLoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgs,
SourceLocation RAngleLoc,
SourceLocation CCLoc,
bool EnteringContext);
/// \brief Given a C++ nested-name-specifier, produce an annotation value
/// that the parser can use later to reconstruct the given
/// nested-name-specifier.
///
/// \param SS A nested-name-specifier.
///
/// \returns A pointer containing all of the information in the
/// nested-name-specifier \p SS.
void *SaveNestedNameSpecifierAnnotation(CXXScopeSpec &SS);
/// \brief Given an annotation pointer for a nested-name-specifier, restore
/// the nested-name-specifier structure.
///
/// \param Annotation The annotation pointer, produced by
/// \c SaveNestedNameSpecifierAnnotation().
///
/// \param AnnotationRange The source range corresponding to the annotation.
///
/// \param SS The nested-name-specifier that will be updated with the contents
/// of the annotation pointer.
void RestoreNestedNameSpecifierAnnotation(void *Annotation,
SourceRange AnnotationRange,
CXXScopeSpec &SS);
bool ShouldEnterDeclaratorScope(Scope *S, const CXXScopeSpec &SS);
/// ActOnCXXEnterDeclaratorScope - Called when a C++ scope specifier (global
/// scope or nested-name-specifier) is parsed, part of a declarator-id.
/// After this method is called, according to [C++ 3.4.3p3], names should be
/// looked up in the declarator-id's scope, until the declarator is parsed and
/// ActOnCXXExitDeclaratorScope is called.
/// The 'SS' should be a non-empty valid CXXScopeSpec.
bool ActOnCXXEnterDeclaratorScope(Scope *S, CXXScopeSpec &SS);
/// ActOnCXXExitDeclaratorScope - Called when a declarator that previously
/// invoked ActOnCXXEnterDeclaratorScope(), is finished. 'SS' is the same
/// CXXScopeSpec that was passed to ActOnCXXEnterDeclaratorScope as well.
/// Used to indicate that names should revert to being looked up in the
/// defining scope.
void ActOnCXXExitDeclaratorScope(Scope *S, const CXXScopeSpec &SS);
/// ActOnCXXEnterDeclInitializer - Invoked when we are about to parse an
/// initializer for the declaration 'Dcl'.
/// After this method is called, according to [C++ 3.4.1p13], if 'Dcl' is a
/// static data member of class X, names should be looked up in the scope of
/// class X.
void ActOnCXXEnterDeclInitializer(Scope *S, Decl *Dcl);
/// ActOnCXXExitDeclInitializer - Invoked after we are finished parsing an
/// initializer for the declaration 'Dcl'.
void ActOnCXXExitDeclInitializer(Scope *S, Decl *Dcl);
/// \brief Create a new lambda closure type.
CXXRecordDecl *createLambdaClosureType(SourceRange IntroducerRange,
TypeSourceInfo *Info,
bool KnownDependent);
/// \brief Start the definition of a lambda expression.
CXXMethodDecl *startLambdaDefinition(CXXRecordDecl *Class,
SourceRange IntroducerRange,
TypeSourceInfo *MethodType,
SourceLocation EndLoc,
ArrayRef<ParmVarDecl *> Params);
/// \brief Introduce the scope for a lambda expression.
sema::LambdaScopeInfo *enterLambdaScope(CXXMethodDecl *CallOperator,
SourceRange IntroducerRange,
LambdaCaptureDefault CaptureDefault,
bool ExplicitParams,
bool ExplicitResultType,
bool Mutable);
/// \brief Note that we have finished the explicit captures for the
/// given lambda.
void finishLambdaExplicitCaptures(sema::LambdaScopeInfo *LSI);
/// \brief Introduce the lambda parameters into scope.
void addLambdaParameters(CXXMethodDecl *CallOperator, Scope *CurScope);
/// \brief Deduce a block or lambda's return type based on the return
/// statements present in the body.
void deduceClosureReturnType(sema::CapturingScopeInfo &CSI);
/// ActOnStartOfLambdaDefinition - This is called just before we start
/// parsing the body of a lambda; it analyzes the explicit captures and
/// arguments, and sets up various data-structures for the body of the
/// lambda.
void ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro,
Declarator &ParamInfo, Scope *CurScope);
/// ActOnLambdaError - If there is an error parsing a lambda, this callback
/// is invoked to pop the information about the lambda.
void ActOnLambdaError(SourceLocation StartLoc, Scope *CurScope,
bool IsInstantiation = false);
/// ActOnLambdaExpr - This is called when the body of a lambda expression
/// was successfully completed.
ExprResult ActOnLambdaExpr(SourceLocation StartLoc, Stmt *Body,
Scope *CurScope,
bool IsInstantiation = false);
/// \brief Define the "body" of the conversion from a lambda object to a
/// function pointer.
///
/// This routine doesn't actually define a sensible body; rather, it fills
/// in the initialization expression needed to copy the lambda object into
/// the block, and IR generation actually generates the real body of the
/// block pointer conversion.
void DefineImplicitLambdaToFunctionPointerConversion(
SourceLocation CurrentLoc, CXXConversionDecl *Conv);
/// \brief Define the "body" of the conversion from a lambda object to a
/// block pointer.
///
/// This routine doesn't actually define a sensible body; rather, it fills
/// in the initialization expression needed to copy the lambda object into
/// the block, and IR generation actually generates the real body of the
/// block pointer conversion.
void DefineImplicitLambdaToBlockPointerConversion(SourceLocation CurrentLoc,
CXXConversionDecl *Conv);
/// \brief Build the block object that results from converting a lambda to an
/// Objective-C block.
ExprResult BuildBlockForLambdaConversion(SourceLocation CurrentLocation,
SourceLocation ConvLocation,
CXXConversionDecl *Conv,
Expr *Src);
// ParseObjCStringLiteral - Parse Objective-C string literals.
ExprResult ParseObjCStringLiteral(SourceLocation *AtLocs,
Expr **Strings,
unsigned NumStrings);
ExprResult BuildObjCStringLiteral(SourceLocation AtLoc, StringLiteral *S);
/// BuildObjCNumericLiteral - builds an ObjCBoxedExpr AST node for the
/// numeric literal expression. Type of the expression will be "NSNumber *"
/// or "id" if NSNumber is unavailable.
ExprResult BuildObjCNumericLiteral(SourceLocation AtLoc, Expr *Number);
ExprResult ActOnObjCBoolLiteral(SourceLocation AtLoc, SourceLocation ValueLoc,
bool Value);
ExprResult BuildObjCArrayLiteral(SourceRange SR, MultiExprArg Elements);
/// BuildObjCBoxedExpr - builds an ObjCBoxedExpr AST node for the
/// '@' prefixed parenthesized expression. The type of the expression will
/// either be "NSNumber *" or "NSString *" depending on the type of
/// ValueType, which is allowed to be a built-in numeric type or
/// "char *" or "const char *".
ExprResult BuildObjCBoxedExpr(SourceRange SR, Expr *ValueExpr);
ExprResult BuildObjCSubscriptExpression(SourceLocation RB, Expr *BaseExpr,
Expr *IndexExpr,
ObjCMethodDecl *getterMethod,
ObjCMethodDecl *setterMethod);
ExprResult BuildObjCDictionaryLiteral(SourceRange SR,
ObjCDictionaryElement *Elements,
unsigned NumElements);
ExprResult BuildObjCEncodeExpression(SourceLocation AtLoc,
TypeSourceInfo *EncodedTypeInfo,
SourceLocation RParenLoc);
ExprResult BuildCXXMemberCallExpr(Expr *Exp, NamedDecl *FoundDecl,
CXXConversionDecl *Method,
bool HadMultipleCandidates);
ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc,
SourceLocation EncodeLoc,
SourceLocation LParenLoc,
ParsedType Ty,
SourceLocation RParenLoc);
/// ParseObjCSelectorExpression - Build selector expression for \@selector
ExprResult ParseObjCSelectorExpression(Selector Sel,
SourceLocation AtLoc,
SourceLocation SelLoc,
SourceLocation LParenLoc,
SourceLocation RParenLoc);
/// ParseObjCProtocolExpression - Build protocol expression for \@protocol
ExprResult ParseObjCProtocolExpression(IdentifierInfo * ProtocolName,
SourceLocation AtLoc,
SourceLocation ProtoLoc,
SourceLocation LParenLoc,
SourceLocation ProtoIdLoc,
SourceLocation RParenLoc);
//===--------------------------------------------------------------------===//
// C++ Declarations
//
Decl *ActOnStartLinkageSpecification(Scope *S,
SourceLocation ExternLoc,
SourceLocation LangLoc,
StringRef Lang,
SourceLocation LBraceLoc);
Decl *ActOnFinishLinkageSpecification(Scope *S,
Decl *LinkageSpec,
SourceLocation RBraceLoc);
//===--------------------------------------------------------------------===//
// C++ Classes
//
bool isCurrentClassName(const IdentifierInfo &II, Scope *S,
const CXXScopeSpec *SS = 0);
bool ActOnAccessSpecifier(AccessSpecifier Access,
SourceLocation ASLoc,
SourceLocation ColonLoc,
AttributeList *Attrs = 0);
NamedDecl *ActOnCXXMemberDeclarator(Scope *S, AccessSpecifier AS,
Declarator &D,
MultiTemplateParamsArg TemplateParameterLists,
Expr *BitfieldWidth, const VirtSpecifiers &VS,
InClassInitStyle InitStyle);
void ActOnCXXInClassMemberInitializer(Decl *VarDecl, SourceLocation EqualLoc,
Expr *Init);
MemInitResult ActOnMemInitializer(Decl *ConstructorD,
Scope *S,
CXXScopeSpec &SS,
IdentifierInfo *MemberOrBase,
ParsedType TemplateTypeTy,
const DeclSpec &DS,
SourceLocation IdLoc,
SourceLocation LParenLoc,
Expr **Args, unsigned NumArgs,
SourceLocation RParenLoc,
SourceLocation EllipsisLoc);
MemInitResult ActOnMemInitializer(Decl *ConstructorD,
Scope *S,
CXXScopeSpec &SS,
IdentifierInfo *MemberOrBase,
ParsedType TemplateTypeTy,
const DeclSpec &DS,
SourceLocation IdLoc,
Expr *InitList,
SourceLocation EllipsisLoc);
MemInitResult BuildMemInitializer(Decl *ConstructorD,
Scope *S,
CXXScopeSpec &SS,
IdentifierInfo *MemberOrBase,
ParsedType TemplateTypeTy,
const DeclSpec &DS,
SourceLocation IdLoc,
Expr *Init,
SourceLocation EllipsisLoc);
MemInitResult BuildMemberInitializer(ValueDecl *Member,
Expr *Init,
SourceLocation IdLoc);
MemInitResult BuildBaseInitializer(QualType BaseType,
TypeSourceInfo *BaseTInfo,
Expr *Init,
CXXRecordDecl *ClassDecl,
SourceLocation EllipsisLoc);
MemInitResult BuildDelegatingInitializer(TypeSourceInfo *TInfo,
Expr *Init,
CXXRecordDecl *ClassDecl);
bool SetDelegatingInitializer(CXXConstructorDecl *Constructor,
CXXCtorInitializer *Initializer);
bool SetCtorInitializers(CXXConstructorDecl *Constructor, bool AnyErrors,
ArrayRef<CXXCtorInitializer *> Initializers = None);
void SetIvarInitializers(ObjCImplementationDecl *ObjCImplementation);
/// MarkBaseAndMemberDestructorsReferenced - Given a record decl,
/// mark all the non-trivial destructors of its members and bases as
/// referenced.
void MarkBaseAndMemberDestructorsReferenced(SourceLocation Loc,
CXXRecordDecl *Record);
/// \brief The list of classes whose vtables have been used within
/// this translation unit, and the source locations at which the
/// first use occurred.
typedef std::pair<CXXRecordDecl*, SourceLocation> VTableUse;
/// \brief The list of vtables that are required but have not yet been
/// materialized.
SmallVector<VTableUse, 16> VTableUses;
/// \brief The set of classes whose vtables have been used within
/// this translation unit, and a bit that will be true if the vtable is
/// required to be emitted (otherwise, it should be emitted only if needed
/// by code generation).
llvm::DenseMap<CXXRecordDecl *, bool> VTablesUsed;
/// \brief Load any externally-stored vtable uses.
void LoadExternalVTableUses();
typedef LazyVector<CXXRecordDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadDynamicClasses, 2, 2>
DynamicClassesType;
/// \brief A list of all of the dynamic classes in this translation
/// unit.
DynamicClassesType DynamicClasses;
/// \brief Note that the vtable for the given class was used at the
/// given location.
void MarkVTableUsed(SourceLocation Loc, CXXRecordDecl *Class,
bool DefinitionRequired = false);
/// \brief Mark the exception specifications of all virtual member functions
/// in the given class as needed.
void MarkVirtualMemberExceptionSpecsNeeded(SourceLocation Loc,
const CXXRecordDecl *RD);
/// MarkVirtualMembersReferenced - Will mark all members of the given
/// CXXRecordDecl referenced.
void MarkVirtualMembersReferenced(SourceLocation Loc,
const CXXRecordDecl *RD);
/// \brief Define all of the vtables that have been used in this
/// translation unit and reference any virtual members used by those
/// vtables.
///
/// \returns true if any work was done, false otherwise.
bool DefineUsedVTables();
void AddImplicitlyDeclaredMembersToClass(CXXRecordDecl *ClassDecl);
void ActOnMemInitializers(Decl *ConstructorDecl,
SourceLocation ColonLoc,
ArrayRef<CXXCtorInitializer*> MemInits,
bool AnyErrors);
void CheckCompletedCXXClass(CXXRecordDecl *Record);
void ActOnFinishCXXMemberSpecification(Scope* S, SourceLocation RLoc,
Decl *TagDecl,
SourceLocation LBrac,
SourceLocation RBrac,
AttributeList *AttrList);
void ActOnFinishCXXMemberDecls();
void ActOnReenterTemplateScope(Scope *S, Decl *Template);
void ActOnReenterDeclaratorTemplateScope(Scope *S, DeclaratorDecl *D);
void ActOnStartDelayedMemberDeclarations(Scope *S, Decl *Record);
void ActOnStartDelayedCXXMethodDeclaration(Scope *S, Decl *Method);
void ActOnDelayedCXXMethodParameter(Scope *S, Decl *Param);
void ActOnFinishDelayedMemberDeclarations(Scope *S, Decl *Record);
void ActOnFinishDelayedCXXMethodDeclaration(Scope *S, Decl *Method);
void ActOnFinishDelayedMemberInitializers(Decl *Record);
void MarkAsLateParsedTemplate(FunctionDecl *FD, bool Flag = true);
bool IsInsideALocalClassWithinATemplateFunction();
Decl *ActOnStaticAssertDeclaration(SourceLocation StaticAssertLoc,
Expr *AssertExpr,
Expr *AssertMessageExpr,
SourceLocation RParenLoc);
Decl *BuildStaticAssertDeclaration(SourceLocation StaticAssertLoc,
Expr *AssertExpr,
StringLiteral *AssertMessageExpr,
SourceLocation RParenLoc,
bool Failed);
FriendDecl *CheckFriendTypeDecl(SourceLocation LocStart,
SourceLocation FriendLoc,
TypeSourceInfo *TSInfo);
Decl *ActOnFriendTypeDecl(Scope *S, const DeclSpec &DS,
MultiTemplateParamsArg TemplateParams);
NamedDecl *ActOnFriendFunctionDecl(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParams);
QualType CheckConstructorDeclarator(Declarator &D, QualType R,
StorageClass& SC);
void CheckConstructor(CXXConstructorDecl *Constructor);
QualType CheckDestructorDeclarator(Declarator &D, QualType R,
StorageClass& SC);
bool CheckDestructor(CXXDestructorDecl *Destructor);
void CheckConversionDeclarator(Declarator &D, QualType &R,
StorageClass& SC);
Decl *ActOnConversionDeclarator(CXXConversionDecl *Conversion);
void CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD);
void CheckExplicitlyDefaultedMemberExceptionSpec(CXXMethodDecl *MD,
const FunctionProtoType *T);
void CheckDelayedExplicitlyDefaultedMemberExceptionSpecs();
//===--------------------------------------------------------------------===//
// C++ Derived Classes
//
/// ActOnBaseSpecifier - Parsed a base specifier
CXXBaseSpecifier *CheckBaseSpecifier(CXXRecordDecl *Class,
SourceRange SpecifierRange,
bool Virtual, AccessSpecifier Access,
TypeSourceInfo *TInfo,
SourceLocation EllipsisLoc);
BaseResult ActOnBaseSpecifier(Decl *classdecl,
SourceRange SpecifierRange,
ParsedAttributes &Attrs,
bool Virtual, AccessSpecifier Access,
ParsedType basetype,
SourceLocation BaseLoc,
SourceLocation EllipsisLoc);
bool AttachBaseSpecifiers(CXXRecordDecl *Class, CXXBaseSpecifier **Bases,
unsigned NumBases);
void ActOnBaseSpecifiers(Decl *ClassDecl, CXXBaseSpecifier **Bases,
unsigned NumBases);
bool IsDerivedFrom(QualType Derived, QualType Base);
bool IsDerivedFrom(QualType Derived, QualType Base, CXXBasePaths &Paths);
// FIXME: I don't like this name.
void BuildBasePathArray(const CXXBasePaths &Paths, CXXCastPath &BasePath);
bool BasePathInvolvesVirtualBase(const CXXCastPath &BasePath);
bool CheckDerivedToBaseConversion(QualType Derived, QualType Base,
SourceLocation Loc, SourceRange Range,
CXXCastPath *BasePath = 0,
bool IgnoreAccess = false);
bool CheckDerivedToBaseConversion(QualType Derived, QualType Base,
unsigned InaccessibleBaseID,
unsigned AmbigiousBaseConvID,
SourceLocation Loc, SourceRange Range,
DeclarationName Name,
CXXCastPath *BasePath);
std::string getAmbiguousPathsDisplayString(CXXBasePaths &Paths);
bool CheckOverridingFunctionAttributes(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
/// CheckOverridingFunctionReturnType - Checks whether the return types are
/// covariant, according to C++ [class.virtual]p5.
bool CheckOverridingFunctionReturnType(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
/// CheckOverridingFunctionExceptionSpec - Checks whether the exception
/// spec is a subset of base spec.
bool CheckOverridingFunctionExceptionSpec(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
bool CheckPureMethod(CXXMethodDecl *Method, SourceRange InitRange);
/// CheckOverrideControl - Check C++11 override control semantics.
void CheckOverrideControl(Decl *D);
/// CheckForFunctionMarkedFinal - Checks whether a virtual member function
/// overrides a virtual member function marked 'final', according to
/// C++11 [class.virtual]p4.
bool CheckIfOverriddenFunctionIsMarkedFinal(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
//===--------------------------------------------------------------------===//
// C++ Access Control
//
/// \brief The outcome of a C++ access-control check.
enum AccessResult {
/// \brief The entity is accessible.
AR_accessible,
/// \brief The entity is not accessible.
AR_inaccessible,
/// \brief Accessibility depends on a template parameter, so the check
/// must be redone at instantiation time.
/// NOTE(review): inferred from the enumerator name -- confirm.
AR_dependent,
/// \brief The access check has been deferred for later processing.
/// NOTE(review): inferred from the enumerator name -- confirm.
AR_delayed
};
bool SetMemberAccessSpecifier(NamedDecl *MemberDecl,
NamedDecl *PrevMemberDecl,
AccessSpecifier LexicalAS);
AccessResult CheckUnresolvedMemberAccess(UnresolvedMemberExpr *E,
DeclAccessPair FoundDecl);
AccessResult CheckUnresolvedLookupAccess(UnresolvedLookupExpr *E,
DeclAccessPair FoundDecl);
AccessResult CheckAllocationAccess(SourceLocation OperatorLoc,
SourceRange PlacementRange,
CXXRecordDecl *NamingClass,
DeclAccessPair FoundDecl,
bool Diagnose = true);
AccessResult CheckConstructorAccess(SourceLocation Loc,
CXXConstructorDecl *D,
const InitializedEntity &Entity,
AccessSpecifier Access,
bool IsCopyBindingRefToTemp = false);
AccessResult CheckConstructorAccess(SourceLocation Loc,
CXXConstructorDecl *D,
const InitializedEntity &Entity,
AccessSpecifier Access,
const PartialDiagnostic &PDiag);
AccessResult CheckDestructorAccess(SourceLocation Loc,
CXXDestructorDecl *Dtor,
const PartialDiagnostic &PDiag,
QualType objectType = QualType());
AccessResult CheckFriendAccess(NamedDecl *D);
AccessResult CheckMemberOperatorAccess(SourceLocation Loc,
Expr *ObjectExpr,
Expr *ArgExpr,
DeclAccessPair FoundDecl);
AccessResult CheckAddressOfMemberAccess(Expr *OvlExpr,
DeclAccessPair FoundDecl);
AccessResult CheckBaseClassAccess(SourceLocation AccessLoc,
QualType Base, QualType Derived,
const CXXBasePath &Path,
unsigned DiagID,
bool ForceCheck = false,
bool ForceUnprivileged = false);
void CheckLookupAccess(const LookupResult &R);
bool IsSimplyAccessible(NamedDecl *decl, DeclContext *Ctx);
bool isSpecialMemberAccessibleForDeletion(CXXMethodDecl *decl,
AccessSpecifier access,
QualType objectType);
void HandleDependentAccessCheck(const DependentDiagnostic &DD,
const MultiLevelTemplateArgumentList &TemplateArgs);
void PerformDependentDiagnostics(const DeclContext *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs);
void HandleDelayedAccessCheck(sema::DelayedDiagnostic &DD, Decl *Ctx);
/// \brief When true, access checking violations are treated as SFINAE
/// failures rather than hard errors.
bool AccessCheckingSFINAE;
/// \brief Identifies which kind of entity was declared with an abstract
/// class type; presumably used as a %select index in the corresponding
/// diagnostic -- TODO confirm against the diagnostic definition.
enum AbstractDiagSelID {
/// \brief No specific entity kind (sentinel value).
AbstractNone = -1,
/// \brief Abstract class used as a function return type.
AbstractReturnType,
/// \brief Abstract class used as a function parameter type.
AbstractParamType,
/// \brief Abstract class used as a variable type.
AbstractVariableType,
/// \brief Abstract class used as a field (data member) type.
AbstractFieldType,
/// \brief Abstract class used as an Objective-C instance variable type.
AbstractIvarType,
/// \brief Abstract class used as an array element type.
AbstractArrayType
};
bool RequireNonAbstractType(SourceLocation Loc, QualType T,
TypeDiagnoser &Diagnoser);
/// \brief Convenience overload: require that \p T not be an abstract class
/// type, diagnosing with \p DiagID and one formatting argument.
template <typename T1>
bool RequireNonAbstractType(SourceLocation Loc, QualType T, unsigned DiagID,
                            const T1 &Arg1) {
  // Bind the diagnostic ID and its argument into a TypeDiagnoser, then
  // defer to the general overload.
  BoundTypeDiagnoser1<T1> BoundDiagnoser(DiagID, Arg1);
  return RequireNonAbstractType(Loc, T, BoundDiagnoser);
}
/// \brief Convenience overload: require that \p T not be an abstract class
/// type, diagnosing with \p DiagID and two formatting arguments.
template <typename T1, typename T2>
bool RequireNonAbstractType(SourceLocation Loc, QualType T, unsigned DiagID,
                            const T1 &Arg1, const T2 &Arg2) {
  // Bind the diagnostic ID and its arguments into a TypeDiagnoser, then
  // defer to the general overload.
  BoundTypeDiagnoser2<T1, T2> BoundDiagnoser(DiagID, Arg1, Arg2);
  return RequireNonAbstractType(Loc, T, BoundDiagnoser);
}
/// \brief Convenience overload: require that \p T not be an abstract class
/// type, diagnosing with \p DiagID and three formatting arguments.
template <typename T1, typename T2, typename T3>
bool RequireNonAbstractType(SourceLocation Loc, QualType T, unsigned DiagID,
                            const T1 &Arg1, const T2 &Arg2, const T3 &Arg3) {
  // Bind the diagnostic ID and its arguments into a TypeDiagnoser, then
  // defer to the general overload.
  BoundTypeDiagnoser3<T1, T2, T3> BoundDiagnoser(DiagID, Arg1, Arg2, Arg3);
  return RequireNonAbstractType(Loc, T, BoundDiagnoser);
}
void DiagnoseAbstractType(const CXXRecordDecl *RD);
bool RequireNonAbstractType(SourceLocation Loc, QualType T, unsigned DiagID,
AbstractDiagSelID SelID = AbstractNone);
//===--------------------------------------------------------------------===//
// C++ Overloaded Operators [C++ 13.5]
//
bool CheckOverloadedOperatorDeclaration(FunctionDecl *FnDecl);
bool CheckLiteralOperatorDeclaration(FunctionDecl *FnDecl);
//===--------------------------------------------------------------------===//
// C++ Templates [C++ 14]
//
void FilterAcceptableTemplateNames(LookupResult &R,
bool AllowFunctionTemplates = true);
bool hasAnyAcceptableTemplateNames(LookupResult &R,
bool AllowFunctionTemplates = true);
void LookupTemplateName(LookupResult &R, Scope *S, CXXScopeSpec &SS,
QualType ObjectType, bool EnteringContext,
bool &MemberOfUnknownSpecialization);
TemplateNameKind isTemplateName(Scope *S,
CXXScopeSpec &SS,
bool hasTemplateKeyword,
UnqualifiedId &Name,
ParsedType ObjectType,
bool EnteringContext,
TemplateTy &Template,
bool &MemberOfUnknownSpecialization);
bool DiagnoseUnknownTemplateName(const IdentifierInfo &II,
SourceLocation IILoc,
Scope *S,
const CXXScopeSpec *SS,
TemplateTy &SuggestedTemplate,
TemplateNameKind &SuggestedKind);
void DiagnoseTemplateParameterShadow(SourceLocation Loc, Decl *PrevDecl);
TemplateDecl *AdjustDeclIfTemplate(Decl *&Decl);
Decl *ActOnTypeParameter(Scope *S, bool Typename, bool Ellipsis,
SourceLocation EllipsisLoc,
SourceLocation KeyLoc,
IdentifierInfo *ParamName,
SourceLocation ParamNameLoc,
unsigned Depth, unsigned Position,
SourceLocation EqualLoc,
ParsedType DefaultArg);
QualType CheckNonTypeTemplateParameterType(QualType T, SourceLocation Loc);
Decl *ActOnNonTypeTemplateParameter(Scope *S, Declarator &D,
unsigned Depth,
unsigned Position,
SourceLocation EqualLoc,
Expr *DefaultArg);
Decl *ActOnTemplateTemplateParameter(Scope *S,
SourceLocation TmpLoc,
TemplateParameterList *Params,
SourceLocation EllipsisLoc,
IdentifierInfo *ParamName,
SourceLocation ParamNameLoc,
unsigned Depth,
unsigned Position,
SourceLocation EqualLoc,
ParsedTemplateArgument DefaultArg);
TemplateParameterList *
ActOnTemplateParameterList(unsigned Depth,
SourceLocation ExportLoc,
SourceLocation TemplateLoc,
SourceLocation LAngleLoc,
Decl **Params, unsigned NumParams,
SourceLocation RAngleLoc);
/// \brief The context in which we are checking a template parameter
/// list.
enum TemplateParamListContext {
/// \brief Parameter list of a class template.
TPC_ClassTemplate,
/// \brief Parameter list of a function template.
TPC_FunctionTemplate,
/// \brief Parameter list of a member of a class template.
TPC_ClassTemplateMember,
/// \brief Parameter list of a friend function template declaration.
TPC_FriendFunctionTemplate,
/// \brief Parameter list of a friend function template definition.
TPC_FriendFunctionTemplateDefinition,
/// \brief Parameter list of an alias template.
TPC_TypeAliasTemplate
};
bool CheckTemplateParameterList(TemplateParameterList *NewParams,
TemplateParameterList *OldParams,
TemplateParamListContext TPC);
TemplateParameterList *
MatchTemplateParametersToScopeSpecifier(SourceLocation DeclStartLoc,
SourceLocation DeclLoc,
const CXXScopeSpec &SS,
TemplateParameterList **ParamLists,
unsigned NumParamLists,
bool IsFriend,
bool &IsExplicitSpecialization,
bool &Invalid);
DeclResult CheckClassTemplate(Scope *S, unsigned TagSpec, TagUseKind TUK,
SourceLocation KWLoc, CXXScopeSpec &SS,
IdentifierInfo *Name, SourceLocation NameLoc,
AttributeList *Attr,
TemplateParameterList *TemplateParams,
AccessSpecifier AS,
SourceLocation ModulePrivateLoc,
unsigned NumOuterTemplateParamLists,
TemplateParameterList **OuterTemplateParamLists);
void translateTemplateArguments(const ASTTemplateArgsPtr &In,
TemplateArgumentListInfo &Out);
void NoteAllFoundTemplates(TemplateName Name);
QualType CheckTemplateIdType(TemplateName Template,
SourceLocation TemplateLoc,
TemplateArgumentListInfo &TemplateArgs);
TypeResult
ActOnTemplateIdType(CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
TemplateTy Template, SourceLocation TemplateLoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgs,
SourceLocation RAngleLoc,
bool IsCtorOrDtorName = false);
/// \brief Parsed an elaborated-type-specifier that refers to a template-id,
/// such as \c class T::template apply<U>.
TypeResult ActOnTagTemplateIdType(TagUseKind TUK,
TypeSpecifierType TagSpec,
SourceLocation TagLoc,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
TemplateTy TemplateD,
SourceLocation TemplateLoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgsIn,
SourceLocation RAngleLoc);
ExprResult BuildTemplateIdExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
LookupResult &R,
bool RequiresADL,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult BuildQualifiedTemplateIdExpr(CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs);
TemplateNameKind ActOnDependentTemplateName(Scope *S,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
UnqualifiedId &Name,
ParsedType ObjectType,
bool EnteringContext,
TemplateTy &Template);
DeclResult
ActOnClassTemplateSpecialization(Scope *S, unsigned TagSpec, TagUseKind TUK,
SourceLocation KWLoc,
SourceLocation ModulePrivateLoc,
CXXScopeSpec &SS,
TemplateTy Template,
SourceLocation TemplateNameLoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgs,
SourceLocation RAngleLoc,
AttributeList *Attr,
MultiTemplateParamsArg TemplateParameterLists);
Decl *ActOnTemplateDeclarator(Scope *S,
MultiTemplateParamsArg TemplateParameterLists,
Declarator &D);
Decl *ActOnStartOfFunctionTemplateDef(Scope *FnBodyScope,
MultiTemplateParamsArg TemplateParameterLists,
Declarator &D);
bool
CheckSpecializationInstantiationRedecl(SourceLocation NewLoc,
TemplateSpecializationKind NewTSK,
NamedDecl *PrevDecl,
TemplateSpecializationKind PrevTSK,
SourceLocation PrevPtOfInstantiation,
bool &SuppressNew);
bool CheckDependentFunctionTemplateSpecialization(FunctionDecl *FD,
const TemplateArgumentListInfo &ExplicitTemplateArgs,
LookupResult &Previous);
bool CheckFunctionTemplateSpecialization(FunctionDecl *FD,
TemplateArgumentListInfo *ExplicitTemplateArgs,
LookupResult &Previous);
bool CheckMemberSpecialization(NamedDecl *Member, LookupResult &Previous);
DeclResult
ActOnExplicitInstantiation(Scope *S,
SourceLocation ExternLoc,
SourceLocation TemplateLoc,
unsigned TagSpec,
SourceLocation KWLoc,
const CXXScopeSpec &SS,
TemplateTy Template,
SourceLocation TemplateNameLoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgs,
SourceLocation RAngleLoc,
AttributeList *Attr);
DeclResult
ActOnExplicitInstantiation(Scope *S,
SourceLocation ExternLoc,
SourceLocation TemplateLoc,
unsigned TagSpec,
SourceLocation KWLoc,
CXXScopeSpec &SS,
IdentifierInfo *Name,
SourceLocation NameLoc,
AttributeList *Attr);
DeclResult ActOnExplicitInstantiation(Scope *S,
SourceLocation ExternLoc,
SourceLocation TemplateLoc,
Declarator &D);
TemplateArgumentLoc
SubstDefaultTemplateArgumentIfAvailable(TemplateDecl *Template,
SourceLocation TemplateLoc,
SourceLocation RAngleLoc,
Decl *Param,
SmallVectorImpl<TemplateArgument> &Converted);
/// \brief Specifies the context in which a particular template
/// argument is being checked.
///
/// Passed as the \c CTAK parameter of the CheckTemplateArgument
/// overloads declared below.
enum CheckTemplateArgumentKind {
/// \brief The template argument was specified in the code or was
/// instantiated with some deduced template arguments.
CTAK_Specified,
/// \brief The template argument was deduced via template argument
/// deduction.
CTAK_Deduced,
/// \brief The template argument was deduced from an array bound
/// via template argument deduction.
CTAK_DeducedFromArrayBound
};
bool CheckTemplateArgument(NamedDecl *Param,
const TemplateArgumentLoc &Arg,
NamedDecl *Template,
SourceLocation TemplateLoc,
SourceLocation RAngleLoc,
unsigned ArgumentPackIndex,
SmallVectorImpl<TemplateArgument> &Converted,
CheckTemplateArgumentKind CTAK = CTAK_Specified);
/// \brief Check that the given template arguments can be provided to
/// the given template, converting the arguments along the way.
///
/// \param Template The template to which the template arguments are being
/// provided.
///
/// \param TemplateLoc The location of the template name in the source.
///
/// \param TemplateArgs The list of template arguments. If the template is
/// a template template parameter, this function may extend the set of
/// template arguments to also include substituted, defaulted template
/// arguments.
///
/// \param PartialTemplateArgs True if the list of template arguments is
/// intentionally partial, e.g., because we're checking just the initial
/// set of template arguments.
///
/// \param Converted Will receive the converted, canonicalized template
/// arguments.
///
///
/// \param ExpansionIntoFixedList If non-NULL, will be set true to indicate
/// when the template arguments contain a pack expansion that is being
/// expanded into a fixed parameter list.
///
/// \returns True if an error occurred, false otherwise.
bool CheckTemplateArgumentList(TemplateDecl *Template,
SourceLocation TemplateLoc,
TemplateArgumentListInfo &TemplateArgs,
bool PartialTemplateArgs,
SmallVectorImpl<TemplateArgument> &Converted,
bool *ExpansionIntoFixedList = 0);
bool CheckTemplateTypeArgument(TemplateTypeParmDecl *Param,
const TemplateArgumentLoc &Arg,
SmallVectorImpl<TemplateArgument> &Converted);
bool CheckTemplateArgument(TemplateTypeParmDecl *Param,
TypeSourceInfo *Arg);
ExprResult CheckTemplateArgument(NonTypeTemplateParmDecl *Param,
QualType InstantiatedParamType, Expr *Arg,
TemplateArgument &Converted,
CheckTemplateArgumentKind CTAK = CTAK_Specified);
bool CheckTemplateArgument(TemplateTemplateParmDecl *Param,
const TemplateArgumentLoc &Arg,
unsigned ArgumentPackIndex);
ExprResult
BuildExpressionFromDeclTemplateArgument(const TemplateArgument &Arg,
QualType ParamType,
SourceLocation Loc);
ExprResult
BuildExpressionFromIntegralTemplateArgument(const TemplateArgument &Arg,
SourceLocation Loc);
/// \brief Enumeration describing how template parameter lists are compared
/// for equality.
///
/// Used as the \c Kind parameter of TemplateParameterListsAreEqual to
/// select how strictly the two lists must match.
enum TemplateParameterListEqualKind {
/// \brief We are matching the template parameter lists of two templates
/// that might be redeclarations.
///
/// \code
/// template<typename T> struct X;
/// template<typename T> struct X;
/// \endcode
TPL_TemplateMatch,
/// \brief We are matching the template parameter lists of two template
/// template parameters as part of matching the template parameter lists
/// of two templates that might be redeclarations.
///
/// \code
/// template<template<int I> class TT> struct X;
/// template<template<int Value> class Other> struct X;
/// \endcode
TPL_TemplateTemplateParmMatch,
/// \brief We are matching the template parameter lists of a template
/// template argument against the template parameter lists of a template
/// template parameter.
///
/// \code
/// template<template<int Value> class Metafun> struct X;
/// template<int Value> struct integer_c;
/// X<integer_c> xic;
/// \endcode
TPL_TemplateTemplateArgumentMatch
};
bool TemplateParameterListsAreEqual(TemplateParameterList *New,
TemplateParameterList *Old,
bool Complain,
TemplateParameterListEqualKind Kind,
SourceLocation TemplateArgLoc
= SourceLocation());
bool CheckTemplateDeclScope(Scope *S, TemplateParameterList *TemplateParams);
/// \brief Called when the parser has parsed a C++ typename
/// specifier, e.g., "typename T::type".
///
/// \param S The scope in which this typename type occurs.
/// \param TypenameLoc the location of the 'typename' keyword
/// \param SS the nested-name-specifier following the typename (e.g., 'T::').
/// \param II the identifier we're retrieving (e.g., 'type' in the example).
/// \param IdLoc the location of the identifier.
TypeResult
ActOnTypenameType(Scope *S, SourceLocation TypenameLoc,
const CXXScopeSpec &SS, const IdentifierInfo &II,
SourceLocation IdLoc);
/// \brief Called when the parser has parsed a C++ typename
/// specifier that ends in a template-id, e.g.,
/// "typename MetaFun::template apply<T1, T2>".
///
/// \param S The scope in which this typename type occurs.
/// \param TypenameLoc the location of the 'typename' keyword
/// \param SS the nested-name-specifier following the typename (e.g., 'T::').
/// \param TemplateLoc the location of the 'template' keyword, if any.
/// \param TemplateName The template name.
/// \param TemplateNameLoc The location of the template name.
/// \param LAngleLoc The location of the opening angle bracket ('<').
/// \param TemplateArgs The template arguments.
/// \param RAngleLoc The location of the closing angle bracket ('>').
TypeResult
ActOnTypenameType(Scope *S, SourceLocation TypenameLoc,
const CXXScopeSpec &SS,
SourceLocation TemplateLoc,
TemplateTy TemplateName,
SourceLocation TemplateNameLoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgs,
SourceLocation RAngleLoc);
QualType CheckTypenameType(ElaboratedTypeKeyword Keyword,
SourceLocation KeywordLoc,
NestedNameSpecifierLoc QualifierLoc,
const IdentifierInfo &II,
SourceLocation IILoc);
TypeSourceInfo *RebuildTypeInCurrentInstantiation(TypeSourceInfo *T,
SourceLocation Loc,
DeclarationName Name);
bool RebuildNestedNameSpecifierInCurrentInstantiation(CXXScopeSpec &SS);
ExprResult RebuildExprInCurrentInstantiation(Expr *E);
bool RebuildTemplateParamsInCurrentInstantiation(
TemplateParameterList *Params);
std::string
getTemplateArgumentBindingsText(const TemplateParameterList *Params,
const TemplateArgumentList &Args);
std::string
getTemplateArgumentBindingsText(const TemplateParameterList *Params,
const TemplateArgument *Args,
unsigned NumArgs);
//===--------------------------------------------------------------------===//
// C++ Variadic Templates (C++0x [temp.variadic])
//===--------------------------------------------------------------------===//
/// \brief The context in which an unexpanded parameter pack is
/// being diagnosed.
///
/// Note that the values of this enumeration line up with the first
/// argument to the \c err_unexpanded_parameter_pack diagnostic.
enum UnexpandedParameterPackContext {
/// \brief An arbitrary expression.
UPPC_Expression = 0,
/// \brief The base type of a class type.
UPPC_BaseType,
/// \brief The type of an arbitrary declaration.
UPPC_DeclarationType,
/// \brief The type of a data member.
UPPC_DataMemberType,
/// \brief The size of a bit-field.
UPPC_BitFieldWidth,
/// \brief The expression in a static assertion.
UPPC_StaticAssertExpression,
/// \brief The fixed underlying type of an enumeration.
UPPC_FixedUnderlyingType,
/// \brief The enumerator value.
UPPC_EnumeratorValue,
/// \brief A using declaration.
UPPC_UsingDeclaration,
/// \brief A friend declaration.
UPPC_FriendDeclaration,
/// \brief A declaration qualifier.
UPPC_DeclarationQualifier,
/// \brief An initializer.
UPPC_Initializer,
/// \brief A default argument.
UPPC_DefaultArgument,
/// \brief The type of a non-type template parameter.
UPPC_NonTypeTemplateParameterType,
/// \brief The type of an exception.
UPPC_ExceptionType,
/// \brief Partial specialization.
UPPC_PartialSpecialization,
/// \brief Microsoft __if_exists.
UPPC_IfExists,
/// \brief Microsoft __if_not_exists.
UPPC_IfNotExists,
/// \brief Lambda expression.
UPPC_Lambda,
/// \brief Block expression.
UPPC_Block
};
/// \brief Diagnose unexpanded parameter packs.
///
/// \param Loc The location at which we should emit the diagnostic.
///
/// \param UPPC The context in which we are diagnosing unexpanded
/// parameter packs.
///
/// \param Unexpanded the set of unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPacks(SourceLocation Loc,
UnexpandedParameterPackContext UPPC,
ArrayRef<UnexpandedParameterPack> Unexpanded);
/// \brief If the given type contains an unexpanded parameter pack,
/// diagnose the error.
///
/// \param Loc The source location where a diagnostic should be emitted.
///
/// \param T The type that is being checked for unexpanded parameter
/// packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(SourceLocation Loc, TypeSourceInfo *T,
UnexpandedParameterPackContext UPPC);
/// \brief If the given expression contains an unexpanded parameter
/// pack, diagnose the error.
///
/// \param E The expression that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(Expr *E,
UnexpandedParameterPackContext UPPC = UPPC_Expression);
/// \brief If the given nested-name-specifier contains an unexpanded
/// parameter pack, diagnose the error.
///
/// \param SS The nested-name-specifier that is being checked for
/// unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(const CXXScopeSpec &SS,
UnexpandedParameterPackContext UPPC);
/// \brief If the given name contains an unexpanded parameter pack,
/// diagnose the error.
///
/// \param NameInfo The name (with source location information) that
/// is being checked for unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(const DeclarationNameInfo &NameInfo,
UnexpandedParameterPackContext UPPC);
/// \brief If the given template name contains an unexpanded parameter pack,
/// diagnose the error.
///
/// \param Loc The location of the template name.
///
/// \param Template The template name that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(SourceLocation Loc,
TemplateName Template,
UnexpandedParameterPackContext UPPC);
/// \brief If the given template argument contains an unexpanded parameter
/// pack, diagnose the error.
///
/// \param Arg The template argument that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(TemplateArgumentLoc Arg,
UnexpandedParameterPackContext UPPC);
/// \brief Collect the set of unexpanded parameter packs within the given
/// template argument.
///
/// \param Arg The template argument that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(TemplateArgument Arg,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// \brief Collect the set of unexpanded parameter packs within the given
/// template argument.
///
/// \param Arg The template argument that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(TemplateArgumentLoc Arg,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// \brief Collect the set of unexpanded parameter packs within the given
/// type.
///
/// \param T The type that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(QualType T,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// \brief Collect the set of unexpanded parameter packs within the given
/// type.
///
/// \param TL The type that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(TypeLoc TL,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// \brief Collect the set of unexpanded parameter packs within the given
/// nested-name-specifier.
///
/// \param SS The nested-name-specifier that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(CXXScopeSpec &SS,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// \brief Collect the set of unexpanded parameter packs within the given
/// name.
///
/// \param NameInfo The name that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(const DeclarationNameInfo &NameInfo,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// \brief Invoked when parsing a template argument followed by an
/// ellipsis, which creates a pack expansion.
///
/// \param Arg The template argument preceding the ellipsis, which
/// may already be invalid.
///
/// \param EllipsisLoc The location of the ellipsis.
ParsedTemplateArgument ActOnPackExpansion(const ParsedTemplateArgument &Arg,
SourceLocation EllipsisLoc);
/// \brief Invoked when parsing a type followed by an ellipsis, which
/// creates a pack expansion.
///
/// \param Type The type preceding the ellipsis, which will become
/// the pattern of the pack expansion.
///
/// \param EllipsisLoc The location of the ellipsis.
TypeResult ActOnPackExpansion(ParsedType Type, SourceLocation EllipsisLoc);
/// \brief Construct a pack expansion type from the pattern of the pack
/// expansion.
TypeSourceInfo *CheckPackExpansion(TypeSourceInfo *Pattern,
SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions);
/// \brief Construct a pack expansion type from the pattern of the pack
/// expansion.
QualType CheckPackExpansion(QualType Pattern,
SourceRange PatternRange,
SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions);
/// \brief Invoked when parsing an expression followed by an ellipsis, which
/// creates a pack expansion.
///
/// \param Pattern The expression preceding the ellipsis, which will become
/// the pattern of the pack expansion.
///
/// \param EllipsisLoc The location of the ellipsis.
ExprResult ActOnPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc);
/// \brief Invoked when parsing an expression followed by an ellipsis, which
/// creates a pack expansion.
///
/// \param Pattern The expression preceding the ellipsis, which will become
/// the pattern of the pack expansion.
///
/// \param EllipsisLoc The location of the ellipsis.
ExprResult CheckPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions);
/// \brief Determine whether we could expand a pack expansion with the
/// given set of parameter packs into separate arguments by repeatedly
/// transforming the pattern.
///
/// \param EllipsisLoc The location of the ellipsis that identifies the
/// pack expansion.
///
/// \param PatternRange The source range that covers the entire pattern of
/// the pack expansion.
///
/// \param Unexpanded The set of unexpanded parameter packs within the
/// pattern.
///
/// \param ShouldExpand Will be set to \c true if the transformer should
/// expand the corresponding pack expansions into separate arguments. When
/// set, \c NumExpansions must also be set.
///
/// \param RetainExpansion Whether the caller should add an unexpanded
/// pack expansion after all of the expanded arguments. This is used
/// when extending explicitly-specified template argument packs per
/// C++0x [temp.arg.explicit]p9.
///
/// \param NumExpansions The number of separate arguments that will be in
/// the expanded form of the corresponding pack expansion. This is both an
/// input and an output parameter, which can be set by the caller if the
/// number of expansions is known a priori (e.g., due to a prior substitution)
/// and will be set by the callee when the number of expansions is known.
/// The callee must set this value when \c ShouldExpand is \c true; it may
/// set this value in other cases.
///
/// \returns true if an error occurred (e.g., because the parameter packs
/// are to be instantiated with arguments of different lengths), false
/// otherwise. If false, \c ShouldExpand (and possibly \c NumExpansions)
/// must be set.
bool CheckParameterPacksForExpansion(SourceLocation EllipsisLoc,
SourceRange PatternRange,
ArrayRef<UnexpandedParameterPack> Unexpanded,
const MultiLevelTemplateArgumentList &TemplateArgs,
bool &ShouldExpand,
bool &RetainExpansion,
Optional<unsigned> &NumExpansions);
/// \brief Determine the number of arguments in the given pack expansion
/// type.
///
/// This routine assumes that the number of arguments in the expansion is
/// consistent across all of the unexpanded parameter packs in its pattern.
///
/// Returns an empty Optional if the type can't be expanded.
Optional<unsigned> getNumArgumentsInExpansion(QualType T,
const MultiLevelTemplateArgumentList &TemplateArgs);
/// \brief Determine whether the given declarator contains any unexpanded
/// parameter packs.
///
/// This routine is used by the parser to disambiguate function declarators
/// with an ellipsis prior to the ')', e.g.,
///
/// \code
/// void f(T...);
/// \endcode
///
/// To determine whether we have an (unnamed) function parameter pack or
/// a variadic function.
///
/// \returns true if the declarator contains any unexpanded parameter packs,
/// false otherwise.
bool containsUnexpandedParameterPacks(Declarator &D);
//===--------------------------------------------------------------------===//
// C++ Template Argument Deduction (C++ [temp.deduct])
//===--------------------------------------------------------------------===//
/// \brief Describes the result of template argument deduction.
///
/// The TemplateDeductionResult enumeration describes the result of
/// template argument deduction, as returned from
/// DeduceTemplateArguments(). The separate TemplateDeductionInfo
/// structure provides additional information about the results of
/// template argument deduction, e.g., the deduced template argument
/// list (if successful) or the specific template parameters or
/// deduced arguments that were involved in the failure.
enum TemplateDeductionResult {
/// \brief Template argument deduction was successful.
TDK_Success = 0,
/// \brief The declaration was invalid; do nothing.
TDK_Invalid,
/// \brief Template argument deduction exceeded the maximum template
/// instantiation depth (which has already been diagnosed).
TDK_InstantiationDepth,
/// \brief Template argument deduction did not deduce a value
/// for every template parameter.
TDK_Incomplete,
/// \brief Template argument deduction produced inconsistent
/// deduced values for the given template parameter.
TDK_Inconsistent,
/// \brief Template argument deduction failed due to inconsistent
/// cv-qualifiers on a template parameter type that would
/// otherwise be deduced, e.g., we tried to deduce T in "const T"
/// but were given a non-const "X".
TDK_Underqualified,
/// \brief Substitution of the deduced template argument values
/// resulted in an error.
TDK_SubstitutionFailure,
/// \brief A non-dependent component of the parameter did not match the
/// corresponding component of the argument.
TDK_NonDeducedMismatch,
/// \brief When performing template argument deduction for a function
/// template, there were too many call arguments.
TDK_TooManyArguments,
/// \brief When performing template argument deduction for a function
/// template, there were too few call arguments.
TDK_TooFewArguments,
/// \brief The explicitly-specified template arguments were not valid
/// template arguments for the given template.
TDK_InvalidExplicitArguments,
/// \brief The arguments included an overloaded function name that could
/// not be resolved to a suitable function.
TDK_FailedOverloadResolution,
/// \brief Deduction failed; that's all we know.
TDK_MiscellaneousDeductionFailure
};
TemplateDeductionResult
DeduceTemplateArguments(ClassTemplatePartialSpecializationDecl *Partial,
const TemplateArgumentList &TemplateArgs,
sema::TemplateDeductionInfo &Info);
TemplateDeductionResult
SubstituteExplicitTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo &ExplicitTemplateArgs,
SmallVectorImpl<DeducedTemplateArgument> &Deduced,
SmallVectorImpl<QualType> &ParamTypes,
QualType *FunctionType,
sema::TemplateDeductionInfo &Info);
/// \brief A function argument from which we performed template argument
/// deduction for a call.
struct OriginalCallArg {
OriginalCallArg(QualType OriginalParamType,
unsigned ArgIdx,
QualType OriginalArgType)
: OriginalParamType(OriginalParamType), ArgIdx(ArgIdx),
OriginalArgType(OriginalArgType) { }
QualType OriginalParamType; ///< The parameter type against which deduction was performed.
unsigned ArgIdx;            ///< Index of the argument within the call's argument list.
QualType OriginalArgType;   ///< The original (pre-deduction) type of the call argument.
};
TemplateDeductionResult
FinishTemplateArgumentDeduction(FunctionTemplateDecl *FunctionTemplate,
SmallVectorImpl<DeducedTemplateArgument> &Deduced,
unsigned NumExplicitlySpecified,
FunctionDecl *&Specialization,
sema::TemplateDeductionInfo &Info,
SmallVectorImpl<OriginalCallArg> const *OriginalCallArgs = 0);
TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo *ExplicitTemplateArgs,
ArrayRef<Expr *> Args,
FunctionDecl *&Specialization,
sema::TemplateDeductionInfo &Info);
TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo *ExplicitTemplateArgs,
QualType ArgFunctionType,
FunctionDecl *&Specialization,
sema::TemplateDeductionInfo &Info,
bool InOverloadResolution = false);
TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
QualType ToType,
CXXConversionDecl *&Specialization,
sema::TemplateDeductionInfo &Info);
TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo *ExplicitTemplateArgs,
FunctionDecl *&Specialization,
sema::TemplateDeductionInfo &Info,
bool InOverloadResolution = false);
/// \brief Result type of DeduceAutoType.
enum DeduceAutoResult {
/// \brief 'auto' deduction succeeded.
DAR_Succeeded,
/// \brief 'auto' deduction failed; the caller should diagnose.
DAR_Failed,
/// \brief 'auto' deduction failed and a diagnostic has already been emitted.
DAR_FailedAlreadyDiagnosed
};
DeduceAutoResult DeduceAutoType(TypeSourceInfo *AutoType, Expr *&Initializer,
QualType &Result);
DeduceAutoResult DeduceAutoType(TypeLoc AutoTypeLoc, Expr *&Initializer,
QualType &Result);
QualType SubstAutoType(QualType TypeWithAuto, QualType Replacement);
void DiagnoseAutoDeductionFailure(VarDecl *VDecl, Expr *Init);
bool DeduceReturnType(FunctionDecl *FD, SourceLocation Loc,
bool Diagnose = true);
bool DeduceFunctionTypeFromReturnExpr(FunctionDecl *FD,
SourceLocation ReturnLoc,
Expr *&RetExpr, AutoType *AT);
FunctionTemplateDecl *getMoreSpecializedTemplate(FunctionTemplateDecl *FT1,
FunctionTemplateDecl *FT2,
SourceLocation Loc,
TemplatePartialOrderingContext TPOC,
unsigned NumCallArguments);
UnresolvedSetIterator getMostSpecialized(UnresolvedSetIterator SBegin,
UnresolvedSetIterator SEnd,
TemplatePartialOrderingContext TPOC,
unsigned NumCallArguments,
SourceLocation Loc,
const PartialDiagnostic &NoneDiag,
const PartialDiagnostic &AmbigDiag,
const PartialDiagnostic &CandidateDiag,
bool Complain = true,
QualType TargetType = QualType());
ClassTemplatePartialSpecializationDecl *
getMoreSpecializedPartialSpecialization(
ClassTemplatePartialSpecializationDecl *PS1,
ClassTemplatePartialSpecializationDecl *PS2,
SourceLocation Loc);
void MarkUsedTemplateParameters(const TemplateArgumentList &TemplateArgs,
bool OnlyDeduced,
unsigned Depth,
llvm::SmallBitVector &Used);
/// \brief Convenience overload that forwards to the static
/// MarkDeducedTemplateParameters below, supplying this semantic analysis
/// object's ASTContext.
void MarkDeducedTemplateParameters(
const FunctionTemplateDecl *FunctionTemplate,
llvm::SmallBitVector &Deduced) {
return MarkDeducedTemplateParameters(Context, FunctionTemplate, Deduced);
}
static void MarkDeducedTemplateParameters(ASTContext &Ctx,
const FunctionTemplateDecl *FunctionTemplate,
llvm::SmallBitVector &Deduced);
//===--------------------------------------------------------------------===//
// C++ Template Instantiation
//
MultiLevelTemplateArgumentList getTemplateInstantiationArgs(NamedDecl *D,
const TemplateArgumentList *Innermost = 0,
bool RelativeToPrimary = false,
const FunctionDecl *Pattern = 0);
/// \brief A template instantiation that is currently in progress.
struct ActiveTemplateInstantiation {
/// \brief The kind of template instantiation we are performing.
enum InstantiationKind {
/// We are instantiating a template declaration. The entity is
/// the declaration we're instantiating (e.g., a CXXRecordDecl).
TemplateInstantiation,
/// We are instantiating a default argument for a template
/// parameter. The Entity is the template, and
/// TemplateArgs/NumTemplateArguments provides the template
/// arguments as specified.
/// FIXME: Use a TemplateArgumentList
DefaultTemplateArgumentInstantiation,
/// We are instantiating a default argument for a function.
/// The Entity is the ParmVarDecl, and TemplateArgs/NumTemplateArgs
/// provides the template arguments as specified.
DefaultFunctionArgumentInstantiation,
/// We are substituting explicit template arguments provided for
/// a function template. The entity is a FunctionTemplateDecl.
ExplicitTemplateArgumentSubstitution,
/// We are substituting template arguments determined as part of
/// template argument deduction for either a class template
/// partial specialization or a function template. The
/// Entity is either a ClassTemplatePartialSpecializationDecl or
/// a FunctionTemplateDecl.
DeducedTemplateArgumentSubstitution,
/// We are substituting prior template arguments into a new
/// template parameter. The template parameter itself is either a
/// NonTypeTemplateParmDecl or a TemplateTemplateParmDecl.
PriorTemplateArgumentSubstitution,
/// We are checking the validity of a default template argument that
/// has been used when naming a template-id.
DefaultTemplateArgumentChecking,
/// We are instantiating the exception specification for a function
/// template which was deferred until it was needed.
ExceptionSpecInstantiation
} Kind;
/// \brief The point of instantiation within the source code.
SourceLocation PointOfInstantiation;
/// \brief The template (or partial specialization) in which we are
/// performing the instantiation, for substitutions of prior template
/// arguments.
NamedDecl *Template;
/// \brief The entity that is being instantiated.
Decl *Entity;
/// \brief The list of template arguments we are substituting, if they
/// are not part of the entity.
const TemplateArgument *TemplateArgs;
/// \brief The number of template arguments in TemplateArgs.
unsigned NumTemplateArgs;
/// \brief The template deduction info object associated with the
/// substitution or checking of explicit or deduced template arguments.
sema::TemplateDeductionInfo *DeductionInfo;
/// \brief The source range that covers the construct that causes
/// the instantiation, e.g., the template-id that causes a class
/// template instantiation.
SourceRange InstantiationRange;
// Note: PointOfInstantiation and InstantiationRange are left
// default-constructed (invalid) here.
ActiveTemplateInstantiation()
: Kind(TemplateInstantiation), Template(0), Entity(0), TemplateArgs(0),
NumTemplateArgs(0), DeductionInfo(0) {}
/// \brief Determines whether this template is an actual instantiation
/// that should be counted toward the maximum instantiation depth.
bool isInstantiationRecord() const;
friend bool operator==(const ActiveTemplateInstantiation &X,
const ActiveTemplateInstantiation &Y) {
if (X.Kind != Y.Kind)
return false;
if (X.Entity != Y.Entity)
return false;
switch (X.Kind) {
case TemplateInstantiation:
case ExceptionSpecInstantiation:
// Kind + Entity alone identify these instantiations.
return true;
case PriorTemplateArgumentSubstitution:
case DefaultTemplateArgumentChecking:
if (X.Template != Y.Template)
return false;
// Fall through
// For the remaining kinds, the template argument list (compared by
// pointer identity) distinguishes instantiations.
case DefaultTemplateArgumentInstantiation:
case ExplicitTemplateArgumentSubstitution:
case DeducedTemplateArgumentSubstitution:
case DefaultFunctionArgumentInstantiation:
return X.TemplateArgs == Y.TemplateArgs;
}
llvm_unreachable("Invalid InstantiationKind!");
}
friend bool operator!=(const ActiveTemplateInstantiation &X,
const ActiveTemplateInstantiation &Y) {
return !(X == Y);
}
};
/// \brief List of active template instantiations.
///
/// This vector is treated as a stack. As one template instantiation
/// requires another template instantiation, additional
/// instantiations are pushed onto the stack up to a
/// user-configurable limit LangOptions::InstantiationDepth.
SmallVector<ActiveTemplateInstantiation, 16>
ActiveTemplateInstantiations;
/// \brief Whether we are in a SFINAE context that is not associated with
/// template instantiation.
///
/// This is used when setting up a SFINAE trap (see \c SFINAETrap) outside
/// of a template instantiation or template argument deduction.
bool InNonInstantiationSFINAEContext;
/// \brief The number of ActiveTemplateInstantiation entries in
/// \c ActiveTemplateInstantiations that are not actual instantiations and,
/// therefore, should not be counted as part of the instantiation depth.
unsigned NonInstantiationEntries;
/// \brief The last template from which a template instantiation
/// error or warning was produced.
///
/// This value is used to suppress printing of redundant template
/// instantiation backtraces when there are multiple errors in the
/// same instantiation. FIXME: Does this belong in Sema? It's tough
/// to implement it anywhere else.
ActiveTemplateInstantiation LastTemplateInstantiationErrorContext;
/// \brief The current index into pack expansion arguments that will be
/// used for substitution of parameter packs.
///
/// The pack expansion index will be -1 to indicate that parameter packs
/// should be instantiated as themselves. Otherwise, the index specifies
/// which argument within the parameter pack will be used for substitution.
int ArgumentPackSubstitutionIndex;
/// \brief RAII object used to change the argument pack substitution index
/// within a \c Sema object.
///
/// See \c ArgumentPackSubstitutionIndex for more information.
class ArgumentPackSubstitutionIndexRAII {
Sema &Self;
int OldSubstitutionIndex;
public:
ArgumentPackSubstitutionIndexRAII(Sema &Self, int NewSubstitutionIndex)
: Self(Self), OldSubstitutionIndex(Self.ArgumentPackSubstitutionIndex) {
Self.ArgumentPackSubstitutionIndex = NewSubstitutionIndex;
}
~ArgumentPackSubstitutionIndexRAII() {
Self.ArgumentPackSubstitutionIndex = OldSubstitutionIndex;
}
};
friend class ArgumentPackSubstitutionRAII;
/// \brief The stack of calls expression undergoing template instantiation.
///
/// The top of this stack is used by a fixit instantiating unresolved
/// function calls to fix the AST to match the textual change it prints.
SmallVector<CallExpr *, 8> CallsUndergoingInstantiation;
/// \brief For each declaration that involved template argument deduction, the
/// set of diagnostics that were suppressed during that template argument
/// deduction.
///
/// FIXME: Serialize this structure to the AST file.
llvm::DenseMap<Decl *, SmallVector<PartialDiagnosticAt, 1> >
SuppressedDiagnostics;
/// \brief A stack object to be created when performing template
/// instantiation.
///
/// Construction of an object of type \c InstantiatingTemplate
/// pushes the current instantiation onto the stack of active
/// instantiations. If the size of this stack exceeds the maximum
/// number of recursive template instantiations, construction
/// produces an error and evaluates true.
///
/// Destruction of this object will pop the named instantiation off
/// the stack.
struct InstantiatingTemplate {
/// \brief Note that we are instantiating a class template,
/// function template, or a member thereof.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
Decl *Entity,
SourceRange InstantiationRange = SourceRange());
// Tag type used to select the exception-specification constructor below.
struct ExceptionSpecification {};
/// \brief Note that we are instantiating an exception specification
/// of a function template.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
FunctionDecl *Entity, ExceptionSpecification,
SourceRange InstantiationRange = SourceRange());
/// \brief Note that we are instantiating a default argument in a
/// template-id.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
TemplateDecl *Template,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange = SourceRange());
/// \brief Note that we are instantiating a default argument in a
/// template-id.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
FunctionTemplateDecl *FunctionTemplate,
ArrayRef<TemplateArgument> TemplateArgs,
ActiveTemplateInstantiation::InstantiationKind Kind,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// \brief Note that we are instantiating as part of template
/// argument deduction for a class template partial
/// specialization.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
ClassTemplatePartialSpecializationDecl *PartialSpec,
ArrayRef<TemplateArgument> TemplateArgs,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// \brief Note that we are instantiating a default argument for a
/// function parameter (see
/// ActiveTemplateInstantiation::DefaultFunctionArgumentInstantiation).
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
ParmVarDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange = SourceRange());
/// \brief Note that we are substituting prior template arguments into a
/// non-type or template template parameter.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
NamedDecl *Template,
NonTypeTemplateParmDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange);
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
NamedDecl *Template,
TemplateTemplateParmDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange);
/// \brief Note that we are checking the default template argument
/// against the template parameter for a given template-id.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
TemplateDecl *Template,
NamedDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange);
/// \brief Note that we have finished instantiating this template.
void Clear();
~InstantiatingTemplate() { Clear(); }
/// \brief Determines whether we have exceeded the maximum
/// recursive template instantiations.
operator bool() const { return Invalid; }
private:
Sema &SemaRef;
// True when pushing this instantiation failed (e.g., the maximum
// instantiation depth was exceeded); see operator bool().
bool Invalid;
// Saved value of Sema::InNonInstantiationSFINAEContext, presumably
// restored by Clear() when this instantiation is popped.
bool SavedInNonInstantiationSFINAEContext;
bool CheckInstantiationDepth(SourceLocation PointOfInstantiation,
SourceRange InstantiationRange);
// Non-copyable: each object corresponds to one stack entry.
InstantiatingTemplate(const InstantiatingTemplate&) LLVM_DELETED_FUNCTION;
InstantiatingTemplate&
operator=(const InstantiatingTemplate&) LLVM_DELETED_FUNCTION;
};
void PrintInstantiationStack();
/// \brief Determines whether we are currently in a context where
/// template argument substitution failures are not considered
/// errors.
///
/// \returns An empty \c Optional if we're not in a SFINAE context.
/// Otherwise, contains a pointer that, if non-NULL, contains the nearest
/// template-deduction context object, which can be used to capture
/// diagnostics that will be suppressed.
Optional<sema::TemplateDeductionInfo *> isSFINAEContext() const;
/// \brief Determines whether we are currently in a context that
/// is not evaluated as per C++ [expr] p5.
bool isUnevaluatedContext() const {
assert(!ExprEvalContexts.empty() &&
"Must be in an expression evaluation context");
// Query the innermost (most recently pushed) evaluation context.
return ExprEvalContexts.back().isUnevaluated();
}
/// \brief RAII class used to determine whether SFINAE has
/// trapped any errors that occur during template argument
/// deduction.
class SFINAETrap {
Sema &SemaRef;
unsigned PrevSFINAEErrors;
bool PrevInNonInstantiationSFINAEContext;
bool PrevAccessCheckingSFINAE;
public:
explicit SFINAETrap(Sema &SemaRef, bool AccessCheckingSFINAE = false)
: SemaRef(SemaRef), PrevSFINAEErrors(SemaRef.NumSFINAEErrors),
PrevInNonInstantiationSFINAEContext(
SemaRef.InNonInstantiationSFINAEContext),
PrevAccessCheckingSFINAE(SemaRef.AccessCheckingSFINAE)
{
if (!SemaRef.isSFINAEContext())
SemaRef.InNonInstantiationSFINAEContext = true;
SemaRef.AccessCheckingSFINAE = AccessCheckingSFINAE;
}
~SFINAETrap() {
SemaRef.NumSFINAEErrors = PrevSFINAEErrors;
SemaRef.InNonInstantiationSFINAEContext
= PrevInNonInstantiationSFINAEContext;
SemaRef.AccessCheckingSFINAE = PrevAccessCheckingSFINAE;
}
/// \brief Determine whether any SFINAE errors have been trapped.
bool hasErrorOccurred() const {
return SemaRef.NumSFINAEErrors > PrevSFINAEErrors;
}
};
/// \brief The current instantiation scope used to store local
/// variables.
LocalInstantiationScope *CurrentInstantiationScope;
/// \brief The number of typos corrected by CorrectTypo.
unsigned TyposCorrected;
typedef llvm::DenseMap<IdentifierInfo *, TypoCorrection>
UnqualifiedTyposCorrectedMap;
/// \brief A cache containing the results of typo correction for unqualified
/// name lookup.
///
/// Maps from an identifier for which typo correction was attempted to the
/// resulting TypoCorrection (which may be empty, if there was no
/// correction).
UnqualifiedTyposCorrectedMap UnqualifiedTyposCorrected;
/// \brief Worker object for performing CFG-based warnings.
sema::AnalysisBasedWarnings AnalysisWarnings;
/// \brief An entity for which implicit template instantiation is required.
///
/// The source location associated with the declaration is the first place in
/// the source code where the declaration was "used". It is not necessarily
/// the point of instantiation (which will be either before or after the
/// namespace-scope declaration that triggered this implicit instantiation).
/// However, it is the location that diagnostics should generally refer to,
/// because users will need to know what code triggered the instantiation.
typedef std::pair<ValueDecl *, SourceLocation> PendingImplicitInstantiation;
/// \brief The queue of implicit template instantiations that are required
/// but have not yet been performed.
std::deque<PendingImplicitInstantiation> PendingInstantiations;
/// \brief The queue of implicit template instantiations that are required
/// and must be performed within the current local scope.
///
/// This queue is only used for member functions of local classes in
/// templates, which must be instantiated in the same scope as their
/// enclosing function, so that they can reference function-local
/// types, static variables, enumerators, etc.
std::deque<PendingImplicitInstantiation> PendingLocalImplicitInstantiations;
void PerformPendingInstantiations(bool LocalOnly = false);
TypeSourceInfo *SubstType(TypeSourceInfo *T,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc, DeclarationName Entity);
QualType SubstType(QualType T,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc, DeclarationName Entity);
TypeSourceInfo *SubstType(TypeLoc TL,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc, DeclarationName Entity);
TypeSourceInfo *SubstFunctionDeclType(TypeSourceInfo *T,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc,
DeclarationName Entity,
CXXRecordDecl *ThisContext,
unsigned ThisTypeQuals);
ParmVarDecl *SubstParmVarDecl(ParmVarDecl *D,
const MultiLevelTemplateArgumentList &TemplateArgs,
int indexAdjustment,
Optional<unsigned> NumExpansions,
bool ExpectParameterPack);
bool SubstParmTypes(SourceLocation Loc,
ParmVarDecl **Params, unsigned NumParams,
const MultiLevelTemplateArgumentList &TemplateArgs,
SmallVectorImpl<QualType> &ParamTypes,
SmallVectorImpl<ParmVarDecl *> *OutParams = 0);
ExprResult SubstExpr(Expr *E,
const MultiLevelTemplateArgumentList &TemplateArgs);
/// \brief Substitute the given template arguments into a list of
/// expressions, expanding pack expansions if required.
///
/// \param Exprs The list of expressions to substitute into.
///
/// \param NumExprs The number of expressions in \p Exprs.
///
/// \param IsCall Whether this is some form of call, in which case
/// default arguments will be dropped.
///
/// \param TemplateArgs The set of template arguments to substitute.
///
/// \param Outputs Will receive all of the substituted arguments.
///
/// \returns true if an error occurred, false otherwise.
bool SubstExprs(Expr **Exprs, unsigned NumExprs, bool IsCall,
const MultiLevelTemplateArgumentList &TemplateArgs,
SmallVectorImpl<Expr *> &Outputs);
StmtResult SubstStmt(Stmt *S,
const MultiLevelTemplateArgumentList &TemplateArgs);
Decl *SubstDecl(Decl *D, DeclContext *Owner,
const MultiLevelTemplateArgumentList &TemplateArgs);
ExprResult SubstInitializer(Expr *E,
const MultiLevelTemplateArgumentList &TemplateArgs,
bool CXXDirectInit);
bool
SubstBaseSpecifiers(CXXRecordDecl *Instantiation,
CXXRecordDecl *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs);
bool
InstantiateClass(SourceLocation PointOfInstantiation,
CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateSpecializationKind TSK,
bool Complain = true);
bool InstantiateEnum(SourceLocation PointOfInstantiation,
EnumDecl *Instantiation, EnumDecl *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateSpecializationKind TSK);
/// \brief An attribute from a template pattern, recorded together with the
/// scope and declaration needed to instantiate it later (see
/// InstantiateAttrs, which collects these via LateInstantiatedAttrVec).
struct LateInstantiatedAttribute {
const Attr *TmplAttr;           ///< The attribute as written on the template pattern.
LocalInstantiationScope *Scope; ///< Instantiation scope active when the attribute was seen.
Decl *NewDecl;                  ///< The instantiated declaration the attribute applies to.
LateInstantiatedAttribute(const Attr *A, LocalInstantiationScope *S,
Decl *D)
: TmplAttr(A), Scope(S), NewDecl(D)
{ }
};
typedef SmallVector<LateInstantiatedAttribute, 16> LateInstantiatedAttrVec;
void InstantiateAttrs(const MultiLevelTemplateArgumentList &TemplateArgs,
const Decl *Pattern, Decl *Inst,
LateInstantiatedAttrVec *LateAttrs = 0,
LocalInstantiationScope *OuterMostScope = 0);
bool
InstantiateClassTemplateSpecialization(SourceLocation PointOfInstantiation,
ClassTemplateSpecializationDecl *ClassTemplateSpec,
TemplateSpecializationKind TSK,
bool Complain = true);
void InstantiateClassMembers(SourceLocation PointOfInstantiation,
CXXRecordDecl *Instantiation,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateSpecializationKind TSK);
void InstantiateClassTemplateSpecializationMembers(
SourceLocation PointOfInstantiation,
ClassTemplateSpecializationDecl *ClassTemplateSpec,
TemplateSpecializationKind TSK);
NestedNameSpecifierLoc
SubstNestedNameSpecifierLoc(NestedNameSpecifierLoc NNS,
const MultiLevelTemplateArgumentList &TemplateArgs);
DeclarationNameInfo
SubstDeclarationNameInfo(const DeclarationNameInfo &NameInfo,
const MultiLevelTemplateArgumentList &TemplateArgs);
TemplateName
SubstTemplateName(NestedNameSpecifierLoc QualifierLoc, TemplateName Name,
SourceLocation Loc,
const MultiLevelTemplateArgumentList &TemplateArgs);
bool Subst(const TemplateArgumentLoc *Args, unsigned NumArgs,
TemplateArgumentListInfo &Result,
const MultiLevelTemplateArgumentList &TemplateArgs);
void InstantiateExceptionSpec(SourceLocation PointOfInstantiation,
FunctionDecl *Function);
void InstantiateFunctionDefinition(SourceLocation PointOfInstantiation,
FunctionDecl *Function,
bool Recursive = false,
bool DefinitionRequired = false);
void InstantiateStaticDataMemberDefinition(
SourceLocation PointOfInstantiation,
VarDecl *Var,
bool Recursive = false,
bool DefinitionRequired = false);
void InstantiateMemInitializers(CXXConstructorDecl *New,
const CXXConstructorDecl *Tmpl,
const MultiLevelTemplateArgumentList &TemplateArgs);
NamedDecl *FindInstantiatedDecl(SourceLocation Loc, NamedDecl *D,
const MultiLevelTemplateArgumentList &TemplateArgs);
DeclContext *FindInstantiatedContext(SourceLocation Loc, DeclContext *DC,
const MultiLevelTemplateArgumentList &TemplateArgs);
// Objective-C declarations (interfaces, protocols, categories,
// implementations, properties, and methods).
/// The kind of Objective-C container we are currently in, as reported by
/// getObjCContainerKind().
enum ObjCContainerKind {
    OCK_None = -1,
    OCK_Interface = 0,
    OCK_Protocol,
    OCK_Category,
    OCK_ClassExtension,
    OCK_Implementation,
    OCK_CategoryImplementation
};
ObjCContainerKind getObjCContainerKind() const;
Decl *ActOnStartClassInterface(SourceLocation AtInterfaceLoc,
                                 IdentifierInfo *ClassName,
                                 SourceLocation ClassLoc,
                                 IdentifierInfo *SuperName,
                                 SourceLocation SuperLoc,
                                 Decl * const *ProtoRefs,
                                 unsigned NumProtoRefs,
                                 const SourceLocation *ProtoLocs,
                                 SourceLocation EndProtoLoc,
                                 AttributeList *AttrList);
Decl *ActOnCompatibilityAlias(
                    SourceLocation AtCompatibilityAliasLoc,
                    IdentifierInfo *AliasName,  SourceLocation AliasLocation,
                    IdentifierInfo *ClassName, SourceLocation ClassLocation);
bool CheckForwardProtocolDeclarationForCircularDependency(
      IdentifierInfo *PName,
      SourceLocation &PLoc, SourceLocation PrevLoc,
      const ObjCList<ObjCProtocolDecl> &PList);
Decl *ActOnStartProtocolInterface(
                    SourceLocation AtProtoInterfaceLoc,
                    IdentifierInfo *ProtocolName, SourceLocation ProtocolLoc,
                    Decl * const *ProtoRefNames, unsigned NumProtoRefs,
                    const SourceLocation *ProtoLocs,
                    SourceLocation EndProtoLoc,
                    AttributeList *AttrList);
Decl *ActOnStartCategoryInterface(SourceLocation AtInterfaceLoc,
                                    IdentifierInfo *ClassName,
                                    SourceLocation ClassLoc,
                                    IdentifierInfo *CategoryName,
                                    SourceLocation CategoryLoc,
                                    Decl * const *ProtoRefs,
                                    unsigned NumProtoRefs,
                                    const SourceLocation *ProtoLocs,
                                    SourceLocation EndProtoLoc);
Decl *ActOnStartClassImplementation(
                    SourceLocation AtClassImplLoc,
                    IdentifierInfo *ClassName, SourceLocation ClassLoc,
                    IdentifierInfo *SuperClassname,
                    SourceLocation SuperClassLoc);
Decl *ActOnStartCategoryImplementation(SourceLocation AtCatImplLoc,
                                         IdentifierInfo *ClassName,
                                         SourceLocation ClassLoc,
                                         IdentifierInfo *CatName,
                                         SourceLocation CatLoc);
DeclGroupPtrTy ActOnFinishObjCImplementation(Decl *ObjCImpDecl,
                                               ArrayRef<Decl *> Decls);
DeclGroupPtrTy ActOnForwardClassDeclaration(SourceLocation Loc,
                                              IdentifierInfo **IdentList,
                                              SourceLocation *IdentLocs,
                                              unsigned NumElts);
DeclGroupPtrTy ActOnForwardProtocolDeclaration(SourceLocation AtProtoclLoc,
                                        const IdentifierLocPair *IdentList,
                                               unsigned NumElts,
                                               AttributeList *attrList);
void FindProtocolDeclaration(bool WarnOnDeclarations,
                               const IdentifierLocPair *ProtocolId,
                               unsigned NumProtocols,
                               SmallVectorImpl<Decl *> &Protocols);
/// Ensure attributes are consistent with type.
/// \param [in, out] Attributes The attributes to check; they will
/// be modified to be consistent with \p PropertyTy.
void CheckObjCPropertyAttributes(Decl *PropertyPtrTy,
                                   SourceLocation Loc,
                                   unsigned &Attributes,
                                   bool propertyInPrimaryClass);
/// Process the specified property declaration and create decls for the
/// setters and getters as needed.
/// \param property The property declaration being processed
/// \param CD The semantic container for the property
/// \param redeclaredProperty Declaration for property if redeclared
///        in class extension.
/// \param lexicalDC Container for redeclaredProperty.
void ProcessPropertyDecl(ObjCPropertyDecl *property,
                           ObjCContainerDecl *CD,
                           ObjCPropertyDecl *redeclaredProperty = 0,
                           ObjCContainerDecl *lexicalDC = 0);
void DiagnosePropertyMismatch(ObjCPropertyDecl *Property,
                                ObjCPropertyDecl *SuperProperty,
                                const IdentifierInfo *Name);
void DiagnoseClassExtensionDupMethods(ObjCCategoryDecl *CAT,
                                        ObjCInterfaceDecl *ID);
void MatchOneProtocolPropertiesInClass(Decl *CDecl,
                                         ObjCProtocolDecl *PDecl);
Decl *ActOnAtEnd(Scope *S, SourceRange AtEnd,
                   Decl **allMethods = 0, unsigned allNum = 0,
                   Decl **allProperties = 0, unsigned pNum = 0,
                   DeclGroupPtrTy *allTUVars = 0, unsigned tuvNum = 0);
Decl *ActOnProperty(Scope *S, SourceLocation AtLoc,
                      SourceLocation LParenLoc,
                      FieldDeclarator &FD, ObjCDeclSpec &ODS,
                      Selector GetterSel, Selector SetterSel,
                      bool *OverridingProperty,
                      tok::ObjCKeywordKind MethodImplKind,
                      DeclContext *lexicalDC = 0);
Decl *ActOnPropertyImplDecl(Scope *S,
                              SourceLocation AtLoc,
                              SourceLocation PropertyLoc,
                              bool ImplKind,
                              IdentifierInfo *PropertyId,
                              IdentifierInfo *PropertyIvar,
                              SourceLocation PropertyIvarLoc);
/// Kinds of Objective-C methods with special semantics
/// (alloc/new/copy/init families).
enum ObjCSpecialMethodKind {
    OSMK_None,
    OSMK_Alloc,
    OSMK_New,
    OSMK_Copy,
    OSMK_RetainingInit,
    OSMK_NonRetainingInit
};
/// Parse-time information about a single Objective-C method argument,
/// consumed by ActOnMethodDeclaration.
struct ObjCArgInfo {
    IdentifierInfo *Name;
    SourceLocation NameLoc;
    // The Type is null if no type was specified, and the DeclSpec is invalid
    // in this case.
    ParsedType Type;
    ObjCDeclSpec DeclSpec;
    /// ArgAttrs - Attribute list for this argument.
    AttributeList *ArgAttrs;
};
/// Act on a parsed Objective-C method declaration; per-selector-argument
/// information arrives in \p ArgInfo, C-style arguments in \p CParamInfo.
Decl *ActOnMethodDeclaration(
    Scope *S,
    SourceLocation BeginLoc, // location of the + or -.
    SourceLocation EndLoc,   // location of the ; or {.
    tok::TokenKind MethodType,
    ObjCDeclSpec &ReturnQT, ParsedType ReturnType,
    ArrayRef<SourceLocation> SelectorLocs, Selector Sel,
    // optional arguments. The number of types/arguments is obtained
    // from the Sel.getNumArgs().
    ObjCArgInfo *ArgInfo,
    DeclaratorChunk::ParamInfo *CParamInfo, unsigned CNumArgs, // c-style args
    AttributeList *AttrList, tok::ObjCKeywordKind MethodImplKind,
    bool isVariadic, bool MethodDefinition);
ObjCMethodDecl *LookupMethodInQualifiedType(Selector Sel,
                                  const ObjCObjectPointerType *OPT,
                                  bool IsInstance);
ObjCMethodDecl *LookupMethodInObjectType(Selector Sel, QualType Ty,
                                           bool IsInstance);
bool CheckARCMethodDecl(ObjCMethodDecl *method);
bool inferObjCARCLifetime(ValueDecl *decl);
ExprResult
HandleExprPropertyRefExpr(const ObjCObjectPointerType *OPT,
                            Expr *BaseExpr,
                            SourceLocation OpLoc,
                            DeclarationName MemberName,
                            SourceLocation MemberLoc,
                            SourceLocation SuperLoc, QualType SuperType,
                            bool Super);
ExprResult
ActOnClassPropertyRefExpr(IdentifierInfo &receiverName,
                            IdentifierInfo &propertyName,
                            SourceLocation receiverNameLoc,
                            SourceLocation propertyNameLoc);
ObjCMethodDecl *tryCaptureObjCSelf(SourceLocation Loc);
/// \brief Describes the kind of message expression indicated by a message
/// send that starts with an identifier.
enum ObjCMessageKind {
    /// \brief The message is sent to 'super'.
    ObjCSuperMessage,
    /// \brief The message is an instance message.
    ObjCInstanceMessage,
    /// \brief The message is a class message, and the identifier is a type
    /// name.
    ObjCClassMessage
};
/// Determine the kind of message send (super, instance, or class)
/// that begins with the identifier \p Name; \p ReceiverType is an
/// out parameter.
ObjCMessageKind getObjCMessageKind(Scope *S,
                                     IdentifierInfo *Name,
                                     SourceLocation NameLoc,
                                     bool IsSuper,
                                     bool HasTrailingDot,
                                     ParsedType &ReceiverType);
ExprResult ActOnSuperMessage(Scope *S, SourceLocation SuperLoc,
                               Selector Sel,
                               SourceLocation LBracLoc,
                               ArrayRef<SourceLocation> SelectorLocs,
                               SourceLocation RBracLoc,
                               MultiExprArg Args);
ExprResult BuildClassMessage(TypeSourceInfo *ReceiverTypeInfo,
                               QualType ReceiverType,
                               SourceLocation SuperLoc,
                               Selector Sel,
                               ObjCMethodDecl *Method,
                               SourceLocation LBracLoc,
                               ArrayRef<SourceLocation> SelectorLocs,
                               SourceLocation RBracLoc,
                               MultiExprArg Args,
                               bool isImplicit = false);
ExprResult BuildClassMessageImplicit(QualType ReceiverType,
                                       bool isSuperReceiver,
                                       SourceLocation Loc,
                                       Selector Sel,
                                       ObjCMethodDecl *Method,
                                       MultiExprArg Args);
ExprResult ActOnClassMessage(Scope *S,
                               ParsedType Receiver,
                               Selector Sel,
                               SourceLocation LBracLoc,
                               ArrayRef<SourceLocation> SelectorLocs,
                               SourceLocation RBracLoc,
                               MultiExprArg Args);
ExprResult BuildInstanceMessage(Expr *Receiver,
                                  QualType ReceiverType,
                                  SourceLocation SuperLoc,
                                  Selector Sel,
                                  ObjCMethodDecl *Method,
                                  SourceLocation LBracLoc,
                                  ArrayRef<SourceLocation> SelectorLocs,
                                  SourceLocation RBracLoc,
                                  MultiExprArg Args,
                                  bool isImplicit = false);
ExprResult BuildInstanceMessageImplicit(Expr *Receiver,
                                          QualType ReceiverType,
                                          SourceLocation Loc,
                                          Selector Sel,
                                          ObjCMethodDecl *Method,
                                          MultiExprArg Args);
ExprResult ActOnInstanceMessage(Scope *S,
                                  Expr *Receiver,
                                  Selector Sel,
                                  SourceLocation LBracLoc,
                                  ArrayRef<SourceLocation> SelectorLocs,
                                  SourceLocation RBracLoc,
                                  MultiExprArg Args);
ExprResult BuildObjCBridgedCast(SourceLocation LParenLoc,
                                  ObjCBridgeCastKind Kind,
                                  SourceLocation BridgeKeywordLoc,
                                  TypeSourceInfo *TSInfo,
                                  Expr *SubExpr);
ExprResult ActOnObjCBridgedCast(Scope *S,
                                  SourceLocation LParenLoc,
                                  ObjCBridgeCastKind Kind,
                                  SourceLocation BridgeKeywordLoc,
                                  ParsedType Type,
                                  SourceLocation RParenLoc,
                                  Expr *SubExpr);
bool checkInitMethod(ObjCMethodDecl *method, QualType receiverTypeIfCall);
/// \brief Check whether the given new method is a valid override of the
/// given overridden method, and set any properties that should be inherited.
void CheckObjCMethodOverride(ObjCMethodDecl *NewMethod,
                               const ObjCMethodDecl *Overridden);
/// \brief Describes the compatibility of a result type with its method.
enum ResultTypeCompatibilityKind {
    RTC_Compatible,
    RTC_Incompatible,
    RTC_Unknown
};
void CheckObjCMethodOverrides(ObjCMethodDecl *ObjCMethod,
                                ObjCInterfaceDecl *CurrentClass,
                                ResultTypeCompatibilityKind RTC);
enum PragmaOptionsAlignKind {
    POAK_Native,  // #pragma options align=native
    POAK_Natural, // #pragma options align=natural
    POAK_Packed,  // #pragma options align=packed
    POAK_Power,   // #pragma options align=power
    POAK_Mac68k,  // #pragma options align=mac68k
    POAK_Reset    // #pragma options align=reset
};
/// ActOnPragmaOptionsAlign - Called on well formed \#pragma options align.
void ActOnPragmaOptionsAlign(PragmaOptionsAlignKind Kind,
                               SourceLocation PragmaLoc);
enum PragmaPackKind {
    PPK_Default, // #pragma pack([n])
    PPK_Show,    // #pragma pack(show), only supported by MSVC.
    PPK_Push,    // #pragma pack(push, [identifier], [n])
    PPK_Pop      // #pragma pack(pop, [identifier], [n])
};
enum PragmaMSStructKind {
    PMSST_OFF,  // #pragma ms_struct off
    PMSST_ON    // #pragma ms_struct on
};
/// ActOnPragmaPack - Called on well formed \#pragma pack(...).
void ActOnPragmaPack(PragmaPackKind Kind,
                       IdentifierInfo *Name,
                       Expr *Alignment,
                       SourceLocation PragmaLoc,
                       SourceLocation LParenLoc,
                       SourceLocation RParenLoc);
/// ActOnPragmaMSStruct - Called on well formed \#pragma ms_struct [on|off].
void ActOnPragmaMSStruct(PragmaMSStructKind Kind);
/// ActOnPragmaUnused - Called on well-formed '\#pragma unused'.
void ActOnPragmaUnused(const Token &Identifier,
                         Scope *curScope,
                         SourceLocation PragmaLoc);
/// ActOnPragmaVisibility - Called on well formed \#pragma GCC visibility... .
void ActOnPragmaVisibility(const IdentifierInfo* VisType,
                             SourceLocation PragmaLoc);
NamedDecl *DeclClonePragmaWeak(NamedDecl *ND, IdentifierInfo *II,
                                 SourceLocation Loc);
void DeclApplyPragmaWeak(Scope *S, NamedDecl *ND, WeakInfo &W);
/// ActOnPragmaWeakID - Called on well formed \#pragma weak ident.
void ActOnPragmaWeakID(IdentifierInfo* WeakName,
                         SourceLocation PragmaLoc,
                         SourceLocation WeakNameLoc);
/// ActOnPragmaRedefineExtname - Called on well formed
/// \#pragma redefine_extname oldname newname.
void ActOnPragmaRedefineExtname(IdentifierInfo* WeakName,
                                  IdentifierInfo* AliasName,
                                  SourceLocation PragmaLoc,
                                  SourceLocation WeakNameLoc,
                                  SourceLocation AliasNameLoc);
/// ActOnPragmaWeakAlias - Called on well formed \#pragma weak ident = ident.
void ActOnPragmaWeakAlias(IdentifierInfo* WeakName,
                            IdentifierInfo* AliasName,
                            SourceLocation PragmaLoc,
                            SourceLocation WeakNameLoc,
                            SourceLocation AliasNameLoc);
/// ActOnPragmaFPContract - Called on well formed
/// \#pragma {STDC,OPENCL} FP_CONTRACT
void ActOnPragmaFPContract(tok::OnOffSwitch OOS);
/// AddAlignmentAttributesForRecord - Adds any needed alignment attributes to
/// the record decl, to handle '\#pragma pack' and '\#pragma options align'.
void AddAlignmentAttributesForRecord(RecordDecl *RD);
/// AddMsStructLayoutForRecord - Adds ms_struct layout attribute to record.
void AddMsStructLayoutForRecord(RecordDecl *RD);
/// FreePackedContext - Deallocate and null out PackContext.
void FreePackedContext();
/// PushNamespaceVisibilityAttr - Note that we've entered a
/// namespace with a visibility attribute.
void PushNamespaceVisibilityAttr(const VisibilityAttr *Attr,
                                   SourceLocation Loc);
/// AddPushedVisibilityAttribute - If '\#pragma GCC visibility' was used,
/// add an appropriate visibility attribute.
void AddPushedVisibilityAttribute(Decl *RD);
/// PopPragmaVisibility - Pop the top element of the visibility stack; used
/// for '\#pragma GCC visibility' and visibility attributes on namespaces.
void PopPragmaVisibility(bool IsNamespaceEnd, SourceLocation EndLoc);
/// FreeVisContext - Deallocate and null out VisContext.
void FreeVisContext();
/// AddCFAuditedAttribute - Check whether we're currently within
/// '\#pragma clang arc_cf_code_audited' and, if so, consider adding
/// the appropriate attribute.
void AddCFAuditedAttribute(Decl *D);
/// AddAlignedAttr - Adds an aligned attribute to a particular declaration.
void AddAlignedAttr(SourceRange AttrRange, Decl *D, Expr *E,
                      unsigned SpellingListIndex, bool IsPackExpansion);
void AddAlignedAttr(SourceRange AttrRange, Decl *D, TypeSourceInfo *T,
                      unsigned SpellingListIndex, bool IsPackExpansion);
// OpenMP directives and clauses.
/// \brief Called on well-formed '#pragma omp threadprivate'.
DeclGroupPtrTy ActOnOpenMPThreadprivateDirective(
                                     SourceLocation Loc,
                                     Scope *CurScope,
                                     ArrayRef<DeclarationNameInfo> IdList);
/// \brief Build a new OpenMPThreadPrivateDecl and check its correctness.
OMPThreadPrivateDecl *CheckOMPThreadPrivateDecl(
                                     SourceLocation Loc,
                                     ArrayRef<DeclRefExpr *> VarList);
/// \brief The kind of conversion being performed.
enum CheckedConversionKind {
    /// \brief An implicit conversion.
    CCK_ImplicitConversion,
    /// \brief A C-style cast.
    CCK_CStyleCast,
    /// \brief A functional-style cast.
    CCK_FunctionalCast,
    /// \brief A cast other than a C-style cast.
    CCK_OtherCast
};
/// ImpCastExprToType - If Expr is not of type 'Type', insert an implicit
/// cast. If there is already an implicit cast, merge into the existing one.
/// If isLvalue, the result of the cast is an lvalue.
ExprResult ImpCastExprToType(Expr *E, QualType Type, CastKind CK,
                               ExprValueKind VK = VK_RValue,
                               const CXXCastPath *BasePath = 0,
                               CheckedConversionKind CCK
                                  = CCK_ImplicitConversion);
/// ScalarTypeToBooleanCastKind - Returns the cast kind corresponding
/// to the conversion from scalar type ScalarTy to the Boolean type.
static CastKind ScalarTypeToBooleanCastKind(QualType ScalarTy);
/// IgnoredValueConversions - Given that an expression's result is
/// syntactically ignored, perform any conversions that are
/// required.
ExprResult IgnoredValueConversions(Expr *E);
// UsualUnaryConversions - promotes integers (C99 6.3.1.1p2) and converts
// functions and arrays to their respective pointers (C99 6.3.2.1).
ExprResult UsualUnaryConversions(Expr *E);
// DefaultFunctionArrayConversion - converts functions and arrays
// to their respective pointers (C99 6.3.2.1).
ExprResult DefaultFunctionArrayConversion(Expr *E);
// DefaultFunctionArrayLvalueConversion - converts functions and
// arrays to their respective pointers and performs the
// lvalue-to-rvalue conversion.
ExprResult DefaultFunctionArrayLvalueConversion(Expr *E);
// DefaultLvalueConversion - performs lvalue-to-rvalue conversion on
// the operand. This is DefaultFunctionArrayLvalueConversion,
// except that it assumes the operand isn't of function or array
// type.
ExprResult DefaultLvalueConversion(Expr *E);
// DefaultArgumentPromotion (C99 6.5.2.2p6). Used for function calls that
// do not have a prototype. Integer promotions are performed on each
// argument, and arguments that have type float are promoted to double.
ExprResult DefaultArgumentPromotion(Expr *E);
// Used for emitting the right warning by DefaultVariadicArgumentPromotion
enum VariadicCallType {
    VariadicFunction,
    VariadicBlock,
    VariadicMethod,
    VariadicConstructor,
    VariadicDoesNotApply
};
// Determines the VariadicCallType of the given callee, used when
// diagnosing variadic argument promotions.
VariadicCallType getVariadicCallType(FunctionDecl *FDecl,
                                       const FunctionProtoType *Proto,
                                       Expr *Fn);
// Used for determining in which context a type is allowed to be passed to a
// vararg function.
enum VarArgKind {
    VAK_Valid,
    VAK_ValidInCXX11,
    VAK_Invalid
};
// Determines which VarArgKind fits an expression.
VarArgKind isValidVarArgType(const QualType &Ty);
/// GatherArgumentsForCall - Collects argument expressions for the various
/// forms of call prototypes.
bool GatherArgumentsForCall(SourceLocation CallLoc,
                              FunctionDecl *FDecl,
                              const FunctionProtoType *Proto,
                              unsigned FirstProtoArg,
                              Expr **Args, unsigned NumArgs,
                              SmallVector<Expr *, 8> &AllArgs,
                              VariadicCallType CallType = VariadicDoesNotApply,
                              bool AllowExplicit = false,
                              bool IsListInitialization = false);
// DefaultVariadicArgumentPromotion - Like DefaultArgumentPromotion, but
// will create a runtime trap if the resulting type is not a POD type.
ExprResult DefaultVariadicArgumentPromotion(Expr *E, VariadicCallType CT,
                                              FunctionDecl *FDecl);
/// Checks to see if the given expression is a valid argument to a variadic
/// function, issuing a diagnostic and returning NULL if not.
bool variadicArgumentPODCheck(const Expr *E, VariadicCallType CT);
// UsualArithmeticConversions - performs the UsualUnaryConversions on its
// operands and then handles various conversions that are common to binary
// operators (C99 6.3.1.8). If both operands aren't arithmetic, this
// routine returns the first non-arithmetic type found. The client is
// responsible for emitting appropriate error diagnostics.
QualType UsualArithmeticConversions(ExprResult &LHS, ExprResult &RHS,
                                      bool IsCompAssign = false);
/// AssignConvertType - All of the 'assignment' semantic checks return this
/// enum to indicate whether the assignment was allowed.  These checks are
/// done for simple assignments, as well as initialization, return from
/// function, argument passing, etc.  The query is phrased in terms of a
/// source and destination type.
enum AssignConvertType {
    /// Compatible - the types are compatible according to the standard.
    Compatible,
    /// PointerToInt - The assignment converts a pointer to an int, which we
    /// accept as an extension.
    PointerToInt,
    /// IntToPointer - The assignment converts an int to a pointer, which we
    /// accept as an extension.
    IntToPointer,
    /// FunctionVoidPointer - The assignment is between a function pointer and
    /// void*, which the standard doesn't allow, but we accept as an extension.
    FunctionVoidPointer,
    /// IncompatiblePointer - The assignment is between two pointer types that
    /// are not compatible, but we accept them as an extension.
    IncompatiblePointer,
    /// IncompatiblePointerSign - The assignment is between two pointer types
    /// which point to integers which have a different sign, but are otherwise
    /// identical. This is a subset of the above, but broken out because it's by
    /// far the most common case of incompatible pointers.
    IncompatiblePointerSign,
    /// CompatiblePointerDiscardsQualifiers - The assignment discards
    /// c/v/r qualifiers, which we accept as an extension.
    CompatiblePointerDiscardsQualifiers,
    /// IncompatiblePointerDiscardsQualifiers - The assignment
    /// discards qualifiers that we don't permit to be discarded,
    /// like address spaces.
    IncompatiblePointerDiscardsQualifiers,
    /// IncompatibleNestedPointerQualifiers - The assignment is between two
    /// nested pointer types, and the qualifiers other than the first two
    /// levels differ e.g. char ** -> const char **, but we accept them as an
    /// extension.
    IncompatibleNestedPointerQualifiers,
    /// IncompatibleVectors - The assignment is between two vector types that
    /// have the same size, which we accept as an extension.
    IncompatibleVectors,
    /// IntToBlockPointer - The assignment converts an int to a block
    /// pointer. We disallow this.
    IntToBlockPointer,
    /// IncompatibleBlockPointer - The assignment is between two block
    /// pointer types that are not compatible.
    IncompatibleBlockPointer,
    /// IncompatibleObjCQualifiedId - The assignment is between a qualified
    /// id type and something else (that is incompatible with it). For example,
    /// "id <XXX>" = "Foo *", where "Foo *" doesn't implement the XXX protocol.
    IncompatibleObjCQualifiedId,
    /// IncompatibleObjCWeakRef - Assigning a weak-unavailable object to an
    /// object with __weak qualifier.
    IncompatibleObjCWeakRef,
    /// Incompatible - We reject this conversion outright, it is invalid to
    /// represent it in the AST.
    Incompatible
};
/// DiagnoseAssignmentResult - Emit a diagnostic, if required, for the
/// assignment conversion type specified by ConvTy. This returns true if the
/// conversion was invalid or false if the conversion was accepted.
bool DiagnoseAssignmentResult(AssignConvertType ConvTy,
                                SourceLocation Loc,
                                QualType DstType, QualType SrcType,
                                Expr *SrcExpr, AssignmentAction Action,
                                bool *Complained = 0);
/// DiagnoseAssignmentEnum - Warn if assignment to enum is a constant
/// integer not in the range of enum values.
void DiagnoseAssignmentEnum(QualType DstType, QualType SrcType,
                              Expr *SrcExpr);
/// CheckAssignmentConstraints - Perform type checking for assignment,
/// argument passing, variable initialization, and function return values.
/// C99 6.5.16.
AssignConvertType CheckAssignmentConstraints(SourceLocation Loc,
                                               QualType LHSType,
                                               QualType RHSType);
/// Check assignment constraints and prepare for a conversion of the
/// RHS to the LHS type.
AssignConvertType CheckAssignmentConstraints(QualType LHSType,
                                               ExprResult &RHS,
                                               CastKind &Kind);
// CheckSingleAssignmentConstraints - Currently used by
// CheckAssignmentOperands, and ActOnReturnStmt. Prior to type checking,
// this routine performs the default function/array conversions.
AssignConvertType CheckSingleAssignmentConstraints(QualType LHSType,
                                                     ExprResult &RHS,
                                                     bool Diagnose = true);
// \brief If the lhs type is a transparent union, check whether we
// can initialize the transparent union with the given expression.
AssignConvertType CheckTransparentUnionArgumentConstraints(QualType ArgType,
                                                             ExprResult &RHS);
bool IsStringLiteralToNonConstPointerConversion(Expr *From, QualType ToType);
bool CheckExceptionSpecCompatibility(Expr *From, QualType ToType);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
                                       AssignmentAction Action,
                                       bool AllowExplicit = false);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
                                       AssignmentAction Action,
                                       bool AllowExplicit,
                                       ImplicitConversionSequence& ICS);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
                                       const ImplicitConversionSequence& ICS,
                                       AssignmentAction Action,
                                       CheckedConversionKind CCK
                                          = CCK_ImplicitConversion);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
                                       const StandardConversionSequence& SCS,
                                       AssignmentAction Action,
                                       CheckedConversionKind CCK);
/// the following "Check" methods will return a valid/converted QualType
/// or a null QualType (indicating an error diagnostic was issued).
/// type checking binary operators (subroutines of CreateBuiltinBinOp).
QualType InvalidOperands(SourceLocation Loc, ExprResult &LHS,
                           ExprResult &RHS);
QualType CheckPointerToMemberOperands( // C++ 5.5
    ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK,
    SourceLocation OpLoc, bool isIndirect);
QualType CheckMultiplyDivideOperands( // C99 6.5.5
    ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign,
    bool IsDivide);
QualType CheckRemainderOperands( // C99 6.5.5
    ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
    bool IsCompAssign = false);
QualType CheckAdditionOperands( // C99 6.5.6
    ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, unsigned Opc,
    QualType* CompLHSTy = 0);
QualType CheckSubtractionOperands( // C99 6.5.6
    ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
    QualType* CompLHSTy = 0);
QualType CheckShiftOperands( // C99 6.5.7
    ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, unsigned Opc,
    bool IsCompAssign = false);
QualType CheckCompareOperands( // C99 6.5.8/9
    ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, unsigned OpaqueOpc,
    bool isRelational);
QualType CheckBitwiseOperands( // C99 6.5.[10...12]
    ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
    bool IsCompAssign = false);
QualType CheckLogicalOperands( // C99 6.5.[13,14]
    ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, unsigned Opc);
// CheckAssignmentOperands is used for both simple and compound assignment.
// For simple assignment, pass both expressions and a null converted type.
// For compound assignment, pass both expressions and the converted type.
QualType CheckAssignmentOperands( // C99 6.5.16.[1,2]
    Expr *LHSExpr, ExprResult &RHS, SourceLocation Loc, QualType CompoundType);
ExprResult checkPseudoObjectIncDec(Scope *S, SourceLocation OpLoc,
                                     UnaryOperatorKind Opcode, Expr *Op);
ExprResult checkPseudoObjectAssignment(Scope *S, SourceLocation OpLoc,
                                         BinaryOperatorKind Opcode,
                                         Expr *LHS, Expr *RHS);
ExprResult checkPseudoObjectRValue(Expr *E);
Expr *recreateSyntacticForm(PseudoObjectExpr *E);
QualType CheckConditionalOperands( // C99 6.5.15
    ExprResult &Cond, ExprResult &LHS, ExprResult &RHS,
    ExprValueKind &VK, ExprObjectKind &OK, SourceLocation QuestionLoc);
QualType CXXCheckConditionalOperands( // C++ 5.16
    ExprResult &cond, ExprResult &lhs, ExprResult &rhs,
    ExprValueKind &VK, ExprObjectKind &OK, SourceLocation questionLoc);
QualType FindCompositePointerType(SourceLocation Loc, Expr *&E1, Expr *&E2,
                                    bool *NonStandardCompositeType = 0);
/// Convenience overload of FindCompositePointerType that operates on
/// ExprResult operands: it unwraps both expressions, delegates to the
/// Expr*& overload above, then re-wraps the (possibly rewritten)
/// expressions back into the ExprResults.
QualType FindCompositePointerType(SourceLocation Loc,
                                    ExprResult &E1, ExprResult &E2,
                                    bool *NonStandardCompositeType = 0) {
    Expr *FirstExpr = E1.take();
    Expr *SecondExpr = E2.take();
    QualType CompositeTy =
        FindCompositePointerType(Loc, FirstExpr, SecondExpr,
                                 NonStandardCompositeType);
    E1 = Owned(FirstExpr);
    E2 = Owned(SecondExpr);
    return CompositeTy;
}
QualType FindCompositeObjCPointerType(ExprResult &LHS, ExprResult &RHS,
                                        SourceLocation QuestionLoc);
bool DiagnoseConditionalForNull(Expr *LHSExpr, Expr *RHSExpr,
                                  SourceLocation QuestionLoc);
/// type checking for vector binary operators.
QualType CheckVectorOperands(ExprResult &LHS, ExprResult &RHS,
                               SourceLocation Loc, bool IsCompAssign);
QualType GetSignedVectorType(QualType V);
QualType CheckVectorCompareOperands(ExprResult &LHS, ExprResult &RHS,
                                      SourceLocation Loc, bool isRelational);
QualType CheckVectorLogicalOperands(ExprResult &LHS, ExprResult &RHS,
                                      SourceLocation Loc);
/// type checking declaration initializers (C99 6.7.8)
bool CheckForConstantInitializer(Expr *e, QualType t);
// type checking C++ declaration initializers (C++ [dcl.init]).
/// ReferenceCompareResult - Expresses the result of comparing two
/// types (cv1 T1 and cv2 T2) to determine their compatibility for the
/// purposes of initialization by reference (C++ [dcl.init.ref]p4).
enum ReferenceCompareResult {
    /// Ref_Incompatible - The two types are incompatible, so direct
    /// reference binding is not possible.
    Ref_Incompatible = 0,
    /// Ref_Related - The two types are reference-related, which means
    /// that their unqualified forms (T1 and T2) are either the same
    /// or T1 is a base class of T2.
    Ref_Related,
    /// Ref_Compatible_With_Added_Qualification - The two types are
    /// reference-compatible with added qualification, meaning that
    /// they are reference-compatible and the qualifiers on T1 (cv1)
    /// are greater than the qualifiers on T2 (cv2).
    Ref_Compatible_With_Added_Qualification,
    /// Ref_Compatible - The two types are reference-compatible and
    /// have equivalent qualifiers (cv1 == cv2).
    Ref_Compatible
};
ReferenceCompareResult CompareReferenceRelationship(SourceLocation Loc,
                                                      QualType T1, QualType T2,
                                                      bool &DerivedToBase,
                                                      bool &ObjCConversion,
                                                    bool &ObjCLifetimeConversion);
ExprResult checkUnknownAnyCast(SourceRange TypeRange, QualType CastType,
                                 Expr *CastExpr, CastKind &CastKind,
                                 ExprValueKind &VK, CXXCastPath &Path);
/// \brief Force an expression with unknown-type to an expression of the
/// given type.
ExprResult forceUnknownAnyToType(Expr *E, QualType ToType);
/// \brief Type-check an expression that's being passed to an
/// __unknown_anytype parameter.
ExprResult checkUnknownAnyArg(SourceLocation callLoc,
                                Expr *result, QualType &paramType);
// CheckVectorCast - check type constraints for vectors.
// Since vectors are an extension, there is no C standard reference for this.
// We allow casting between vectors and integer datatypes of the same size.
// returns true if the cast is invalid
bool CheckVectorCast(SourceRange R, QualType VectorTy, QualType Ty,
                       CastKind &Kind);
// CheckExtVectorCast - check type constraints for extended vectors.
// Since vectors are an extension, there is no C standard reference for this.
// We allow casting between vectors and integer datatypes of the same size,
// or vectors and the element type of that vector.
// returns the cast expr
ExprResult CheckExtVectorCast(SourceRange R, QualType DestTy, Expr *CastExpr,
                                CastKind &Kind);
ExprResult BuildCXXFunctionalCastExpr(TypeSourceInfo *TInfo,
                                        SourceLocation LParenLoc,
                                        Expr *CastExpr,
                                        SourceLocation RParenLoc);
enum ARCConversionResult { ACR_okay, ACR_unbridged };
/// \brief Checks for invalid conversions and casts between
/// retainable pointers and other pointer kinds.
ARCConversionResult CheckObjCARCConversion(SourceRange castRange,
                                             QualType castType, Expr *&op,
                                             CheckedConversionKind CCK);
Expr *stripARCUnbridgedCast(Expr *e);
void diagnoseARCUnbridgedCast(Expr *e);
bool CheckObjCARCUnavailableWeakConversion(QualType castType,
                                             QualType ExprType);
/// checkRetainCycles - Check whether an Objective-C message send
/// might create an obvious retain cycle.
void checkRetainCycles(ObjCMessageExpr *msg);
void checkRetainCycles(Expr *receiver, Expr *argument);
void checkRetainCycles(VarDecl *Var, Expr *Init);
/// checkUnsafeAssigns - Check whether +1 expr is being assigned
/// to weak/__unsafe_unretained type.
bool checkUnsafeAssigns(SourceLocation Loc, QualType LHS, Expr *RHS);
/// checkUnsafeExprAssigns - Check whether +1 expr is being assigned
/// to weak/__unsafe_unretained expression.
void checkUnsafeExprAssigns(SourceLocation Loc, Expr *LHS, Expr *RHS);
/// CheckMessageArgumentTypes - Check types in an Obj-C message send.
/// \param Method - May be null.
/// \param [out] ReturnType - The return type of the send.
/// \return true iff there were any incompatible types.
bool CheckMessageArgumentTypes(QualType ReceiverType,
                                 Expr **Args, unsigned NumArgs, Selector Sel,
                                 ArrayRef<SourceLocation> SelectorLocs,
                                 ObjCMethodDecl *Method, bool isClassMessage,
                                 bool isSuperMessage,
                                 SourceLocation lbrac, SourceLocation rbrac,
                                 QualType &ReturnType, ExprValueKind &VK);
/// \brief Determine the result of a message send expression based on
/// the type of the receiver, the method expected to receive the message,
/// and the form of the message send.
QualType getMessageSendResultType(QualType ReceiverType,
                                    ObjCMethodDecl *Method,
                                    bool isClassMessage, bool isSuperMessage);
/// \brief If the given expression involves a message send to a method
/// with a related result type, emit a note describing what happened.
void EmitRelatedResultTypeNote(const Expr *E);
/// \brief Given that we had incompatible pointer types in a return
/// statement, check whether we're in a method with a related result
/// type, and if so, emit a note describing what happened.
void EmitRelatedResultTypeNoteForReturn(QualType destType);
/// CheckBooleanCondition - Diagnose problems involving the use of
/// the given expression as a boolean condition (e.g. in an if
/// statement). Also performs the standard function and array
/// decays, possibly changing the input variable.
///
/// \param Loc - A location associated with the condition, e.g. the
/// 'if' keyword.
/// \return true iff there were any errors
ExprResult CheckBooleanCondition(Expr *E, SourceLocation Loc);
ExprResult ActOnBooleanCondition(Scope *S, SourceLocation Loc,
                                   Expr *SubExpr);
/// DiagnoseAssignmentAsCondition - Given that an expression is
/// being used as a boolean condition, warn if it's an assignment.
void DiagnoseAssignmentAsCondition(Expr *E);
/// \brief Redundant parentheses over an equality comparison can indicate
/// that the user intended an assignment used as condition.
void DiagnoseEqualityWithExtraParens(ParenExpr *ParenE);
/// CheckCXXBooleanCondition - Returns true if conversion to bool is invalid.
ExprResult CheckCXXBooleanCondition(Expr *CondExpr);
/// ConvertIntegerToTypeWarnOnOverflow - Convert the specified APInt to have
/// the specified width and sign. If an overflow occurs, detect it and emit
/// the specified diagnostic.
void ConvertIntegerToTypeWarnOnOverflow(llvm::APSInt &OldVal,
                                          unsigned NewWidth, bool NewSign,
                                          SourceLocation Loc, unsigned DiagID);
/// Checks that the Objective-C declaration is declared in the global scope.
/// Emits an error and marks the declaration as invalid if it's not declared
/// in the global scope.
bool CheckObjCDeclScope(Decl *D);
/// \brief Abstract base class used for diagnosing integer constant
/// expression violations.
class VerifyICEDiagnoser {
public:
// When true, all diagnostics are suppressed (the caller only wants the
// ICE check itself, not the notes/errors).
bool Suppress;
VerifyICEDiagnoser(bool Suppress = false) : Suppress(Suppress) { }
// Emit the diagnostic for an expression that is not an ICE at all.
// Pure virtual: every concrete diagnoser must provide this.
virtual void diagnoseNotICE(Sema &S, SourceLocation Loc, SourceRange SR) =0;
// Emit the diagnostic for an expression that is not a formal ICE but can
// still be folded to a constant; a default implementation is provided
// (defined out of line).
virtual void diagnoseFold(Sema &S, SourceLocation Loc, SourceRange SR);
virtual ~VerifyICEDiagnoser() { }
};
/// VerifyIntegerConstantExpression - Verifies that an expression is an ICE,
/// and reports the appropriate diagnostics. Returns false on success.
/// Can optionally return the value of the expression.
ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
VerifyICEDiagnoser &Diagnoser,
bool AllowFold = true);
ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
unsigned DiagID,
bool AllowFold = true);
ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result=0);
/// VerifyBitField - verifies that a bit field expression is an ICE and has
/// the correct width, and that the field type is valid.
/// Returns false on success.
/// Can optionally return whether the bit-field is of width 0
ExprResult VerifyBitField(SourceLocation FieldLoc, IdentifierInfo *FieldName,
QualType FieldTy, Expr *BitWidth,
bool *ZeroWidth = 0);
enum CUDAFunctionTarget {
CFT_Device,
CFT_Global,
CFT_Host,
CFT_HostDevice
};
CUDAFunctionTarget IdentifyCUDATarget(const FunctionDecl *D);
bool CheckCUDATarget(CUDAFunctionTarget CallerTarget,
CUDAFunctionTarget CalleeTarget);
bool CheckCUDATarget(const FunctionDecl *Caller, const FunctionDecl *Callee) {
return CheckCUDATarget(IdentifyCUDATarget(Caller),
IdentifyCUDATarget(Callee));
}
/// \name Code completion
//@{
/// \brief Describes the context in which code completion occurs.
enum ParserCompletionContext {
/// \brief Code completion occurs at top-level or namespace context.
PCC_Namespace,
/// \brief Code completion occurs within a class, struct, or union.
PCC_Class,
/// \brief Code completion occurs within an Objective-C interface, protocol,
/// or category.
PCC_ObjCInterface,
/// \brief Code completion occurs within an Objective-C implementation or
/// category implementation
PCC_ObjCImplementation,
/// \brief Code completion occurs within the list of instance variables
/// in an Objective-C interface, protocol, category, or implementation.
PCC_ObjCInstanceVariableList,
/// \brief Code completion occurs following one or more template
/// headers.
PCC_Template,
/// \brief Code completion occurs following one or more template
/// headers within a class.
PCC_MemberTemplate,
/// \brief Code completion occurs within an expression.
PCC_Expression,
/// \brief Code completion occurs within a statement, which may
/// also be an expression or a declaration.
PCC_Statement,
/// \brief Code completion occurs at the beginning of the
/// initialization statement (or expression) in a for loop.
PCC_ForInit,
/// \brief Code completion occurs within the condition of an if,
/// while, switch, or for statement.
PCC_Condition,
/// \brief Code completion occurs within the body of a function on a
/// recovery path, where we do not have a specific handle on our position
/// in the grammar.
PCC_RecoveryInFunction,
/// \brief Code completion occurs where only a type is permitted.
PCC_Type,
/// \brief Code completion occurs in a parenthesized expression, which
/// might also be a type cast.
PCC_ParenthesizedExpression,
/// \brief Code completion occurs within a sequence of declaration
/// specifiers within a function, method, or block.
PCC_LocalDeclarationSpecifiers
};
void CodeCompleteModuleImport(SourceLocation ImportLoc, ModuleIdPath Path);
void CodeCompleteOrdinaryName(Scope *S,
ParserCompletionContext CompletionContext);
void CodeCompleteDeclSpec(Scope *S, DeclSpec &DS,
bool AllowNonIdentifiers,
bool AllowNestedNameSpecifiers);
struct CodeCompleteExpressionData;
void CodeCompleteExpression(Scope *S,
const CodeCompleteExpressionData &Data);
void CodeCompleteMemberReferenceExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
bool IsArrow);
void CodeCompletePostfixExpression(Scope *S, ExprResult LHS);
void CodeCompleteTag(Scope *S, unsigned TagSpec);
void CodeCompleteTypeQualifiers(DeclSpec &DS);
void CodeCompleteCase(Scope *S);
void CodeCompleteCall(Scope *S, Expr *Fn, ArrayRef<Expr *> Args);
void CodeCompleteInitializer(Scope *S, Decl *D);
void CodeCompleteReturn(Scope *S);
void CodeCompleteAfterIf(Scope *S);
void CodeCompleteAssignmentRHS(Scope *S, Expr *LHS);
void CodeCompleteQualifiedId(Scope *S, CXXScopeSpec &SS,
bool EnteringContext);
void CodeCompleteUsing(Scope *S);
void CodeCompleteUsingDirective(Scope *S);
void CodeCompleteNamespaceDecl(Scope *S);
void CodeCompleteNamespaceAliasDecl(Scope *S);
void CodeCompleteOperatorName(Scope *S);
void CodeCompleteConstructorInitializer(Decl *Constructor,
CXXCtorInitializer** Initializers,
unsigned NumInitializers);
void CodeCompleteLambdaIntroducer(Scope *S, LambdaIntroducer &Intro,
bool AfterAmpersand);
void CodeCompleteObjCAtDirective(Scope *S);
void CodeCompleteObjCAtVisibility(Scope *S);
void CodeCompleteObjCAtStatement(Scope *S);
void CodeCompleteObjCAtExpression(Scope *S);
void CodeCompleteObjCPropertyFlags(Scope *S, ObjCDeclSpec &ODS);
void CodeCompleteObjCPropertyGetter(Scope *S);
void CodeCompleteObjCPropertySetter(Scope *S);
void CodeCompleteObjCPassingType(Scope *S, ObjCDeclSpec &DS,
bool IsParameter);
void CodeCompleteObjCMessageReceiver(Scope *S);
void CodeCompleteObjCSuperMessage(Scope *S, SourceLocation SuperLoc,
IdentifierInfo **SelIdents,
unsigned NumSelIdents,
bool AtArgumentExpression);
void CodeCompleteObjCClassMessage(Scope *S, ParsedType Receiver,
IdentifierInfo **SelIdents,
unsigned NumSelIdents,
bool AtArgumentExpression,
bool IsSuper = false);
void CodeCompleteObjCInstanceMessage(Scope *S, Expr *Receiver,
IdentifierInfo **SelIdents,
unsigned NumSelIdents,
bool AtArgumentExpression,
ObjCInterfaceDecl *Super = 0);
void CodeCompleteObjCForCollection(Scope *S,
DeclGroupPtrTy IterationVar);
void CodeCompleteObjCSelector(Scope *S,
IdentifierInfo **SelIdents,
unsigned NumSelIdents);
void CodeCompleteObjCProtocolReferences(IdentifierLocPair *Protocols,
unsigned NumProtocols);
void CodeCompleteObjCProtocolDecl(Scope *S);
void CodeCompleteObjCInterfaceDecl(Scope *S);
void CodeCompleteObjCSuperclass(Scope *S,
IdentifierInfo *ClassName,
SourceLocation ClassNameLoc);
void CodeCompleteObjCImplementationDecl(Scope *S);
void CodeCompleteObjCInterfaceCategory(Scope *S,
IdentifierInfo *ClassName,
SourceLocation ClassNameLoc);
void CodeCompleteObjCImplementationCategory(Scope *S,
IdentifierInfo *ClassName,
SourceLocation ClassNameLoc);
void CodeCompleteObjCPropertyDefinition(Scope *S);
void CodeCompleteObjCPropertySynthesizeIvar(Scope *S,
IdentifierInfo *PropertyName);
void CodeCompleteObjCMethodDecl(Scope *S,
bool IsInstanceMethod,
ParsedType ReturnType);
void CodeCompleteObjCMethodDeclSelector(Scope *S,
bool IsInstanceMethod,
bool AtParameterName,
ParsedType ReturnType,
IdentifierInfo **SelIdents,
unsigned NumSelIdents);
void CodeCompletePreprocessorDirective(bool InConditional);
void CodeCompleteInPreprocessorConditionalExclusion(Scope *S);
void CodeCompletePreprocessorMacroName(bool IsDefinition);
void CodeCompletePreprocessorExpression();
void CodeCompletePreprocessorMacroArgument(Scope *S,
IdentifierInfo *Macro,
MacroInfo *MacroInfo,
unsigned Argument);
void CodeCompleteNaturalLanguage();
void GatherGlobalCodeCompletions(CodeCompletionAllocator &Allocator,
CodeCompletionTUInfo &CCTUInfo,
SmallVectorImpl<CodeCompletionResult> &Results);
//@}
//===--------------------------------------------------------------------===//
// Extra semantic analysis beyond the C type system
public:
SourceLocation getLocationOfStringLiteralByte(const StringLiteral *SL,
unsigned ByteNo) const;
private:
void CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr,
const ArraySubscriptExpr *ASE=0,
bool AllowOnePastEnd=true, bool IndexNegated=false);
void CheckArrayAccess(const Expr *E);
// Used to grab the relevant information from a FormatAttr and a
// FunctionDeclaration.
struct FormatStringInfo {
unsigned FormatIdx;
unsigned FirstDataArg;
bool HasVAListArg;
};
bool getFormatStringInfo(const FormatAttr *Format, bool IsCXXMember,
FormatStringInfo *FSI);
bool CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall,
const FunctionProtoType *Proto);
bool CheckObjCMethodCall(ObjCMethodDecl *Method, SourceLocation loc,
ArrayRef<const Expr *> Args);
bool CheckBlockCall(NamedDecl *NDecl, CallExpr *TheCall,
const FunctionProtoType *Proto);
void CheckConstructorCall(FunctionDecl *FDecl,
ArrayRef<const Expr *> Args,
const FunctionProtoType *Proto,
SourceLocation Loc);
void checkCall(NamedDecl *FDecl, ArrayRef<const Expr *> Args,
unsigned NumProtoArgs, bool IsMemberFunction,
SourceLocation Loc, SourceRange Range,
VariadicCallType CallType);
bool CheckObjCString(Expr *Arg);
ExprResult CheckBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckARMBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckMipsBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool SemaBuiltinVAStart(CallExpr *TheCall);
bool SemaBuiltinUnorderedCompare(CallExpr *TheCall);
bool SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs);
public:
// Used by C++ template instantiation.
ExprResult SemaBuiltinShuffleVector(CallExpr *TheCall);
private:
bool SemaBuiltinPrefetch(CallExpr *TheCall);
bool SemaBuiltinObjectSize(CallExpr *TheCall);
bool SemaBuiltinLongjmp(CallExpr *TheCall);
ExprResult SemaBuiltinAtomicOverloaded(ExprResult TheCallResult);
ExprResult SemaAtomicOpsOverloaded(ExprResult TheCallResult,
AtomicExpr::AtomicOp Op);
bool SemaBuiltinConstantArg(CallExpr *TheCall, int ArgNum,
llvm::APSInt &Result);
enum FormatStringType {
FST_Scanf,
FST_Printf,
FST_NSString,
FST_Strftime,
FST_Strfmon,
FST_Kprintf,
FST_Unknown
};
static FormatStringType GetFormatStringType(const FormatAttr *Format);
enum StringLiteralCheckType {
SLCT_NotALiteral,
SLCT_UncheckedLiteral,
SLCT_CheckedLiteral
};
StringLiteralCheckType checkFormatStringExpr(const Expr *E,
ArrayRef<const Expr *> Args,
bool HasVAListArg,
unsigned format_idx,
unsigned firstDataArg,
FormatStringType Type,
VariadicCallType CallType,
bool inFunctionCall = true);
void CheckFormatString(const StringLiteral *FExpr, const Expr *OrigFormatExpr,
ArrayRef<const Expr *> Args, bool HasVAListArg,
unsigned format_idx, unsigned firstDataArg,
FormatStringType Type, bool inFunctionCall,
VariadicCallType CallType);
bool CheckFormatArguments(const FormatAttr *Format,
ArrayRef<const Expr *> Args,
bool IsCXXMember,
VariadicCallType CallType,
SourceLocation Loc, SourceRange Range);
bool CheckFormatArguments(ArrayRef<const Expr *> Args,
bool HasVAListArg, unsigned format_idx,
unsigned firstDataArg, FormatStringType Type,
VariadicCallType CallType,
SourceLocation Loc, SourceRange range);
void CheckNonNullArguments(const NonNullAttr *NonNull,
const Expr * const *ExprArgs,
SourceLocation CallSiteLoc);
void CheckMemaccessArguments(const CallExpr *Call,
unsigned BId,
IdentifierInfo *FnName);
void CheckStrlcpycatArguments(const CallExpr *Call,
IdentifierInfo *FnName);
void CheckStrncatArguments(const CallExpr *Call,
IdentifierInfo *FnName);
void CheckReturnStackAddr(Expr *RetValExp, QualType lhsType,
SourceLocation ReturnLoc);
void CheckFloatComparison(SourceLocation Loc, Expr* LHS, Expr* RHS);
void CheckImplicitConversions(Expr *E, SourceLocation CC = SourceLocation());
void CheckForIntOverflow(Expr *E);
void CheckUnsequencedOperations(Expr *E);
/// \brief Perform semantic checks on a completed expression. This will either
/// be a full-expression or a default argument expression.
void CheckCompletedExpr(Expr *E, SourceLocation CheckLoc = SourceLocation(),
bool IsConstexpr = false);
void CheckBitFieldInitialization(SourceLocation InitLoc, FieldDecl *Field,
Expr *Init);
public:
/// \brief Register a magic integral constant to be used as a type tag.
void RegisterTypeTagForDatatype(const IdentifierInfo *ArgumentKind,
uint64_t MagicValue, QualType Type,
bool LayoutCompatible, bool MustBeNull);
struct TypeTagData {
// Default constructor leaves members uninitialized; needed so the type
// can be stored as a DenseMap mapped value (see
// TypeTagForDatatypeMagicValues below).
TypeTagData() {}
TypeTagData(QualType Type, bool LayoutCompatible, bool MustBeNull) :
Type(Type), LayoutCompatible(LayoutCompatible),
MustBeNull(MustBeNull)
{}
// The type registered for this magic type-tag value.
QualType Type;
/// If true, \c Type should be compared with other expression's types for
/// layout-compatibility.
unsigned LayoutCompatible : 1;
// Presumably: if true, the tagged argument must be a null pointer
// (mirrors the MustBeNull parameter of RegisterTypeTagForDatatype) --
// TODO confirm against the checker implementation.
unsigned MustBeNull : 1;
};
/// A pair of ArgumentKind identifier and magic value. This uniquely
/// identifies the magic value.
typedef std::pair<const IdentifierInfo *, uint64_t> TypeTagMagicValue;
private:
/// \brief A map from magic value to type information.
OwningPtr<llvm::DenseMap<TypeTagMagicValue, TypeTagData> >
TypeTagForDatatypeMagicValues;
/// \brief Peform checks on a call of a function with argument_with_type_tag
/// or pointer_with_type_tag attributes.
void CheckArgumentWithTypeTag(const ArgumentWithTypeTagAttr *Attr,
const Expr * const *ExprArgs);
/// \brief The parser's current scope.
///
/// The parser maintains this state here.
Scope *CurScope;
mutable IdentifierInfo *Ident_super;
protected:
friend class Parser;
friend class InitializationSequence;
friend class ASTReader;
friend class ASTWriter;
public:
/// \brief Retrieve the parser's current scope.
///
/// This routine must only be used when it is certain that semantic analysis
/// and the parser are in precisely the same context, which is not the case
/// when, e.g., we are performing any kind of template instantiation.
/// Therefore, the only safe places to use this scope are in the parser
/// itself and in routines directly invoked from the parser and *never* from
/// template substitution or instantiation.
// Accessor for CurScope; see the safety caveats documented above.
Scope *getCurScope() const { return CurScope; }
IdentifierInfo *getSuperIdentifier() const;
Decl *getObjCDeclContext() const;
// Returns OriginalLexicalContext when one has been recorded, otherwise
// the current DeclContext.
DeclContext *getCurLexicalContext() const {
return OriginalLexicalContext ? OriginalLexicalContext : CurContext;
}
AvailabilityResult getCurContextAvailability() const;
// Like getCurLexicalContext(), but a category context is replaced by the
// interface it extends.
const DeclContext *getCurObjCLexicalContext() const {
const DeclContext *DC = getCurLexicalContext();
// A category implicitly has the attribute of the interface.
if (const ObjCCategoryDecl *CatD = dyn_cast<ObjCCategoryDecl>(DC))
DC = CatD->getClassInterface();
return DC;
}
};
/// \brief RAII object that enters a new expression evaluation context.
class EnterExpressionEvaluationContext {
Sema &Actions;
public:
EnterExpressionEvaluationContext(Sema &Actions,
Sema::ExpressionEvaluationContext NewContext,
Decl *LambdaContextDecl = 0,
bool IsDecltype = false)
: Actions(Actions) {
Actions.PushExpressionEvaluationContext(NewContext, LambdaContextDecl,
IsDecltype);
}
EnterExpressionEvaluationContext(Sema &Actions,
Sema::ExpressionEvaluationContext NewContext,
Sema::ReuseLambdaContextDecl_t,
bool IsDecltype = false)
: Actions(Actions) {
Actions.PushExpressionEvaluationContext(NewContext,
Sema::ReuseLambdaContextDecl,
IsDecltype);
}
~EnterExpressionEvaluationContext() {
Actions.PopExpressionEvaluationContext();
}
};
} // end namespace clang
#endif
|
3d7pt.c | /*
* Order-1, 3D 7 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Compute *result = *x - *y for two `struct timeval` values.
 *
 * NOTE: *y is used as scratch space and is modified in place (the
 * borrow/carry normalization is applied to it), exactly as in the
 * classic glibc manual example this follows.
 *
 * Returns 1 if the difference is negative, otherwise 0.
 */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  /* Borrow seconds into y so that y->tv_usec does not exceed x->tv_usec. */
  if (x->tv_usec < y->tv_usec) {
    int borrow = (y->tv_usec - x->tv_usec) / 1000000 + 1;
    y->tv_usec -= 1000000 * borrow;
    y->tv_sec += borrow;
  }
  /* Carry seconds out of y so the usec difference stays within a second. */
  if (x->tv_usec - y->tv_usec > 1000000) {
    int carry = (x->tv_usec - y->tv_usec) / 1000000;
    y->tv_usec += 1000000 * carry;
    y->tv_sec -= carry;
  }
  /* After normalization tv_usec is certainly non-negative. */
  result->tv_sec = x->tv_sec - y->tv_sec;
  result->tv_usec = x->tv_usec - y->tv_usec;
  /* The difference is negative iff x's (adjusted) seconds fall short. */
  return x->tv_sec < y->tv_sec;
}
int main(int argc, char *argv[])
{
int t, i, j, k, test;
int Nx, Ny, Nz, Nt;
if (argc > 3) {
Nx = atoi(argv[1])+2;
Ny = atoi(argv[2])+2;
Nz = atoi(argv[3])+2;
}
if (argc > 4)
Nt = atoi(argv[4]);
double ****A = (double ****) malloc(sizeof(double***)*2);
A[0] = (double ***) malloc(sizeof(double**)*Nz);
A[1] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[0][i] = (double**) malloc(sizeof(double*)*Ny);
A[1][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
// tile size information, including extra element to decide the list length
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 32;
tile_size[1] = 32;
tile_size[2] = 24;
tile_size[3] = 2048;
tile_size[4] = -1;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
const double alpha = 0.0876;
const double beta = 0.0765;
// initialize variables
//
srand(42);
for (i = 1; i < Nz; i++) {
for (j = 1; j < Ny; j++) {
for (k = 1; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - Addition: 6 && Multiplication: 2
#pragma scop
for (t = 0; t < Nt-1; t++) {
for (i = 1; i < Nz-1; i++) {
for (j = 1; j < Ny-1; j++) {
for (k = 1; k < Nx-1; k++) {
A[(t+1)%2][i][j][k] = alpha * (A[t%2][i][j][k])
+ beta * (A[t%2][i - 1][j][k] + A[t%2][i][j - 1][k] + A[t%2][i][j][k - 1] +
A[t%2][i + 1][j][k] + A[t%2][i][j + 1][k] + A[t%2][i][j][k + 1]);
}
}
}
}
#pragma endscop
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = min(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(1, "constant")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays (Causing performance degradation
/* for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
}
free(A[0][i]);
free(A[1][i]);
}
free(A[0]);
free(A[1]);
*/
return 0;
}
|
GB_unop__minv_uint8_uint8.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__minv_uint8_uint8
// op(A') function: GB_unop_tran__minv_uint8_uint8
// C type: uint8_t
// A type: uint8_t
// cast: uint8_t cij = aij
// unaryop: cij = GB_IMINV_UNSIGNED (aij, 8)
#define GB_ATYPE \
uint8_t
#define GB_CTYPE \
uint8_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint8_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GB_IMINV_UNSIGNED (x, 8) ;
// casting
#define GB_CAST(z, aij) \
uint8_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
uint8_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
uint8_t z = aij ; \
Cx [pC] = GB_IMINV_UNSIGNED (z, 8) ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MINV || GxB_NO_UINT8)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply the unary operator entrywise: Cx [p] = minv (Ax [p]) for all anz
// entries.  NOTE: this file is auto-generated (see the header comment);
// the code is left byte-identical and only comments are added here.
GrB_Info GB_unop_apply__minv_uint8_uint8
(
uint8_t *Cx, // Cx and Ax may be aliased
const uint8_t *Ax,
const int8_t *GB_RESTRICT Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
// Ab == NULL: A is not bitmap, so all anz entries are present and can be
// processed unconditionally.
if (Ab == NULL)
{
#if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
// identity operator with no typecast: a flat memcpy suffices
GB_memcpy (Cx, Ax, anz * sizeof (uint8_t), nthreads) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// Cx [p] = op (cast (Ax [p]))
uint8_t aij = Ax [p] ;
uint8_t z = aij ;
Cx [p] = GB_IMINV_UNSIGNED (z, 8) ;
}
#endif
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// skip entries not present in the bitmap
if (!Ab [p]) continue ;
uint8_t aij = Ax [p] ;
uint8_t z = aij ;
Cx [p] = GB_IMINV_UNSIGNED (z, 8) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = minv (A'): the actual transpose/apply loop is supplied by the shared
// template "GB_unop_transpose.c", specialized via the GB_* macros defined
// at the top of this (auto-generated) file.
GrB_Info GB_unop_tran__minv_uint8_uint8
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Workspaces, // per-workspace arrays for the template -- TODO confirm layout
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
clauses-1.c | /* { dg-do compile } */
/* { dg-additional-options "-std=c99" { target c } } */
int t;
#pragma omp threadprivate (t)
#pragma omp declare target
int f, l, ll, r;
/* Compile-only torture test: exercises the combined `distribute` constructs
 * inside a `declare target` region with an exhaustive set of clauses.  The
 * loop bodies are intentionally trivial; the pragmas themselves are the
 * test and must not be altered. */
void
foo (int d, int m, int i1, int i2, int p, int *idp, int s,
int nte, int tl, int nth, int g, int nta, int fi, int pp, int *q)
{
#pragma omp distribute parallel for \
private (p) firstprivate (f) collapse(1) dist_schedule(static, 16) \
if (parallel: i2) default(shared) shared(s) reduction(+:r) num_threads (nth) proc_bind(spread) \
lastprivate (l) schedule(static, 4)
for (int i = 0; i < 64; i++)
ll++;
#pragma omp distribute parallel for simd \
private (p) firstprivate (f) collapse(1) dist_schedule(static, 16) \
if (parallel: i2) default(shared) shared(s) reduction(+:r) num_threads (nth) proc_bind(spread) \
lastprivate (l) schedule(static, 4) \
safelen(8) simdlen(4) aligned(q: 32)
for (int i = 0; i < 64; i++)
ll++;
#pragma omp distribute simd \
private (p) firstprivate (f) collapse(1) dist_schedule(static, 16) \
safelen(8) simdlen(4) aligned(q: 32) reduction(+:r)
for (int i = 0; i < 64; i++)
ll++;
}
#pragma omp end declare target
/* Compile-only torture test: exercises worksharing, `target`, `teams`,
 * and `taskloop` combined constructs with near-exhaustive clause lists.
 * The pragmas are the test payload and must not be altered. */
void
bar (int d, int m, int i1, int i2, int p, int *idp, int s,
int nte, int tl, int nth, int g, int nta, int fi, int pp, int *q, int *dd)
{
/* -- host worksharing combinations -- */
#pragma omp for simd \
private (p) firstprivate (f) lastprivate (l) linear (ll:1) reduction(+:r) schedule(static, 4) collapse(1) nowait \
safelen(8) simdlen(4) aligned(q: 32)
for (int i = 0; i < 64; i++)
ll++;
#pragma omp parallel for \
private (p) firstprivate (f) if (parallel: i2) default(shared) shared(s) copyin(t) reduction(+:r) num_threads (nth) proc_bind(spread) \
lastprivate (l) linear (ll:1) ordered schedule(static, 4) collapse(1)
for (int i = 0; i < 64; i++)
ll++;
#pragma omp parallel for simd \
private (p) firstprivate (f) if (parallel: i2) default(shared) shared(s) copyin(t) reduction(+:r) num_threads (nth) proc_bind(spread) \
lastprivate (l) linear (ll:1) schedule(static, 4) collapse(1) \
safelen(8) simdlen(4) aligned(q: 32)
for (int i = 0; i < 64; i++)
ll++;
#pragma omp parallel sections \
private (p) firstprivate (f) if (parallel: i2) default(shared) shared(s) copyin(t) reduction(+:r) num_threads (nth) proc_bind(spread) \
lastprivate (l)
{
#pragma omp section
{}
#pragma omp section
{}
}
/* -- combined target constructs -- */
#pragma omp target parallel \
device(d) map (tofrom: m) if (target: i1) private (p) firstprivate (f) defaultmap(tofrom: scalar) is_device_ptr (idp) \
if (parallel: i2) default(shared) shared(s) reduction(+:r) num_threads (nth) proc_bind(spread) \
nowait depend(inout: dd[0])
;
#pragma omp target parallel for \
device(d) map (tofrom: m) if (target: i1) private (p) firstprivate (f) defaultmap(tofrom: scalar) is_device_ptr (idp) \
if (parallel: i2) default(shared) shared(s) reduction(+:r) num_threads (nth) proc_bind(spread) \
lastprivate (l) linear (ll:1) ordered schedule(static, 4) collapse(1) nowait depend(inout: dd[0])
for (int i = 0; i < 64; i++)
ll++;
#pragma omp target parallel for simd \
device(d) map (tofrom: m) if (target: i1) private (p) firstprivate (f) defaultmap(tofrom: scalar) is_device_ptr (idp) \
if (parallel: i2) default(shared) shared(s) reduction(+:r) num_threads (nth) proc_bind(spread) \
lastprivate (l) linear (ll:1) schedule(static, 4) collapse(1) \
safelen(8) simdlen(4) aligned(q: 32) nowait depend(inout: dd[0])
for (int i = 0; i < 64; i++)
ll++;
#pragma omp target teams \
device(d) map (tofrom: m) if (target: i1) private (p) firstprivate (f) defaultmap(tofrom: scalar) is_device_ptr (idp) \
shared(s) default(shared) reduction(+:r) num_teams(nte) thread_limit(tl) nowait depend(inout: dd[0])
;
#pragma omp target teams distribute \
device(d) map (tofrom: m) if (target: i1) private (p) firstprivate (f) defaultmap(tofrom: scalar) is_device_ptr (idp) \
shared(s) default(shared) reduction(+:r) num_teams(nte) thread_limit(tl) \
collapse(1) dist_schedule(static, 16) nowait depend(inout: dd[0])
for (int i = 0; i < 64; i++)
;
#pragma omp target teams distribute parallel for \
device(d) map (tofrom: m) if (target: i1) private (p) firstprivate (f) defaultmap(tofrom: scalar) is_device_ptr (idp) \
shared(s) default(shared) reduction(+:r) num_teams(nte) thread_limit(tl) \
collapse(1) dist_schedule(static, 16) \
if (parallel: i2) num_threads (nth) proc_bind(spread) \
lastprivate (l) schedule(static, 4) nowait depend(inout: dd[0])
for (int i = 0; i < 64; i++)
ll++;
#pragma omp target teams distribute parallel for simd \
device(d) map (tofrom: m) if (target: i1) private (p) firstprivate (f) defaultmap(tofrom: scalar) is_device_ptr (idp) \
shared(s) default(shared) reduction(+:r) num_teams(nte) thread_limit(tl) \
collapse(1) dist_schedule(static, 16) \
if (parallel: i2) num_threads (nth) proc_bind(spread) \
lastprivate (l) schedule(static, 4) \
safelen(8) simdlen(4) aligned(q: 32) nowait depend(inout: dd[0])
for (int i = 0; i < 64; i++)
ll++;
#pragma omp target teams distribute simd \
device(d) map (tofrom: m) if (target: i1) private (p) firstprivate (f) defaultmap(tofrom: scalar) is_device_ptr (idp) \
shared(s) default(shared) reduction(+:r) num_teams(nte) thread_limit(tl) \
collapse(1) dist_schedule(static, 16) \
safelen(8) simdlen(4) aligned(q: 32) nowait depend(inout: dd[0])
for (int i = 0; i < 64; i++)
ll++;
#pragma omp target simd \
device(d) map (tofrom: m) if (target: i1) private (p) firstprivate (f) defaultmap(tofrom: scalar) is_device_ptr (idp) \
safelen(8) simdlen(4) lastprivate (l) linear(ll: 1) aligned(q: 32) reduction(+:r) \
nowait depend(inout: dd[0])
for (int i = 0; i < 64; i++)
ll++;
/* -- taskloop combinations -- */
#pragma omp taskloop simd \
private (p) firstprivate (f) lastprivate (l) shared (s) default(shared) grainsize (g) collapse(1) untied if(taskloop: i1) final(fi) mergeable nogroup priority (pp) \
safelen(8) simdlen(4) linear(ll: 1) aligned(q: 32) reduction(+:r)
for (int i = 0; i < 64; i++)
ll++;
#pragma omp taskwait
#pragma omp taskloop simd \
private (p) firstprivate (f) lastprivate (l) shared (s) default(shared) num_tasks (nta) collapse(1) if(taskloop: i1) final(fi) priority (pp) \
safelen(8) simdlen(4) linear(ll: 1) aligned(q: 32) reduction(+:r)
for (int i = 0; i < 64; i++)
ll++;
/* -- teams constructs nested in a bare target region -- */
#pragma omp target nowait depend(inout: dd[0])
#pragma omp teams distribute \
private(p) firstprivate (f) shared(s) default(shared) reduction(+:r) num_teams(nte) thread_limit(tl) \
collapse(1) dist_schedule(static, 16)
for (int i = 0; i < 64; i++)
;
#pragma omp target
#pragma omp teams distribute parallel for \
private(p) firstprivate (f) shared(s) default(shared) reduction(+:r) num_teams(nte) thread_limit(tl) \
collapse(1) dist_schedule(static, 16) \
if (parallel: i2) num_threads (nth) proc_bind(spread) \
lastprivate (l) schedule(static, 4)
for (int i = 0; i < 64; i++)
ll++;
#pragma omp target
#pragma omp teams distribute parallel for simd \
private(p) firstprivate (f) shared(s) default(shared) reduction(+:r) num_teams(nte) thread_limit(tl) \
collapse(1) dist_schedule(static, 16) \
if (parallel: i2) num_threads (nth) proc_bind(spread) \
lastprivate (l) schedule(static, 4) \
safelen(8) simdlen(4) aligned(q: 32)
for (int i = 0; i < 64; i++)
ll++;
#pragma omp target
#pragma omp teams distribute simd \
private(p) firstprivate (f) shared(s) default(shared) reduction(+:r) num_teams(nte) thread_limit(tl) \
collapse(1) dist_schedule(static, 16) \
safelen(8) simdlen(4) aligned(q: 32)
for (int i = 0; i < 64; i++)
ll++;
}
|
pmv-OpenMP-b.c | #include <stdlib.h> // biblioteca con funciones atoi(), malloc() y free()
#include <stdio.h> // biblioteca donde se encuentra la función printf()
#ifdef _OPENMP
#include <omp.h>
#else
#define omp_set_dynamic(0);
#define omp_set_num_threads(4);
#endif
/*
 * Parallel matrix-vector product v2 = M * v1 using OpenMP.
 * argv[1] = N, the dimension of the square matrix.
 * Prints the elapsed time of the compute phase and (for N < 15) the data.
 */
int main(int argc, char ** argv){
	int **M;
	int *v1, *v2;
	int i, k, N;
	double cgt1, cgt2, ncgt; // wall-clock timestamps and elapsed time
	time_t t;
	// Seed rand() from the current time.
	srand((unsigned) time(&t));
	// The matrix dimension is the first command-line argument.
	if(argc < 2){
		fprintf(stderr,"Falta iteraciones\n");
		exit(-1);
	}
	N = atoi(argv[1]);
	// == Memory allocation
	// ====================================================>
	v1 = (int *) malloc(N*sizeof(int));
	v2 = (int *) malloc(N*sizeof(int));
	if ( v1 == NULL || v2 == NULL ){
		printf("Error en la reserva de espacio para los vectores\n");
		exit(-2);
	}
	M = (int**) malloc (N*sizeof(int*));
	// Row allocation is done serially: calling exit() from inside a
	// parallel region is non-conforming OpenMP, so error handling is
	// only safe when this loop runs on a single thread.
	for(i = 0; i<N; i++){
		M[i] = (int*) malloc (N*sizeof(int));
		if( M[i] == NULL ){
			printf("Error en la reserva de espacio para los vectores\n");
			exit(-2);
		}
	}
	// == Initialization
	// ====================================================>
	// rand() keeps hidden global state and is not required to be
	// thread-safe, so calling it from parallel loops is a data race;
	// initialize the matrix and vectors serially instead.
	for(i = 0; i<N; i++){
		for(k = 0; k<N; k++)
			M[i][k] = rand() % 8;
	}
	for(i = 0; i<N; i++){
		v1[i] = rand() % 6;
		v2[i] = 0;
	}
	// == Computation
	// ====================================================>
	cgt1 = omp_get_wtime();
	// v2 is shared so all threads can see it; each thread accumulates a
	// private partial sum over its chunk of the row and publishes it with
	// a single atomic add, avoiding interleaved updates.
	for(i = 0; i<N; i++){
		#pragma omp parallel shared(M,i,N,v2,v1) private(k) default(none)
		{
			int sumalocal = 0;
			// each thread sums part of row i against v1
			#pragma omp for
			for(k = 0; k<N; k++)
				sumalocal += M[i][k] * v1[k];
			// concurrent updates of the shared v2[i] must be atomic
			#pragma omp atomic
			v2[i] += sumalocal;
		}
	}
	cgt2 = omp_get_wtime();
	ncgt = (double)(cgt2 - cgt1);
	// == Report
	// ====================================================>
	printf("Tiempo(seg.):%11.9f\n", ncgt);
	printf("Tamaño de los vectores: %d\n", N);  // %d: N is a signed int
	printf("\tv1 = %dElem -> %zu bytes\n\tv2 = %dElem -> %zu bytes\n", N, N*sizeof(int), N, N*sizeof(int));
	printf("Tamaño de la matriz: %dx%d -> %zu bytes\n", N, N, N*N*sizeof(int));
	// Printing the first and last component keeps compiler optimizations
	// from eliminating the computation above.
	printf("v2[0] = %d ... v2[N-1] = %d \n", v2[0], v2[N-1]);
	// For small sizes (N < 15) show the full data.
	if(N < 15){
		printf("\n----------- Matriz M ----------- \n");
		for(i = 0; i<N; i++){
			for(k = 0; k<N; k++)
				printf("%d\t", M[i][k]);
			printf("\n");
		}
		printf("\n----------- Vector V1 ----------- \n");
		for(i = 0; i<N; i++)
			printf("%d\t", v1[i]);
		printf("\n");
		printf("\n----------- Vector V2----------- \n");
		for(i = 0; i<N; i++)
			printf("%d\t", v2[i]);
		printf("\n");
	}
	// == Free memory
	// ====================================================>
	free(v1);
	free(v2);
	#pragma omp parallel for shared(M,N) private(i) default(none)
	for(i = 0; i<N; i++)
		free(M[i]);
	free(M);
	return 0;
}
ordered-2.c | /* { dg-do compile } */
/* f1 exercises parser diagnostics for the OpenMP "ordered" construct:
   the stray token after the first pragma, and the missing statement after
   the second, must each trigger the dg-error diagnostics noted inline. */
void f1(void)
{
#pragma omp ordered asdf /* { dg-error "expected" } */
#pragma omp ordered
} /* { dg-error "expected expression" } */
|
GB_unop__erf_fp64_fp64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__erf_fp64_fp64)
// op(A') function: GB (_unop_tran__erf_fp64_fp64)
// C type: double
// A type: double
// cast: double cij = aij
// unaryop: cij = erf (aij)
#define GB_ATYPE \
double
#define GB_CTYPE \
double
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
double aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = erf (x) ;
// casting
#define GB_CAST(z, aij) \
double z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
double aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
double z = aij ; \
Cx [pC] = erf (z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ERF || GxB_NO_FP64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = erf (Ax [p]) for all p in [0, anz); no typecast is needed since
// both C and A are double.  Returns GrB_NO_VALUE if the ERF/FP64 kernels
// are compiled out (see GB_DISABLE above).
GrB_Info GB (_unop_apply__erf_fp64_fp64)
(
    double *Cx,         // Cx and Ax may be aliased
    const double *Ax,
    const int8_t *restrict Ab,   // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // no bitmap: every one of the anz entries is present
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            double aij = Ax [p] ;
            double z = aij ;
            Cx [p] = erf (z) ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            // skip entries absent from the bitmap
            if (!Ab [p]) continue ;
            double aij = Ax [p] ;
            double z = aij ;
            Cx [p] = erf (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = erf (A'): transpose, typecast, and apply in one pass.  The actual
// loop nest lives in GB_unop_transpose.c, specialized by the GB_* macros
// defined earlier in this generated file.
GrB_Info GB (_unop_tran__erf_fp64_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
DRB003-antidep2-orig-yes.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
A two-level loop nest with loop carried anti-dependence on the outer level.
Data race pair: a[i][j]@67:7 vs. a[i+1][j]@67:18
*/
#include <stdio.h>
#include <stdlib.h>
/* DataRaceBench kernel: the second loop nest carries an anti-dependence on
 * the OUTER loop (iteration i reads a[i+1][j] while iteration i+1 writes it),
 * so parallelizing over i makes a[i][j] vs a[i+1][j] an INTENTIONAL data
 * race — do not "fix" it; it is what this benchmark tests for. */
int main(int argc,char *argv[])
{
  int i, j;
  int len = 20;
  double a[20][20];
  /* race-free initialization of a */
  #pragma omp parallel for private(j)
  for (i=0; i< len; i++)
  #pragma omp parallel for simd
    for (j=0; j<len; j++)
      a[i][j] = (i * len + j + 0.5);
  /* the racy update: outer-level loop-carried anti-dependence */
  for (i = 0; i < len - 1; i += 1) {
  #pragma omp parallel for simd
    for (j = 0; j < len ; j += 1) {
      a[i][j] += a[i + 1][j];
    }
  }
  /* ordered printing of the result */
  #pragma omp parallel for private(j) ordered
  for (i=0; i< len; i++)
  #pragma omp parallel for simd ordered
    for (j=0; j<len; j++)
  #pragma omp ordered simd
      printf("%lf",a[i][j]);
  printf ("a[10][10]=%f\n", a[10][10]);
  return 0;
}
|
debug_private.c | // This testcase checks emission of debug info for variables inside
// private/firstprivate/lastprivate.
// REQUIRES: x86_64-linux
// RUN: %clang_cc1 -no-opaque-pointers -debug-info-kind=constructor -x c -verify -triple x86_64-pc-linux-gnu -fopenmp -emit-llvm %s -o - | FileCheck %s
// RUN: %clang_cc1 -no-opaque-pointers -debug-info-kind=line-directives-only -x c -verify -triple x86_64-pc-linux-gnu -fopenmp -emit-llvm %s -o - | FileCheck %s --check-prefix=NEG
// RUN: %clang_cc1 -no-opaque-pointers -debug-info-kind=line-tables-only -x c -verify -triple x86_64-pc-linux-gnu -fopenmp -emit-llvm %s -o - | FileCheck %s --check-prefix=NEG
// RUN: %clang_cc1 -no-opaque-pointers -debug-info-kind=limited -x c -verify -triple x86_64-pc-linux-gnu -fopenmp -emit-llvm %s -o - | FileCheck %s
// expected-no-diagnostics
// CHECK: define internal i32 @.omp_task_entry.
// CHECK: call void @llvm.dbg.declare(metadata i32** %.priv.ptr.addr.i, metadata [[PRIV1:![0-9]+]], metadata !DIExpression(DW_OP_deref))
// CHECK: call void @llvm.dbg.declare(metadata i32** %.priv.ptr.addr1.i, metadata [[PRIV2:![0-9]+]], metadata !DIExpression(DW_OP_deref))
// CHECK: call void @llvm.dbg.declare(metadata i32** %.firstpriv.ptr.addr.i, metadata [[FPRIV:![0-9]+]], metadata !DIExpression(DW_OP_deref))
// NEG-NOT: call void @llvm.dbg.declare
// CHECK: [[PRIV1]] = !DILocalVariable(name: "priv1"
// CHECK: [[PRIV2]] = !DILocalVariable(name: "priv2"
// CHECK: [[FPRIV]] = !DILocalVariable(name: "fpriv"
extern int printf(const char *, ...);
// Recursive task-based sum; exists so the FileCheck lines above can verify
// that llvm.dbg.declare is emitted for priv1/priv2 (private) and fpriv
// (firstprivate) inside the outlined task entry.  Do not rename these
// variables — the CHECK patterns match them by name.
int foo(int n) {
  int res, priv1, priv2, fpriv;
  fpriv = n + 4;
  if (n < 2)
    return n;
  else {
// res is shared so the parent can read it after the taskwait
#pragma omp task shared(res) private(priv1, priv2) firstprivate(fpriv)
    {
      priv1 = n;
      priv2 = n + 2;
      printf("Task n=%d,priv1=%d,priv2=%d,fpriv=%d\n", n, priv1, priv2, fpriv);
      res = priv1 + priv2 + fpriv + foo(n - 1);
    }
// wait for the child task before reading res
#pragma omp taskwait
    return res;
  }
}
// Driver: exercises foo() so the task outlining above is actually emitted.
int main() {
  int n = 10;
  printf("foo(%d) = %d\n", n, foo(n));
  return 0;
}
|
3d7pt_var.lbpar.c | #include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
/*
* Order-1, 3D 7 point stencil with variable coefficients
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Compute *result = *x - *y for two `struct timeval` values.
 *
 * As a side effect *y is normalized so that the microsecond difference is
 * representable without a borrow.  Returns 1 when the difference is
 * negative (x earlier than y), otherwise 0.
 */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  /* Borrow whole seconds into y when x has fewer microseconds. */
  if (x->tv_usec < y->tv_usec)
  {
    int borrow = (y->tv_usec - x->tv_usec) / 1000000 + 1;
    y->tv_sec += borrow;
    y->tv_usec -= 1000000 * borrow;
  }
  /* Carry excess microseconds from y when the gap exceeds one second. */
  if (x->tv_usec - y->tv_usec > 1000000)
  {
    int carry = (x->tv_usec - y->tv_usec) / 1000000;
    y->tv_sec -= carry;
    y->tv_usec += 1000000 * carry;
  }
  /* With y normalized, the component-wise difference is the answer and
   * tv_usec is guaranteed non-negative. */
  result->tv_sec = x->tv_sec - y->tv_sec;
  result->tv_usec = x->tv_usec - y->tv_usec;
  /* Negative iff x's (adjusted) seconds precede y's. */
  return x->tv_sec < y->tv_sec;
}
/*
 * Driver for the time-tiled order-1 3D 7-point variable-coefficient
 * stencil.  Usage: prog Nx Ny Nz Nt.  Runs the tiled sweep TESTS times
 * and reports the best wall-clock time.
 */
int main(int argc, char *argv[])
{
  int i, j, k, m, test;
  int Nx, Ny, Nz, Nt;
  /* All four sizes are required.  The original read them unconditionally,
   * leaving Nx..Nt uninitialized (undefined behavior) when arguments were
   * missing; fail fast with a usage message instead. */
  if (argc < 5) {
    fprintf(stderr, "Usage: %s Nx Ny Nz Nt\n", argv[0]);
    return 1;
  }
  Nx = atoi(argv[1])+2;   /* +2: one halo cell on each side */
  Ny = atoi(argv[2])+2;
  Nz = atoi(argv[3])+2;
  Nt = atoi(argv[4]);
  /* allocate the arrays; A[2] is a double buffer: read A[t%2], write A[(t+1)%2] */
  double ****A = (double ****) malloc(sizeof(double***)*2);
  for(m=0; m<2;m++){
    A[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      A[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0;j<Ny;j++){
        A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }
  /* one coefficient grid per stencil point (center + 6 neighbors) */
  double ****coef = (double ****) malloc(sizeof(double***)*7);
  for(m=0; m<7;m++){
    coef[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0;j<Ny;j++){
        coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }
  /* tile size information, including extra element to decide the list length */
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  /* The list is modified here before source-to-source transformations */
  tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
  tile_size[0] = 32;
  tile_size[1] = 32;
  tile_size[2] = 16;
  tile_size[3] = 64;
  tile_size[4] = -1;
  /* for timekeeping */
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff=1.e100;
  const int BASE = 1024;
  /* Initialize BOTH time buffers over the FULL domain including the halo.
   * The original started these loops at index 1 and only filled A[0], so
   * the stencil read uninitialized boundary cells and an uninitialized
   * second buffer; the values are random either way, so filling everything
   * changes nothing meaningful but removes the indeterminate reads. */
  srand(42);
  for (m = 0; m < 2; m++) {
    for (i = 0; i < Nz; i++) {
      for (j = 0; j < Ny; j++) {
        for (k = 0; k < Nx; k++) {
          A[m][i][j][k] = 1.0 * (rand() % BASE);
        }
      }
    }
  }
  for (m=0; m<7; m++) {
    for (i=0; i<Nz; i++) {
      for (j=0; j<Ny; j++) {
        for (k=0; k<Nx; k++) {
          coef[m][i][j][k] = 1.0 * (rand() % BASE);
        }
      }
    }
  }
#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
#pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
#pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif
  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif
  for(test=0; test<TESTS; test++){
    gettimeofday(&start, 0);
    /* Tiled sweep generated by PLUTO/CLooG; per-point work is
     * 6 additions and 7 multiplications.  (A pasted glibc license
     * header that preprocessing left here has been condensed.) */
    int t1, t2, t3, t4, t5, t6, t7, t8;
    int lb, ub, lbp, ubp, lb2, ub2;
    register int lbv, ubv;
/* Start of CLooG code */
if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) {
  for (t1=-1;t1<=floord(Nt-2,16);t1++) {
    lbp=max(ceild(t1,2),ceild(32*t1-Nt+3,32));
    ubp=min(floord(Nt+Nz-4,32),floord(16*t1+Nz+13,32));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
    for (t2=lbp;t2<=ubp;t2++) {
      for (t3=max(max(0,ceild(32*t2-Nz-12,16)),t1);t3<=min(min(min(floord(Nt+Ny-4,16),floord(16*t1+Ny+29,16)),floord(32*t2+Ny+28,16)),floord(32*t1-32*t2+Nz+Ny+27,16));t3++) {
        for (t4=max(max(max(0,ceild(t1-3,4)),ceild(32*t2-Nz-60,64)),ceild(16*t3-Ny-60,64));t4<=min(min(min(min(floord(Nt+Nx-4,64),floord(16*t1+Nx+29,64)),floord(32*t2+Nx+28,64)),floord(16*t3+Nx+12,64)),floord(32*t1-32*t2+Nz+Nx+27,64));t4++) {
          for (t5=max(max(max(max(max(0,16*t1),32*t1-32*t2+1),32*t2-Nz+2),16*t3-Ny+2),64*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,16*t1+31),32*t2+30),16*t3+14),64*t4+62),32*t1-32*t2+Nz+29);t5++) {
            for (t6=max(max(32*t2,t5+1),-32*t1+32*t2+2*t5-31);t6<=min(min(32*t2+31,-32*t1+32*t2+2*t5),t5+Nz-2);t6++) {
              for (t7=max(16*t3,t5+1);t7<=min(16*t3+15,t5+Ny-2);t7++) {
                lbv=max(64*t4,t5+1);
                ubv=min(64*t4+63,t5+Nx-2);
#pragma ivdep
#pragma vector always
                for (t8=lbv;t8<=ubv;t8++) {
                  A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = (((((((coef[0][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (coef[1][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)])) + (coef[2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)])) + (coef[3][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1])) + (coef[4][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)])) + (coef[5][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)])) + (coef[6][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) + 1]));;
                }
              }
            }
          }
        }
      }
    }
  }
}
/* End of CLooG code */
    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    min_tdiff = min(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }
  PRINT_RESULTS(1, "variable no-symmetry")
#ifdef LIKWID_PERFMON
#pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif
  /* Free allocated arrays */
  for(i=0; i<Nz; i++){
    for(j=0;j<Ny;j++){
      free(A[0][i][j]);
      free(A[1][i][j]);
    }
    free(A[0][i]);
    free(A[1][i]);
  }
  free(A[0]);
  free(A[1]);
  for(m=0; m<7;m++){
    for(i=0; i<Nz; i++){
      for(j=0;j<Ny;j++){
        free(coef[m][i][j]);
      }
      free(coef[m][i]);
    }
    free(coef[m]);
  }
  return 0;
}
|
omp-parallel-for.c | #include <omp.h>
#include <stdio.h>
#define LEN 20
/* Minimal parallel-for demo: each slot of the array records the id of the
 * OpenMP thread that executed that iteration.  Output is not printed. */
int main(void)
{
	int owner[LEN] = {0};
	int idx = 0;
	#pragma omp parallel for
	for (idx = 0; idx < LEN; idx++)
	{
		owner[idx] = omp_get_thread_num();
	}
	return 0;
}
|
GB_binop__isgt_uint32.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__isgt_uint32)
// A.*B function (eWiseMult): GB (_AemultB_08__isgt_uint32)
// A.*B function (eWiseMult): GB (_AemultB_02__isgt_uint32)
// A.*B function (eWiseMult): GB (_AemultB_04__isgt_uint32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__isgt_uint32)
// A*D function (colscale): GB (_AxD__isgt_uint32)
// D*A function (rowscale): GB (_DxB__isgt_uint32)
// C+=B function (dense accum): GB (_Cdense_accumB__isgt_uint32)
// C+=b function (dense accum): GB (_Cdense_accumb__isgt_uint32)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__isgt_uint32)
// C=scalar+B GB (_bind1st__isgt_uint32)
// C=scalar+B' GB (_bind1st_tran__isgt_uint32)
// C=A+scalar GB (_bind2nd__isgt_uint32)
// C=A'+scalar GB (_bind2nd_tran__isgt_uint32)
// C type: uint32_t
// A type: uint32_t
// A pattern? 0
// B type: uint32_t
// B pattern? 0
// BinaryOp: cij = (aij > bij)
#define GB_ATYPE \
uint32_t
#define GB_BTYPE \
uint32_t
#define GB_CTYPE \
uint32_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint32_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
uint32_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint32_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x > y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ISGT || GxB_NO_UINT32 || GxB_NO_ISGT_UINT32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// Disabled stub: ISGT is not one of those accumulable ops, so the
// generator emits no real dense C+=A+B kernel for this type; the
// placeholder is kept under #if 0 for template uniformity.
void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all three matrices are dense; the loop body comes from the
// shared template, specialized by the GB_* macros defined above.
void GB (_Cdense_ewise3_noaccum__isgt_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix into a dense matrix, using the
// pre-sliced tasks in B_ek_slicing.
GrB_Info GB (_Cdense_accumB__isgt_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar into a dense matrix.
GrB_Info GB (_Cdense_accumb__isgt_uint32)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type uint32_t
        uint32_t bwork = (*((uint32_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): unreachable — the return inside the braces above always
    // fires first; harmless artifact of the code generator.
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D.
GrB_Info GB (_AxD__isgt_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t *restrict Cx = (uint32_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D.
GrB_Info GB (_DxB__isgt_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t *restrict Cx = (uint32_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, or C<!M>=A+B, with optional mask M
// (structural or valued, possibly complemented).
GrB_Info GB (_AaddB__isgt_uint32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    uint32_t alpha_scalar ;
    uint32_t beta_scalar ;
    // alpha/beta are only meaningful for eWiseUnion, where they replace
    // missing entries of A and B respectively.
    if (is_eWiseUnion)
    {
        alpha_scalar = (*((uint32_t *) alpha_scalar_in)) ;
        beta_scalar = (*((uint32_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult (method 08): C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is
// sparse or hypersparse.
GrB_Info GB (_AemultB_08__isgt_uint32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 02): C<#> = A.*B when A is sparse/hyper and B is
// bitmap/full.  GB_BINOP_FLIP is 0 for ISGT (its flip is handled by the
// caller), so only the non-flipped template branch is compiled here.
GrB_Info GB (_AemultB_02__isgt_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 04): C<M> = A.*B where M is sparse/hyper and both A
// and B are bitmap/full.
GrB_Info GB (_AemultB_04__isgt_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is bitmap.
GrB_Info GB (_AemultB_bitmap__isgt_uint32)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = (x > Bx [p]): apply the operator with the scalar bound to the
// first argument; Bb (if non-NULL) is B's bitmap of present entries.
GrB_Info GB (_bind1st__isgt_uint32)
(
    GB_void *Cx_output, // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t *Cx = (uint32_t *) Cx_output ;
    uint32_t x = (*((uint32_t *) x_input)) ;
    uint32_t *Bx = (uint32_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip entries absent from B's bitmap
        if (!GBB (Bb, p)) continue ;
        uint32_t bij = GBX (Bx, p, false) ;
        Cx [p] = (x > bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = (Ax [p] > y): apply the operator with the scalar bound to the
// second argument; Ab (if non-NULL) is A's bitmap of present entries.
GrB_Info GB (_bind2nd__isgt_uint32)
(
    GB_void *Cx_output, // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    uint32_t *Cx = (uint32_t *) Cx_output ;
    uint32_t *Ax = (uint32_t *) Ax_input ;
    uint32_t y = (*((uint32_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries absent from A's bitmap
        if (!GBB (Ab, p)) continue ;
        uint32_t aij = GBX (Ax, p, false) ;
        Cx [p] = (aij > y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint32_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x > aij) ; \
}
// C = op (x, A'): transpose A and apply with the scalar bound to the
// first argument; uses the GB_CAST_OP defined just above.
GrB_Info GB (_bind1st_tran__isgt_uint32)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
    uint32_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t x = (*((const uint32_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for the rest of this generated file
    #undef  GB_ATYPE
    #define GB_ATYPE \
    uint32_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint32_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij > y) ; \
}
// C = op (A', y): transpose A and apply with the scalar bound to the
// second argument; uses the GB_CAST_OP defined just above.
GrB_Info GB (_bind2nd_tran__isgt_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t y = (*((const uint32_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
resize_bicubic.h | // Copyright 2018 Xiaomi, Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef MACE_KERNELS_RESIZE_BICUBIC_H_
#define MACE_KERNELS_RESIZE_BICUBIC_H_
#include <algorithm>
#include <memory>
#include <vector>
#include "mace/core/future.h"
#include "mace/core/tensor.h"
#include "mace/utils/logging.h"
#ifdef MACE_ENABLE_OPENCL
#include "mace/core/runtime/opencl/cl2_header.h"
#endif // MACE_ENABLE_OPENCL
namespace mace {
namespace kernels {
static const int64_t kTableSize = (1 << 10);

// Builds the lookup table of bicubic convolution coefficients (Keys kernel,
// A = -0.75, see https://en.wikipedia.org/wiki/Bicubic_interpolation).
// Entry pair i holds the kernel evaluated at t = i / kTableSize (near tap)
// and at t + 1 (far tap).  The caller owns the returned array.
inline const float* InitCoeffsTable() {
  static const double A = -0.75;
  float* table = new float[(kTableSize + 1) * 2];
  for (int i = 0; i <= kTableSize; ++i) {
    // Near-tap kernel on [0, 1): (A + 2)t^3 - (A + 3)t^2 + 1.
    float t = i * 1.0 / kTableSize;
    table[i * 2] = ((A + 2) * t - (A + 3)) * t * t + 1;
    // Far-tap kernel on [1, 2): A*t^3 - 5A*t^2 + 8A*t - 4A, at t + 1.
    t += 1.0;
    table[i * 2 + 1] = ((A * t - 5 * A) * t + 8 * A) * t - 4 * A;
  }
  return table;
}
// Returns the process-wide bicubic coefficients table, building it on first
// use.  The function-local static makes initialization thread-safe (C++11);
// the table is intentionally never freed.
inline const float* GetCoeffsTable() {
  // Static so that we initialize it on first use
  static const float* coeffs_tab = InitCoeffsTable();
  return coeffs_tab;
}
// Clamps val into the valid index range [0, limit - 1].
inline int64_t Bound(int64_t val, int64_t limit) {
  const int64_t non_negative = std::max<int64_t>(0ll, val);
  return std::min<int64_t>(limit - 1ll, non_negative);
}
// Computes the four bicubic tap weights and the four (clamped) source
// indices for output coordinate out_loc at the given scale factor.
// NOTE(review): this uses std::array but <array> is not included by this
// header directly -- presumably pulled in transitively; confirm and add it.
inline void GetWeightsAndIndices(float scale, int64_t out_loc, int64_t limit,
                                 std::array<float, 4>* weights,
                                 std::array<int64_t, 4>* indices) {
  const int64_t in_loc = scale * out_loc;        // integer source position
  const float delta = scale * out_loc - in_loc;  // fractional part in [0, 1)
  // Table index; lrintf may round up to kTableSize, which is still in range
  // (the table has kTableSize + 1 coefficient pairs).
  const int64_t offset = lrintf(delta * kTableSize);
  const float* coeffs_tab = GetCoeffsTable();
  // Taps at in_loc-1 and in_loc use the "far"/"near" kernels at offset;
  // taps at in_loc+1 and in_loc+2 mirror via (kTableSize - offset).
  *weights = {{coeffs_tab[offset * 2 + 1], coeffs_tab[offset * 2],
               coeffs_tab[(kTableSize - offset) * 2],
               coeffs_tab[(kTableSize - offset) * 2 + 1]}};
  // Indices are clamped at the borders (edge replication).
  *indices = {{Bound(in_loc - 1, limit), Bound(in_loc, limit),
               Bound(in_loc + 1, limit), Bound(in_loc + 2, limit)}};
}
// Dot product of the four bicubic tap weights with the four sampled values.
inline float Interpolate1D(const std::array<float, 4>& weights,
                           const std::array<float, 4>& values) {
  float acc = 0.0f;
  for (std::size_t k = 0; k < 4; ++k) {
    acc += values[k] * weights[k];
  }
  return acc;
}
// Maps an output coordinate step to an input coordinate step.  With
// align_corners (and more than one output pixel) the corner pixels of input
// and output coincide, hence the (size - 1) ratio; otherwise the plain
// size ratio is used.
inline float CalculateResizeScale(index_t in_size,
                                  index_t out_size,
                                  bool align_corners) {
  if (align_corners && out_size > 1) {
    return (in_size - 1) / static_cast<float>(out_size - 1);
  }
  return in_size / static_cast<float>(out_size);
}
// Bicubic-resizes a batch of NCHW float images from (in_height, in_width)
// to (out_height, out_width).  For each output pixel a 4x4 input patch is
// sampled: four horizontal 1-D interpolations, then one vertical
// interpolation over their results.
//
// FIX: the 4-element `coeff` scratch array was previously declared once,
// before the `omp parallel for`, making it shared by every thread and
// written concurrently -- a data race that could corrupt outputs.  It is now
// declared inside the loop body so each thread has its own private copy
// (all four entries are written before being read, so dropping the zero
// initialization does not change results).
inline void ResizeImage(const float *images,
                        const index_t batch_size,
                        const index_t in_height,
                        const index_t in_width,
                        const index_t out_height,
                        const index_t out_width,
                        const index_t channels,
                        const float height_scale,
                        const float width_scale,
                        float *output) {
#pragma omp parallel for collapse(2)
  for (index_t b = 0; b < batch_size; ++b) {
    for (index_t y = 0; y < out_height; ++y) {
      std::array<float, 4> y_weights;
      std::array<index_t, 4> y_indices;
      GetWeightsAndIndices(height_scale, y, in_height, &y_weights,
                           &y_indices);
      for (index_t x = 0; x < out_width; ++x) {
        std::array<float, 4> x_weights;
        std::array<index_t, 4> x_indices;
        GetWeightsAndIndices(width_scale, x, in_width, &x_weights,
                             &x_indices);
        for (index_t c = 0; c < channels; ++c) {
          // Use a 4x4 patch to compute the interpolated output value at
          // (b, y, x, c).
          const float *channel_input_ptr =
              images + (b * channels + c) * in_height * in_width;
          float *channel_output_ptr =
              output + (b * channels + c) * out_height * out_width;
          std::array<float, 4> coeff;  // per-thread row interpolation results
          for (index_t i = 0; i < 4; ++i) {
            const std::array<float, 4> values = {
                {static_cast<float>(channel_input_ptr
                                    [y_indices[i] * in_width + x_indices[0]]),
                 static_cast<float>(channel_input_ptr
                                    [y_indices[i] * in_width + x_indices[1]]),
                 static_cast<float>(channel_input_ptr
                                    [y_indices[i] * in_width + x_indices[2]]),
                 static_cast<float>(channel_input_ptr
                                    [y_indices[i] * in_width + x_indices[3]])}};
            coeff[i] = Interpolate1D(x_weights, values);
          }
          channel_output_ptr[y * out_width + x] =
              Interpolate1D(y_weights, coeff);
        }
      }
    }
  }
}
// Shared state for the per-device ResizeBicubic implementations: validates
// the requested output size and stores it with the align_corners flag.
struct ResizeBicubicFunctorBase {
  // `size` must hold exactly {out_height, out_width}; the check runs before
  // the elements are read, so keep the check-then-assign order.
  ResizeBicubicFunctorBase(const std::vector<index_t> &size,
                           bool align_corners)
      : align_corners_(align_corners) {
    MACE_CHECK(size.size() == 2);
    out_height_ = size[0];
    out_width_ = size[1];
  }

 protected:
  bool align_corners_;  // corner pixels of input/output coincide when true
  index_t out_height_;
  index_t out_width_;
};
// Primary template; specialized per device / data type below.
template<DeviceType D, typename T>
struct ResizeBicubicFunctor;

// CPU float implementation: bicubic resize of NCHW float tensors.
template<>
struct ResizeBicubicFunctor<DeviceType::CPU, float>
    : ResizeBicubicFunctorBase {
  ResizeBicubicFunctor(const std::vector<index_t> &size, bool align_corners)
      : ResizeBicubicFunctorBase(size, align_corners) {}

  // Resizes `input` (N, C, H, W) into `output` (N, C, out_height_,
  // out_width_).  Falls back to a plain copy when the size is unchanged.
  // Returns MACE_SUCCESS; Resize() failures propagate via
  // MACE_RETURN_IF_ERROR.
  MaceStatus operator()(const Tensor *input,
                        Tensor *output,
                        StatsFuture *future) {
    MACE_UNUSED(future);
    const index_t batch = input->dim(0);
    const index_t channels = input->dim(1);
    const index_t in_height = input->dim(2);
    const index_t in_width = input->dim(3);

    index_t out_height = out_height_;
    index_t out_width = out_width_;
    MACE_CHECK(out_height > 0 && out_width > 0);
    std::vector<index_t> out_shape{batch, channels, out_height, out_width};
    MACE_RETURN_IF_ERROR(output->Resize(out_shape));

    // RAII guards keep the tensor buffers mapped for the rest of this scope.
    Tensor::MappingGuard input_mapper(input);
    Tensor::MappingGuard output_mapper(output);
    const float *input_data = input->data<float>();
    float *output_data = output->mutable_data<float>();

    // Identity resize: straight copy, no interpolation needed.
    if (out_height == in_height && out_width == in_width) {
      std::copy(input_data,
                input_data + batch * channels * in_height * in_width,
                output_data);
      return MACE_SUCCESS;
    }

    float height_scale =
        CalculateResizeScale(in_height, out_height, align_corners_);
    float width_scale =
        CalculateResizeScale(in_width, out_width, align_corners_);
    ResizeImage(input_data, batch, in_height, in_width, out_height, out_width,
                channels, height_scale, width_scale, output_data);

    return MACE_SUCCESS;
  }
};
#ifdef MACE_ENABLE_OPENCL
// GPU implementation; operator() is defined in the OpenCL source file.
template<typename T>
struct ResizeBicubicFunctor<DeviceType::GPU, T>
    : ResizeBicubicFunctorBase {
  ResizeBicubicFunctor(const std::vector<index_t> &size, bool align_corners)
      : ResizeBicubicFunctorBase(size, align_corners) {}
  MaceStatus operator()(const Tensor *input,
                        Tensor *output,
                        StatsFuture *future);

  cl::Kernel kernel_;                         // cached compiled kernel
  uint32_t kwg_size_;                         // max kernel workgroup size
  std::unique_ptr<BufferBase> kernel_error_;  // device-side error flag
  std::vector<index_t> input_shape_;          // shape the kernel was tuned for
};
#endif  // MACE_ENABLE_OPENCL
} // namespace kernels
} // namespace mace
#endif // MACE_KERNELS_RESIZE_BICUBIC_H_
|
poisson2D.c | /*******************************************************************
*** poisson2D: Numerical solution of the Poisson PDE in 2D.
***
*** Solver function and iterators.
***
*** Author: Nikos Tryfonidis, December 2015
*** The MIT License (MIT)
*** Copyright (c) 2015 Nikos Tryfonidis
*** See LICENSE.txt
*******************************************************************/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <omp.h>
double residual(double **u, double **rhs, double dx, double dy, int nX, int nY);
/* Poisson 2D Solver: Iterates until solution with desired accuracy is found
or until it reaches "maxIterations". Calls iterative functions from "iterative.c".
*/
/* Solves the 2D Poisson equation by repeatedly applying the supplied
   relaxation iterator until the residual drops below a tolerance or a
   maximum iteration count is reached.

   FIX: the residual was previously computed BEFORE each batch of
   iterations, so the convergence test used a value that was
   iterationsPerCheck iterations stale and the loop always ran one extra
   batch after converging.  The residual is now computed after the batch.
   The loop bound is now `nIterations < maxIterations`, matching the
   `>= maxIterations` warning below.

   Arguments: iterate  - relaxation step (e.g. Jacobi/Gauss-Seidel from
                         iterative.c); returns the updated solution array
              array2D  - initial guess / working solution, nX x nY
              rhs      - right-hand side, nX x nY
              dx, dy   - grid spacing
              nX, nY   - grid dimensions
   Returns the (possibly re-allocated) solution array. */
double **solvePoisson2D(double ** (*iterate)(double **, double **, double, double, int, int),
                        double **array2D, double **rhs, double dx, double dy, int nX, int nY)
{
    int t, nIterations, iterationsPerCheck, maxIterations;
    double res, tolerance;

    /* Set tolerance (value for residual considered adequate),
       maximum iterations and iterations per residual check */
    tolerance = sqrt(nX*nY)*dx*dy;  // Some multiple of expected accuracy of scheme
    maxIterations = 100*nX*nY;      // Some multiple of expected number of iterations to converge
    iterationsPerCheck = 10;
    nIterations = 0;                // counter

    /* Iterate until residual <= tolerance or maxIterations are reached.
       The residual check is only performed every iterationsPerCheck
       iterations because it costs a full grid sweep. */
    do {
        for (t = 0; t < iterationsPerCheck; t++) {
            array2D = (*iterate)(array2D, rhs, dx, dy, nX, nY);
            nIterations += 1;
        }
        res = residual(array2D, rhs, dx, dy, nX, nY);
    } while (res > tolerance && nIterations < maxIterations);

    /* Print number of iterations needed */
    printf("Done! Iterations required: %d\n", nIterations);
    if(nIterations>=maxIterations) {
        printf("Warning: Maximum number of iterations reached! (%d)\n", nIterations);
    }

    return array2D;
}
/* Residual function: Calculates the residual
after the application of relaxation.
*/
/* Computes the L2 norm of the discrete residual A*u - rhs over the interior
   points of the grid, where A is the standard 5-point Laplacian stencil.
   Boundary points (row/column 0 and nX-1/nY-1) are excluded. */
double residual(double **u, double **rhs, double dx, double dy, int nX, int nY) {
    int row, col;
    double pointResidual, sumSq;

    sumSq = 0;

    /* Accumulate (A*u - rhs)^2 over all interior points in parallel. */
    #pragma omp parallel for schedule(static) shared(u, rhs) private(row, col, pointResidual)\
        firstprivate(nX, nY, dx, dy) reduction(+:sumSq) default(none)
    for (row = 1; row < nX-1; row++) {
        for (col = 1; col < nY-1; col++) {
            pointResidual = ( 1.0/(dx*dx)*(u[row-1][col]-2.0*u[row][col]+u[row+1][col]) +
                              1.0/(dy*dy)*(u[row][col-1]-2.0*u[row][col]+u[row][col+1]) -
                              rhs[row][col] );
            sumSq += pointResidual*pointResidual;
        }
    }

    return sqrt(sumSq);
}
|
openmp-ex31.c | /* Of course, rand_r() is thread safe */
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>
/* Draws one number from the caller's rand_r() stream and reports it.
   rand_r keeps all of its state in *seed, so this is safe to call from
   multiple threads as long as each thread owns its own seed. */
void safe_one (unsigned int *seed)
{
    const int drawn = rand_r(seed);
    printf("This function does something thread safe, like calculating %d from rand_r.\n", drawn);
}
/* Draws a random number, then chains into safe_one() on the same seed so
   both functions consume the one per-thread rand_r() stream in order. */
void safe_two (unsigned int *seed)
{
    const int drawn = rand_r(seed);  /* draw first, then recurse */
    safe_one(seed);
    printf("This function calculates another random number %d\n", drawn);
}
/* Each thread seeds its private rand_r() state from its own thread id, so
   no two threads ever share random-number state. */
int main(void)
{
#pragma omp parallel
    {
        unsigned int thread_seed = (unsigned int)omp_get_thread_num();
        safe_two(&thread_seed);
    }
    return 0;
}
|
conv_dw_kernel_x86.c | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* License); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*
* Copyright (c) 2020, OPEN AI LAB
* Author: qtang@openailab.com
*/
#include <math.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#include "conv_dw_kernel_x86.h"
#if __SSE2__
#include <emmintrin.h>
#endif
#if __AVX__
#include <immintrin.h>
#endif
#define max(a, b) ((a) > (b) ? (a) : (b))
#define min(a, b) ((a) < (b) ? (a) : (b))

/* Applies ReLU in place: values are clamped below at 0 and, when
   activation > 0, additionally clamped above at activation (ReLU6-style
   ceiling). */
static void relu(float* data, int size, int activation)
{
    for (int idx = 0; idx < size; idx++)
    {
        float v = max(data[idx], (float)0);
        if (activation > 0)
        {
            v = min(v, (float)activation);
        }
        data[idx] = v;
    }
}
/* Copies an in_h x in_w image into an out_h x out_w buffer, placing the
   source at offset (top, left) and filling every border cell with v.
   For wide rows (in_w >= 12) the interior copy uses memcpy; narrow rows use
   a scalar loop to avoid the call overhead. */
static void pad(float* input, float* output, int in_h, int in_w, int out_h, int out_w, int top, int left, float v)
{
    const float* src = input;
    float* dst = output;
    int row = 0;

    /* rows above the image */
    while (row < top)
    {
        for (int x = 0; x < out_w; x++)
        {
            dst[x] = v;
        }
        dst += out_w;
        row++;
    }

    /* rows containing the image: left border, image row, right border */
    while (row < top + in_h)
    {
        int x = 0;
        for (; x < left; x++)
        {
            dst[x] = v;
        }
        if (in_w < 12)
        {
            for (; x < left + in_w; x++)
            {
                dst[x] = src[x - left];
            }
        }
        else
        {
            memcpy(dst + left, src, in_w * sizeof(float));
            x += in_w;
        }
        for (; x < out_w; x++)
        {
            dst[x] = v;
        }
        src += in_w;
        dst += out_w;
        row++;
    }

    /* rows below the image */
    while (row < out_h)
    {
        for (int x = 0; x < out_w; x++)
        {
            dst[x] = v;
        }
        dst += out_w;
        row++;
    }
}
#if __AVX__
/* Depthwise 3x3 stride-1 convolution, AVX/FMA path.
 *
 * Strategy: repack the NCHW input, kernel, and bias into groups of 8
 * channels laid out channel-interleaved (8 floats per pixel), so that one
 * __m256 lane vector processes 8 channels at once.  The main loop computes
 * the 3x3 stencil with FMAs, unrolled 8/4/2/1 output columns at a time,
 * then the results are scattered back to planar NCHW layout.
 *
 * The input is assumed to be pre-padded: rows i, i+1, i+2 of the padded
 * input feed output row i (stride 1, no in-loop bounds checks).
 *
 * NOTE(review): `num_thread` is currently unused -- the compute loop is
 * serial; presumably an omp pragma was intended.  TODO confirm.
 * NOTE(review): for a partial trailing channel group the unused lanes of
 * img_tmp/kernel_tmp are left uninitialized; they are computed with but
 * never written back, so outputs are correct, though the math may touch
 * garbage values (NaN/denormal lanes).
 */
static void convdw3x3s1(float* output, float* img_data, float* kernel_data, float* bias_data, int inc, int inh, int inw,
                        int outh, int outw, int num_thread)
{
    int inwh = inw * inh;
    int outwh = outw * outh;
    /* Channels are processed 8 at a time; channel_remain (< 8) go in one
       extra, partially-filled group. */
    int channel_count = inc >> 3;
    int channel_remain = inc - (channel_count << 3);
    // generate the image tmp
    float* img_tmp = ( float* )sys_malloc(8 * inwh * (channel_count + 1) * sizeof(float));
    float* kernel_tmp = ( float* )sys_malloc(8 * 9 * (channel_count + 1) * sizeof(float));
    float* bias_tmp = ( float* )sys_malloc(8 * (channel_count + 1) * sizeof(float));
    /* ---- Pack: planar NCHW -> 8-channel interleaved groups ---- */
    {
        /* Full groups of 8 channels. */
        for (int i = 0; i < channel_count; i++)
        {
            int ii = i * 8;
            const float* k0 = img_data + (ii + 0) * inwh;
            const float* k1 = img_data + (ii + 1) * inwh;
            const float* k2 = img_data + (ii + 2) * inwh;
            const float* k3 = img_data + (ii + 3) * inwh;
            const float* k4 = img_data + (ii + 4) * inwh;
            const float* k5 = img_data + (ii + 5) * inwh;
            const float* k6 = img_data + (ii + 6) * inwh;
            const float* k7 = img_data + (ii + 7) * inwh;
            const float* f0 = kernel_data + (ii + 0) * 9;
            const float* f1 = kernel_data + (ii + 1) * 9;
            const float* f2 = kernel_data + (ii + 2) * 9;
            const float* f3 = kernel_data + (ii + 3) * 9;
            const float* f4 = kernel_data + (ii + 4) * 9;
            const float* f5 = kernel_data + (ii + 5) * 9;
            const float* f6 = kernel_data + (ii + 6) * 9;
            const float* f7 = kernel_data + (ii + 7) * 9;
            const float* b0 = bias_data + (ii + 0);
            const float* b1 = bias_data + (ii + 1);
            const float* b2 = bias_data + (ii + 2);
            const float* b3 = bias_data + (ii + 3);
            const float* b4 = bias_data + (ii + 4);
            const float* b5 = bias_data + (ii + 5);
            const float* b6 = bias_data + (ii + 6);
            const float* b7 = bias_data + (ii + 7);
            float* tmp0 = img_tmp + ii * inwh;
            float* tmp1 = kernel_tmp + ii * 9;
            float* tmp2 = bias_tmp + ii;
            /* Interleave 8 image planes: one pixel from each channel per
               8-float slot. */
            for (int j = 0; j < inwh; j++)
            {
                tmp0[0] = k0[0];
                tmp0[1] = k1[0];
                tmp0[2] = k2[0];
                tmp0[3] = k3[0];
                tmp0[4] = k4[0];
                tmp0[5] = k5[0];
                tmp0[6] = k6[0];
                tmp0[7] = k7[0];
                tmp0 += 8;
                k0++;
                k1++;
                k2++;
                k3++;
                k4++;
                k5++;
                k6++;
                k7++;
            }
            /* Interleave the 9 kernel taps the same way. */
            for (int j = 0; j < 9; j++)
            {
                tmp1[0] = f0[0];
                tmp1[1] = f1[0];
                tmp1[2] = f2[0];
                tmp1[3] = f3[0];
                tmp1[4] = f4[0];
                tmp1[5] = f5[0];
                tmp1[6] = f6[0];
                tmp1[7] = f7[0];
                tmp1 += 8;
                f0++;
                f1++;
                f2++;
                f3++;
                f4++;
                f5++;
                f6++;
                f7++;
            }
            /* Bias vector (zeros when no bias is supplied). */
            if (bias_data)
            {
                tmp2[0] = b0[0];
                tmp2[1] = b1[0];
                tmp2[2] = b2[0];
                tmp2[3] = b3[0];
                tmp2[4] = b4[0];
                tmp2[5] = b5[0];
                tmp2[6] = b6[0];
                tmp2[7] = b7[0];
            }
            else
            {
                tmp2[0] = 0;
                tmp2[1] = 0;
                tmp2[2] = 0;
                tmp2[3] = 0;
                tmp2[4] = 0;
                tmp2[5] = 0;
                tmp2[6] = 0;
                tmp2[7] = 0;
            }
        }
        /* Remaining channels (< 8): 4 at a time into lanes 0-3 of the
           extra group... */
        int i = 0;
        for (; i + 3 < channel_remain; i += 4)
        {
            int ii = channel_count * 8 + i;
            float* k0 = img_data + (ii + 0) * inwh;
            float* k1 = img_data + (ii + 1) * inwh;
            float* k2 = img_data + (ii + 2) * inwh;
            float* k3 = img_data + (ii + 3) * inwh;
            float* f0 = kernel_data + (ii + 0) * 9;
            float* f1 = kernel_data + (ii + 1) * 9;
            float* f2 = kernel_data + (ii + 2) * 9;
            float* f3 = kernel_data + (ii + 3) * 9;
            float* b0 = bias_data + (ii + 0);
            float* b1 = bias_data + (ii + 1);
            float* b2 = bias_data + (ii + 2);
            float* b3 = bias_data + (ii + 3);
            float* tmp0 = img_tmp + channel_count * 8 * inwh;
            float* tmp1 = kernel_tmp + channel_count * 8 * 9;
            float* tmp2 = bias_tmp + ii;
            for (int j = 0; j < inwh; j++)
            {
                tmp0[0] = k0[0];
                tmp0[1] = k1[0];
                tmp0[2] = k2[0];
                tmp0[3] = k3[0];
                tmp0 += 8;
                k0++;
                k1++;
                k2++;
                k3++;
            }
            for (int j = 0; j < 9; j++)
            {
                tmp1[0] = f0[0];
                tmp1[1] = f1[0];
                tmp1[2] = f2[0];
                tmp1[3] = f3[0];
                tmp1 += 8;
                f0++;
                f1++;
                f2++;
                f3++;
            }
            if (bias_data)
            {
                tmp2[0] = b0[0];
                tmp2[1] = b1[0];
                tmp2[2] = b2[0];
                tmp2[3] = b3[0];
            }
            else
            {
                tmp2[0] = 0;
                tmp2[1] = 0;
                tmp2[2] = 0;
                tmp2[3] = 0;
            }
        }
        /* ...then the final 1-3 channels one lane at a time (lane index i). */
        for (; i < channel_remain; i++)
        {
            int ii = channel_count * 8 + i;
            float* k0 = img_data + ii * inwh;
            float* f0 = kernel_data + ii * 9;
            float* b0 = bias_data + ii;
            float* tmp0 = img_tmp + channel_count * 8 * inwh;
            float* tmp1 = kernel_tmp + channel_count * 8 * 9;
            float* tmp2 = bias_tmp + channel_count * 8;
            for (int j = 0; j < inwh; j++)
            {
                tmp0[i] = k0[0];
                tmp0 += 8;
                k0++;
            }
            for (int j = 0; j < 9; j++)
            {
                tmp1[i] = f0[0];
                tmp1 += 8;
                f0++;
            }
            if (bias_data)
            {
                tmp2[i] = b0[0];
            }
            else
            {
                tmp2[i] = 0;
            }
        }
    }
    /* ---- Compute: per 8-channel group, 3x3 stencil with FMA ----
       itmp0/1/2 point at padded input rows i, i+1, i+2; output columns are
       unrolled 8, 4, 2, then 1 at a time.  ktmp holds the 9 interleaved
       taps (3 per row), btmp the per-lane bias used to seed each sum. */
    float* output_tmp = ( float* )sys_malloc(outwh * (channel_count + 1) * 8 * sizeof(float));
    for (int c = 0; c < channel_count + 1; c++)
    {
        float* ktmp = kernel_tmp + c * 8 * 9;
        float* btmp = bias_tmp + c * 8;
        for (int i = 0; i < outh; i++)
        {
            int j = 0;
            float* itmp0 = img_tmp + c * 8 * inwh + 8 * i * inw;
            float* itmp1 = img_tmp + c * 8 * inwh + 8 * (i + 1) * inw;
            float* itmp2 = img_tmp + c * 8 * inwh + 8 * (i + 2) * inw;
            float* otmp = output_tmp + c * 8 * outwh + 8 * i * outw;
            /* 8 output columns per iteration: va0..va9 are 10 consecutive
               input pixels, vb0..vb2 the 3 taps of the current kernel row. */
            for (; j + 7 < outw; j += 8)
            {
                __m256 _sum0 = _mm256_loadu_ps(btmp);
                __m256 _sum1 = _mm256_loadu_ps(btmp);
                __m256 _sum2 = _mm256_loadu_ps(btmp);
                __m256 _sum3 = _mm256_loadu_ps(btmp);
                __m256 _sum4 = _mm256_loadu_ps(btmp);
                __m256 _sum5 = _mm256_loadu_ps(btmp);
                __m256 _sum6 = _mm256_loadu_ps(btmp);
                __m256 _sum7 = _mm256_loadu_ps(btmp);
                /* kernel row 0 (input row i) */
                __m256 _va0 = _mm256_loadu_ps(itmp0);
                __m256 _va1 = _mm256_loadu_ps(itmp0 + 8);
                __m256 _va2 = _mm256_loadu_ps(itmp0 + 16);
                __m256 _va3 = _mm256_loadu_ps(itmp0 + 24);
                __m256 _va4 = _mm256_loadu_ps(itmp0 + 32);
                __m256 _va5 = _mm256_loadu_ps(itmp0 + 40);
                __m256 _va6 = _mm256_loadu_ps(itmp0 + 48);
                __m256 _va7 = _mm256_loadu_ps(itmp0 + 56);
                __m256 _va8 = _mm256_loadu_ps(itmp0 + 64);
                __m256 _va9 = _mm256_loadu_ps(itmp0 + 72);
                __m256 _vb0 = _mm256_loadu_ps(ktmp);
                __m256 _vb1 = _mm256_loadu_ps(ktmp + 8);
                __m256 _vb2 = _mm256_loadu_ps(ktmp + 16);
                _sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0);
                _sum1 = _mm256_fmadd_ps(_va1, _vb0, _sum1);
                _sum0 = _mm256_fmadd_ps(_va1, _vb1, _sum0);
                _sum1 = _mm256_fmadd_ps(_va2, _vb1, _sum1);
                _sum0 = _mm256_fmadd_ps(_va2, _vb2, _sum0);
                _sum2 = _mm256_fmadd_ps(_va2, _vb0, _sum2);
                _sum3 = _mm256_fmadd_ps(_va3, _vb0, _sum3);
                _sum1 = _mm256_fmadd_ps(_va3, _vb2, _sum1);
                _sum2 = _mm256_fmadd_ps(_va3, _vb1, _sum2);
                _sum3 = _mm256_fmadd_ps(_va4, _vb1, _sum3);
                _sum4 = _mm256_fmadd_ps(_va4, _vb0, _sum4);
                _sum2 = _mm256_fmadd_ps(_va4, _vb2, _sum2);
                _sum3 = _mm256_fmadd_ps(_va5, _vb2, _sum3);
                _sum5 = _mm256_fmadd_ps(_va5, _vb0, _sum5);
                _sum4 = _mm256_fmadd_ps(_va5, _vb1, _sum4);
                _sum5 = _mm256_fmadd_ps(_va6, _vb1, _sum5);
                _sum4 = _mm256_fmadd_ps(_va6, _vb2, _sum4);
                _sum6 = _mm256_fmadd_ps(_va6, _vb0, _sum6);
                _sum7 = _mm256_fmadd_ps(_va7, _vb0, _sum7);
                _sum5 = _mm256_fmadd_ps(_va7, _vb2, _sum5);
                _sum6 = _mm256_fmadd_ps(_va7, _vb1, _sum6);
                _sum7 = _mm256_fmadd_ps(_va8, _vb1, _sum7);
                _sum6 = _mm256_fmadd_ps(_va8, _vb2, _sum6);
                _sum7 = _mm256_fmadd_ps(_va9, _vb2, _sum7);
                /* kernel row 1 (input row i+1) */
                _va0 = _mm256_loadu_ps(itmp1);
                _va1 = _mm256_loadu_ps(itmp1 + 8);
                _va2 = _mm256_loadu_ps(itmp1 + 16);
                _va3 = _mm256_loadu_ps(itmp1 + 24);
                _va4 = _mm256_loadu_ps(itmp1 + 32);
                _va5 = _mm256_loadu_ps(itmp1 + 40);
                _va6 = _mm256_loadu_ps(itmp1 + 48);
                _va7 = _mm256_loadu_ps(itmp1 + 56);
                _va8 = _mm256_loadu_ps(itmp1 + 64);
                _va9 = _mm256_loadu_ps(itmp1 + 72);
                _vb0 = _mm256_loadu_ps(ktmp + 24);
                _vb1 = _mm256_loadu_ps(ktmp + 32);
                _vb2 = _mm256_loadu_ps(ktmp + 40);
                _sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0);
                _sum1 = _mm256_fmadd_ps(_va1, _vb0, _sum1);
                _sum0 = _mm256_fmadd_ps(_va1, _vb1, _sum0);
                _sum1 = _mm256_fmadd_ps(_va2, _vb1, _sum1);
                _sum0 = _mm256_fmadd_ps(_va2, _vb2, _sum0);
                _sum2 = _mm256_fmadd_ps(_va2, _vb0, _sum2);
                _sum3 = _mm256_fmadd_ps(_va3, _vb0, _sum3);
                _sum1 = _mm256_fmadd_ps(_va3, _vb2, _sum1);
                _sum2 = _mm256_fmadd_ps(_va3, _vb1, _sum2);
                _sum3 = _mm256_fmadd_ps(_va4, _vb1, _sum3);
                _sum4 = _mm256_fmadd_ps(_va4, _vb0, _sum4);
                _sum2 = _mm256_fmadd_ps(_va4, _vb2, _sum2);
                _sum3 = _mm256_fmadd_ps(_va5, _vb2, _sum3);
                _sum5 = _mm256_fmadd_ps(_va5, _vb0, _sum5);
                _sum4 = _mm256_fmadd_ps(_va5, _vb1, _sum4);
                _sum5 = _mm256_fmadd_ps(_va6, _vb1, _sum5);
                _sum4 = _mm256_fmadd_ps(_va6, _vb2, _sum4);
                _sum6 = _mm256_fmadd_ps(_va6, _vb0, _sum6);
                _sum7 = _mm256_fmadd_ps(_va7, _vb0, _sum7);
                _sum5 = _mm256_fmadd_ps(_va7, _vb2, _sum5);
                _sum6 = _mm256_fmadd_ps(_va7, _vb1, _sum6);
                _sum7 = _mm256_fmadd_ps(_va8, _vb1, _sum7);
                _sum6 = _mm256_fmadd_ps(_va8, _vb2, _sum6);
                _sum7 = _mm256_fmadd_ps(_va9, _vb2, _sum7);
                /* kernel row 2 (input row i+2) */
                _va0 = _mm256_loadu_ps(itmp2);
                _va1 = _mm256_loadu_ps(itmp2 + 8);
                _va2 = _mm256_loadu_ps(itmp2 + 16);
                _va3 = _mm256_loadu_ps(itmp2 + 24);
                _va4 = _mm256_loadu_ps(itmp2 + 32);
                _va5 = _mm256_loadu_ps(itmp2 + 40);
                _va6 = _mm256_loadu_ps(itmp2 + 48);
                _va7 = _mm256_loadu_ps(itmp2 + 56);
                _va8 = _mm256_loadu_ps(itmp2 + 64);
                _va9 = _mm256_loadu_ps(itmp2 + 72);
                _vb0 = _mm256_loadu_ps(ktmp + 48);
                _vb1 = _mm256_loadu_ps(ktmp + 56);
                _vb2 = _mm256_loadu_ps(ktmp + 64);
                _sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0);
                _sum1 = _mm256_fmadd_ps(_va1, _vb0, _sum1);
                _sum0 = _mm256_fmadd_ps(_va1, _vb1, _sum0);
                _sum1 = _mm256_fmadd_ps(_va2, _vb1, _sum1);
                _sum0 = _mm256_fmadd_ps(_va2, _vb2, _sum0);
                _sum2 = _mm256_fmadd_ps(_va2, _vb0, _sum2);
                _sum3 = _mm256_fmadd_ps(_va3, _vb0, _sum3);
                _sum1 = _mm256_fmadd_ps(_va3, _vb2, _sum1);
                _sum2 = _mm256_fmadd_ps(_va3, _vb1, _sum2);
                _sum3 = _mm256_fmadd_ps(_va4, _vb1, _sum3);
                _sum4 = _mm256_fmadd_ps(_va4, _vb0, _sum4);
                _sum2 = _mm256_fmadd_ps(_va4, _vb2, _sum2);
                _sum3 = _mm256_fmadd_ps(_va5, _vb2, _sum3);
                _sum5 = _mm256_fmadd_ps(_va5, _vb0, _sum5);
                _sum4 = _mm256_fmadd_ps(_va5, _vb1, _sum4);
                _sum5 = _mm256_fmadd_ps(_va6, _vb1, _sum5);
                _sum4 = _mm256_fmadd_ps(_va6, _vb2, _sum4);
                _sum6 = _mm256_fmadd_ps(_va6, _vb0, _sum6);
                _sum7 = _mm256_fmadd_ps(_va7, _vb0, _sum7);
                _sum5 = _mm256_fmadd_ps(_va7, _vb2, _sum5);
                _sum6 = _mm256_fmadd_ps(_va7, _vb1, _sum6);
                _sum7 = _mm256_fmadd_ps(_va8, _vb1, _sum7);
                _sum6 = _mm256_fmadd_ps(_va8, _vb2, _sum6);
                _sum7 = _mm256_fmadd_ps(_va9, _vb2, _sum7);
                _mm256_storeu_ps(otmp, _sum0);
                _mm256_storeu_ps(otmp + 8, _sum1);
                _mm256_storeu_ps(otmp + 16, _sum2);
                _mm256_storeu_ps(otmp + 24, _sum3);
                _mm256_storeu_ps(otmp + 32, _sum4);
                _mm256_storeu_ps(otmp + 40, _sum5);
                _mm256_storeu_ps(otmp + 48, _sum6);
                _mm256_storeu_ps(otmp + 56, _sum7);
                itmp0 += 64;
                itmp1 += 64;
                itmp2 += 64;
                otmp += 64;
            }
            /* 4 output columns per iteration */
            for (; j + 3 < outw; j += 4)
            {
                __m256 _sum0 = _mm256_loadu_ps(btmp);
                __m256 _sum1 = _mm256_loadu_ps(btmp);
                __m256 _sum2 = _mm256_loadu_ps(btmp);
                __m256 _sum3 = _mm256_loadu_ps(btmp);
                __m256 _va0 = _mm256_loadu_ps(itmp0);
                __m256 _va1 = _mm256_loadu_ps(itmp0 + 8);
                __m256 _va2 = _mm256_loadu_ps(itmp0 + 16);
                __m256 _va3 = _mm256_loadu_ps(itmp0 + 24);
                __m256 _va4 = _mm256_loadu_ps(itmp0 + 32);
                __m256 _va5 = _mm256_loadu_ps(itmp0 + 40);
                __m256 _vb0 = _mm256_loadu_ps(ktmp);
                __m256 _vb1 = _mm256_loadu_ps(ktmp + 8);
                __m256 _vb2 = _mm256_loadu_ps(ktmp + 16);
                _sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0);
                _sum1 = _mm256_fmadd_ps(_va1, _vb0, _sum1);
                _sum0 = _mm256_fmadd_ps(_va1, _vb1, _sum0);
                _sum1 = _mm256_fmadd_ps(_va2, _vb1, _sum1);
                _sum0 = _mm256_fmadd_ps(_va2, _vb2, _sum0);
                _sum2 = _mm256_fmadd_ps(_va2, _vb0, _sum2);
                _sum3 = _mm256_fmadd_ps(_va3, _vb0, _sum3);
                _sum1 = _mm256_fmadd_ps(_va3, _vb2, _sum1);
                _sum2 = _mm256_fmadd_ps(_va3, _vb1, _sum2);
                _sum3 = _mm256_fmadd_ps(_va4, _vb1, _sum3);
                _sum2 = _mm256_fmadd_ps(_va4, _vb2, _sum2);
                _sum3 = _mm256_fmadd_ps(_va5, _vb2, _sum3);
                _va0 = _mm256_loadu_ps(itmp1);
                _va1 = _mm256_loadu_ps(itmp1 + 8);
                _va2 = _mm256_loadu_ps(itmp1 + 16);
                _va3 = _mm256_loadu_ps(itmp1 + 24);
                _va4 = _mm256_loadu_ps(itmp1 + 32);
                _va5 = _mm256_loadu_ps(itmp1 + 40);
                _vb0 = _mm256_loadu_ps(ktmp + 24);
                _vb1 = _mm256_loadu_ps(ktmp + 32);
                _vb2 = _mm256_loadu_ps(ktmp + 40);
                _sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0);
                _sum1 = _mm256_fmadd_ps(_va1, _vb0, _sum1);
                _sum0 = _mm256_fmadd_ps(_va1, _vb1, _sum0);
                _sum1 = _mm256_fmadd_ps(_va2, _vb1, _sum1);
                _sum0 = _mm256_fmadd_ps(_va2, _vb2, _sum0);
                _sum2 = _mm256_fmadd_ps(_va2, _vb0, _sum2);
                _sum3 = _mm256_fmadd_ps(_va3, _vb0, _sum3);
                _sum1 = _mm256_fmadd_ps(_va3, _vb2, _sum1);
                _sum2 = _mm256_fmadd_ps(_va3, _vb1, _sum2);
                _sum3 = _mm256_fmadd_ps(_va4, _vb1, _sum3);
                _sum2 = _mm256_fmadd_ps(_va4, _vb2, _sum2);
                _sum3 = _mm256_fmadd_ps(_va5, _vb2, _sum3);
                _va0 = _mm256_loadu_ps(itmp2);
                _va1 = _mm256_loadu_ps(itmp2 + 8);
                _va2 = _mm256_loadu_ps(itmp2 + 16);
                _va3 = _mm256_loadu_ps(itmp2 + 24);
                _va4 = _mm256_loadu_ps(itmp2 + 32);
                _va5 = _mm256_loadu_ps(itmp2 + 40);
                _vb0 = _mm256_loadu_ps(ktmp + 48);
                _vb1 = _mm256_loadu_ps(ktmp + 56);
                _vb2 = _mm256_loadu_ps(ktmp + 64);
                _sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0);
                _sum1 = _mm256_fmadd_ps(_va1, _vb0, _sum1);
                _sum0 = _mm256_fmadd_ps(_va1, _vb1, _sum0);
                _sum1 = _mm256_fmadd_ps(_va2, _vb1, _sum1);
                _sum0 = _mm256_fmadd_ps(_va2, _vb2, _sum0);
                _sum2 = _mm256_fmadd_ps(_va2, _vb0, _sum2);
                _sum3 = _mm256_fmadd_ps(_va3, _vb0, _sum3);
                _sum1 = _mm256_fmadd_ps(_va3, _vb2, _sum1);
                _sum2 = _mm256_fmadd_ps(_va3, _vb1, _sum2);
                _sum3 = _mm256_fmadd_ps(_va4, _vb1, _sum3);
                _sum2 = _mm256_fmadd_ps(_va4, _vb2, _sum2);
                _sum3 = _mm256_fmadd_ps(_va5, _vb2, _sum3);
                _mm256_storeu_ps(otmp, _sum0);
                _mm256_storeu_ps(otmp + 8, _sum1);
                _mm256_storeu_ps(otmp + 16, _sum2);
                _mm256_storeu_ps(otmp + 24, _sum3);
                itmp0 += 32;
                itmp1 += 32;
                itmp2 += 32;
                otmp += 32;
            }
            /* 2 output columns per iteration */
            for (; j + 1 < outw; j += 2)
            {
                __m256 _sum0 = _mm256_loadu_ps(btmp);
                __m256 _sum1 = _mm256_loadu_ps(btmp);
                __m256 _va0 = _mm256_loadu_ps(itmp0);
                __m256 _va1 = _mm256_loadu_ps(itmp0 + 8);
                __m256 _va2 = _mm256_loadu_ps(itmp0 + 16);
                __m256 _va3 = _mm256_loadu_ps(itmp0 + 24);
                __m256 _vb0 = _mm256_loadu_ps(ktmp);
                __m256 _vb1 = _mm256_loadu_ps(ktmp + 8);
                __m256 _vb2 = _mm256_loadu_ps(ktmp + 16);
                _sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0);
                _sum1 = _mm256_fmadd_ps(_va1, _vb0, _sum1);
                _sum0 = _mm256_fmadd_ps(_va1, _vb1, _sum0);
                _sum1 = _mm256_fmadd_ps(_va2, _vb1, _sum1);
                _sum0 = _mm256_fmadd_ps(_va2, _vb2, _sum0);
                _sum1 = _mm256_fmadd_ps(_va3, _vb2, _sum1);
                _va0 = _mm256_loadu_ps(itmp1);
                _va1 = _mm256_loadu_ps(itmp1 + 8);
                _va2 = _mm256_loadu_ps(itmp1 + 16);
                _va3 = _mm256_loadu_ps(itmp1 + 24);
                _vb0 = _mm256_loadu_ps(ktmp + 24);
                _vb1 = _mm256_loadu_ps(ktmp + 32);
                _vb2 = _mm256_loadu_ps(ktmp + 40);
                _sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0);
                _sum1 = _mm256_fmadd_ps(_va1, _vb0, _sum1);
                _sum0 = _mm256_fmadd_ps(_va1, _vb1, _sum0);
                _sum1 = _mm256_fmadd_ps(_va2, _vb1, _sum1);
                _sum0 = _mm256_fmadd_ps(_va2, _vb2, _sum0);
                _sum1 = _mm256_fmadd_ps(_va3, _vb2, _sum1);
                _va0 = _mm256_loadu_ps(itmp2);
                _va1 = _mm256_loadu_ps(itmp2 + 8);
                _va2 = _mm256_loadu_ps(itmp2 + 16);
                _va3 = _mm256_loadu_ps(itmp2 + 24);
                _vb0 = _mm256_loadu_ps(ktmp + 48);
                _vb1 = _mm256_loadu_ps(ktmp + 56);
                _vb2 = _mm256_loadu_ps(ktmp + 64);
                _sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0);
                _sum1 = _mm256_fmadd_ps(_va1, _vb0, _sum1);
                _sum0 = _mm256_fmadd_ps(_va1, _vb1, _sum0);
                _sum1 = _mm256_fmadd_ps(_va2, _vb1, _sum1);
                _sum0 = _mm256_fmadd_ps(_va2, _vb2, _sum0);
                _sum1 = _mm256_fmadd_ps(_va3, _vb2, _sum1);
                _mm256_storeu_ps(otmp, _sum0);
                _mm256_storeu_ps(otmp + 8, _sum1);
                itmp0 += 16;
                itmp1 += 16;
                itmp2 += 16;
                otmp += 16;
            }
            /* 1 output column at a time (tail) */
            for (; j < outw; j++)
            {
                __m256 _sum0 = _mm256_loadu_ps(btmp);
                __m256 _va0 = _mm256_loadu_ps(itmp0);
                __m256 _va1 = _mm256_loadu_ps(itmp0 + 8);
                __m256 _va2 = _mm256_loadu_ps(itmp0 + 16);
                __m256 _vb0 = _mm256_loadu_ps(ktmp);
                __m256 _vb1 = _mm256_loadu_ps(ktmp + 8);
                __m256 _vb2 = _mm256_loadu_ps(ktmp + 16);
                _sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0);
                _sum0 = _mm256_fmadd_ps(_va1, _vb1, _sum0);
                _sum0 = _mm256_fmadd_ps(_va2, _vb2, _sum0);
                _va0 = _mm256_loadu_ps(itmp1);
                _va1 = _mm256_loadu_ps(itmp1 + 8);
                _va2 = _mm256_loadu_ps(itmp1 + 16);
                _vb0 = _mm256_loadu_ps(ktmp + 24);
                _vb1 = _mm256_loadu_ps(ktmp + 32);
                _vb2 = _mm256_loadu_ps(ktmp + 40);
                _sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0);
                _sum0 = _mm256_fmadd_ps(_va1, _vb1, _sum0);
                _sum0 = _mm256_fmadd_ps(_va2, _vb2, _sum0);
                _va0 = _mm256_loadu_ps(itmp2);
                _va1 = _mm256_loadu_ps(itmp2 + 8);
                _va2 = _mm256_loadu_ps(itmp2 + 16);
                _vb0 = _mm256_loadu_ps(ktmp + 48);
                _vb1 = _mm256_loadu_ps(ktmp + 56);
                _vb2 = _mm256_loadu_ps(ktmp + 64);
                _sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0);
                _sum0 = _mm256_fmadd_ps(_va1, _vb1, _sum0);
                _sum0 = _mm256_fmadd_ps(_va2, _vb2, _sum0);
                _mm256_storeu_ps(otmp, _sum0);
                itmp0 += 8;
                itmp1 += 8;
                itmp2 += 8;
                otmp += 8;
            }
        }
    }
    // load_data
    /* ---- Unpack: 8-channel interleaved groups -> planar NCHW output ---- */
    {
        for (int i = 0; i < channel_count; i++)
        {
            float* otmp = output_tmp + i * 8 * outwh;
            float* tmp0 = output + i * 8 * outwh;
            float* tmp1 = output + i * 8 * outwh + 1 * outwh;
            float* tmp2 = output + i * 8 * outwh + 2 * outwh;
            float* tmp3 = output + i * 8 * outwh + 3 * outwh;
            float* tmp4 = output + i * 8 * outwh + 4 * outwh;
            float* tmp5 = output + i * 8 * outwh + 5 * outwh;
            float* tmp6 = output + i * 8 * outwh + 6 * outwh;
            float* tmp7 = output + i * 8 * outwh + 7 * outwh;
            /* NOTE(review): this inner `i` shadows the channel-group `i`
               above; it works, but renaming would aid readability. */
            for (int i = 0; i < outwh; i++)
            {
                tmp0[0] = otmp[0];
                tmp1[0] = otmp[1];
                tmp2[0] = otmp[2];
                tmp3[0] = otmp[3];
                tmp4[0] = otmp[4];
                tmp5[0] = otmp[5];
                tmp6[0] = otmp[6];
                tmp7[0] = otmp[7];
                otmp += 8;
                tmp0++;
                tmp1++;
                tmp2++;
                tmp3++;
                tmp4++;
                tmp5++;
                tmp6++;
                tmp7++;
            }
        }
        /* Remaining channels: 4 at a time, then one lane at a time. */
        int i = 0;
        for (; i + 3 < channel_remain; i += 4)
        {
            int ii = channel_count * 8 + i;
            float* otmp = output_tmp + ii * outwh;
            float* tmp0 = output + ii * outwh;
            float* tmp1 = output + ii * outwh + 1 * outwh;
            float* tmp2 = output + ii * outwh + 2 * outwh;
            float* tmp3 = output + ii * outwh + 3 * outwh;
            for (int j = 0; j < outwh; j++)
            {
                tmp0[0] = otmp[0];
                tmp1[0] = otmp[1];
                tmp2[0] = otmp[2];
                tmp3[0] = otmp[3];
                otmp += 8;
                tmp0++;
                tmp1++;
                tmp2++;
                tmp3++;
            }
        }
        for (; i < channel_remain; i++)
        {
            int ii = channel_count * 8 + i;
            float* otmp = output_tmp + channel_count * 8 * outwh;
            float* tmp0 = output + ii * outwh;
            for (int j = 0; j < outwh; j++)
            {
                tmp0[0] = otmp[i];
                otmp += 8;
                tmp0++;
            }
        }
    }
    sys_free(output_tmp);
    sys_free(img_tmp);
    sys_free(kernel_tmp);
    sys_free(bias_tmp);
}
static void convdw3x3s2(float* output, float* img_data, float* kernel_data, float* bias_data, int inc, int inh, int inw,
int outh, int outw, int num_thread)
{
int inwh = inw * inh;
int outwh = outw * outh;
int channel_count = inc >> 3;
int channel_remain = inc - (channel_count << 3);
// generate the image tmp
float* img_tmp = ( float* )sys_malloc(8 * inwh * (channel_count + 1) * sizeof(float));
float* kernel_tmp = ( float* )sys_malloc(8 * 9 * (channel_count + 1) * sizeof(float));
float* bias_tmp = ( float* )sys_malloc(8 * (channel_count + 1) * sizeof(float));
{
for (int i = 0; i < channel_count; i++)
{
int ii = i * 8;
const float* k0 = img_data + (ii + 0) * inwh;
const float* k1 = img_data + (ii + 1) * inwh;
const float* k2 = img_data + (ii + 2) * inwh;
const float* k3 = img_data + (ii + 3) * inwh;
const float* k4 = img_data + (ii + 4) * inwh;
const float* k5 = img_data + (ii + 5) * inwh;
const float* k6 = img_data + (ii + 6) * inwh;
const float* k7 = img_data + (ii + 7) * inwh;
const float* f0 = kernel_data + (ii + 0) * 9;
const float* f1 = kernel_data + (ii + 1) * 9;
const float* f2 = kernel_data + (ii + 2) * 9;
const float* f3 = kernel_data + (ii + 3) * 9;
const float* f4 = kernel_data + (ii + 4) * 9;
const float* f5 = kernel_data + (ii + 5) * 9;
const float* f6 = kernel_data + (ii + 6) * 9;
const float* f7 = kernel_data + (ii + 7) * 9;
const float* b0 = bias_data + (ii + 0);
const float* b1 = bias_data + (ii + 1);
const float* b2 = bias_data + (ii + 2);
const float* b3 = bias_data + (ii + 3);
const float* b4 = bias_data + (ii + 4);
const float* b5 = bias_data + (ii + 5);
const float* b6 = bias_data + (ii + 6);
const float* b7 = bias_data + (ii + 7);
float* tmp0 = img_tmp + ii * inwh;
float* tmp1 = kernel_tmp + ii * 9;
float* tmp2 = bias_tmp + ii;
for (int j = 0; j < inwh; j++)
{
tmp0[0] = k0[0];
tmp0[1] = k1[0];
tmp0[2] = k2[0];
tmp0[3] = k3[0];
tmp0[4] = k4[0];
tmp0[5] = k5[0];
tmp0[6] = k6[0];
tmp0[7] = k7[0];
tmp0 += 8;
k0++;
k1++;
k2++;
k3++;
k4++;
k5++;
k6++;
k7++;
}
for (int j = 0; j < 9; j++)
{
tmp1[0] = f0[0];
tmp1[1] = f1[0];
tmp1[2] = f2[0];
tmp1[3] = f3[0];
tmp1[4] = f4[0];
tmp1[5] = f5[0];
tmp1[6] = f6[0];
tmp1[7] = f7[0];
tmp1 += 8;
f0++;
f1++;
f2++;
f3++;
f4++;
f5++;
f6++;
f7++;
}
if (bias_data)
{
tmp2[0] = b0[0];
tmp2[1] = b1[0];
tmp2[2] = b2[0];
tmp2[3] = b3[0];
tmp2[4] = b4[0];
tmp2[5] = b5[0];
tmp2[6] = b6[0];
tmp2[7] = b7[0];
}
else
{
tmp2[0] = 0;
tmp2[1] = 0;
tmp2[2] = 0;
tmp2[3] = 0;
tmp2[4] = 0;
tmp2[5] = 0;
tmp2[6] = 0;
tmp2[7] = 0;
}
}
int i = 0;
for (; i + 3 < channel_remain; i += 4)
{
int ii = channel_count * 8 + i;
float* k0 = img_data + (ii + 0) * inwh;
float* k1 = img_data + (ii + 1) * inwh;
float* k2 = img_data + (ii + 2) * inwh;
float* k3 = img_data + (ii + 3) * inwh;
float* f0 = kernel_data + (ii + 0) * 9;
float* f1 = kernel_data + (ii + 1) * 9;
float* f2 = kernel_data + (ii + 2) * 9;
float* f3 = kernel_data + (ii + 3) * 9;
float* b0 = bias_data + (ii + 0);
float* b1 = bias_data + (ii + 1);
float* b2 = bias_data + (ii + 2);
float* b3 = bias_data + (ii + 3);
float* tmp0 = img_tmp + channel_count * 8 * inwh;
float* tmp1 = kernel_tmp + channel_count * 8 * 9;
float* tmp2 = bias_tmp + ii;
for (int j = 0; j < inwh; j++)
{
tmp0[0] = k0[0];
tmp0[1] = k1[0];
tmp0[2] = k2[0];
tmp0[3] = k3[0];
tmp0 += 8;
k0++;
k1++;
k2++;
k3++;
}
for (int j = 0; j < 9; j++)
{
tmp1[0] = f0[0];
tmp1[1] = f1[0];
tmp1[2] = f2[0];
tmp1[3] = f3[0];
tmp1 += 8;
f0++;
f1++;
f2++;
f3++;
}
if (bias_data)
{
tmp2[0] = b0[0];
tmp2[1] = b1[0];
tmp2[2] = b2[0];
tmp2[3] = b3[0];
}
else
{
tmp2[0] = 0;
tmp2[1] = 0;
tmp2[2] = 0;
tmp2[3] = 0;
}
}
for (; i < channel_remain; i++)
{
int ii = channel_count * 8 + i;
float* k0 = img_data + ii * inwh;
float* f0 = kernel_data + ii * 9;
float* b0 = bias_data + ii;
float* tmp0 = img_tmp + channel_count * 8 * inwh;
float* tmp1 = kernel_tmp + channel_count * 8 * 9;
float* tmp2 = bias_tmp + channel_count * 8;
for (int j = 0; j < inwh; j++)
{
tmp0[i] = k0[0];
tmp0 += 8;
k0++;
}
for (int j = 0; j < 9; j++)
{
tmp1[i] = f0[0];
tmp1 += 8;
f0++;
}
if (bias_data)
{
tmp2[i] = b0[0];
}
else
{
tmp2[i] = 0;
}
}
}
float* output_tmp = ( float* )sys_malloc(outwh * (channel_count + 1) * 8 * sizeof(float));
for (int c = 0; c < channel_count + 1; c++)
{
float* ktmp = kernel_tmp + c * 8 * 9;
float* btmp = bias_tmp + c * 8;
for (int i = 0; i < outh; i++)
{
int j = 0;
float* itmp0 = img_tmp + c * 8 * inwh + 8 * i * 2 * inw;
float* itmp1 = img_tmp + c * 8 * inwh + 8 * (i * 2 + 1) * inw;
float* itmp2 = img_tmp + c * 8 * inwh + 8 * (i * 2 + 2) * inw;
float* otmp = output_tmp + c * 8 * outwh + 8 * i * outw;
for (; j + 3 < outw; j += 4)
{
__m256 _sum0 = _mm256_loadu_ps(btmp);
__m256 _sum1 = _mm256_loadu_ps(btmp);
__m256 _sum2 = _mm256_loadu_ps(btmp);
__m256 _sum3 = _mm256_loadu_ps(btmp);
__m256 _va0 = _mm256_loadu_ps(itmp0);
__m256 _va1 = _mm256_loadu_ps(itmp0 + 8);
__m256 _va2 = _mm256_loadu_ps(itmp0 + 16);
__m256 _va3 = _mm256_loadu_ps(itmp0 + 24);
__m256 _va4 = _mm256_loadu_ps(itmp0 + 32);
__m256 _va5 = _mm256_loadu_ps(itmp0 + 40);
__m256 _va6 = _mm256_loadu_ps(itmp0 + 48);
__m256 _va7 = _mm256_loadu_ps(itmp0 + 56);
__m256 _va8 = _mm256_loadu_ps(itmp0 + 64);
__m256 _vb0 = _mm256_loadu_ps(ktmp);
__m256 _vb1 = _mm256_loadu_ps(ktmp + 8);
__m256 _vb2 = _mm256_loadu_ps(ktmp + 16);
_sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0);
_sum0 = _mm256_fmadd_ps(_va1, _vb1, _sum0);
_sum0 = _mm256_fmadd_ps(_va2, _vb2, _sum0);
_sum1 = _mm256_fmadd_ps(_va2, _vb0, _sum1);
_sum1 = _mm256_fmadd_ps(_va3, _vb1, _sum1);
_sum1 = _mm256_fmadd_ps(_va4, _vb2, _sum1);
_sum2 = _mm256_fmadd_ps(_va4, _vb0, _sum2);
_sum2 = _mm256_fmadd_ps(_va5, _vb1, _sum2);
_sum2 = _mm256_fmadd_ps(_va6, _vb2, _sum2);
_sum3 = _mm256_fmadd_ps(_va6, _vb0, _sum3);
_sum3 = _mm256_fmadd_ps(_va7, _vb1, _sum3);
_sum3 = _mm256_fmadd_ps(_va8, _vb2, _sum3);
_va0 = _mm256_loadu_ps(itmp1);
_va1 = _mm256_loadu_ps(itmp1 + 8);
_va2 = _mm256_loadu_ps(itmp1 + 16);
_va3 = _mm256_loadu_ps(itmp1 + 24);
_va4 = _mm256_loadu_ps(itmp1 + 32);
_va5 = _mm256_loadu_ps(itmp1 + 40);
_va6 = _mm256_loadu_ps(itmp1 + 48);
_va7 = _mm256_loadu_ps(itmp1 + 56);
_va8 = _mm256_loadu_ps(itmp1 + 64);
_vb0 = _mm256_loadu_ps(ktmp + 24);
_vb1 = _mm256_loadu_ps(ktmp + 32);
_vb2 = _mm256_loadu_ps(ktmp + 40);
_sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0);
_sum0 = _mm256_fmadd_ps(_va1, _vb1, _sum0);
_sum0 = _mm256_fmadd_ps(_va2, _vb2, _sum0);
_sum1 = _mm256_fmadd_ps(_va2, _vb0, _sum1);
_sum1 = _mm256_fmadd_ps(_va3, _vb1, _sum1);
_sum1 = _mm256_fmadd_ps(_va4, _vb2, _sum1);
_sum2 = _mm256_fmadd_ps(_va4, _vb0, _sum2);
_sum2 = _mm256_fmadd_ps(_va5, _vb1, _sum2);
_sum2 = _mm256_fmadd_ps(_va6, _vb2, _sum2);
_sum3 = _mm256_fmadd_ps(_va6, _vb0, _sum3);
_sum3 = _mm256_fmadd_ps(_va7, _vb1, _sum3);
_sum3 = _mm256_fmadd_ps(_va8, _vb2, _sum3);
_va0 = _mm256_loadu_ps(itmp2);
_va1 = _mm256_loadu_ps(itmp2 + 8);
_va2 = _mm256_loadu_ps(itmp2 + 16);
_va3 = _mm256_loadu_ps(itmp2 + 24);
_va4 = _mm256_loadu_ps(itmp2 + 32);
_va5 = _mm256_loadu_ps(itmp2 + 40);
_va6 = _mm256_loadu_ps(itmp2 + 48);
_va7 = _mm256_loadu_ps(itmp2 + 56);
_va8 = _mm256_loadu_ps(itmp2 + 64);
_vb0 = _mm256_loadu_ps(ktmp + 48);
_vb1 = _mm256_loadu_ps(ktmp + 56);
_vb2 = _mm256_loadu_ps(ktmp + 64);
_sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0);
_sum0 = _mm256_fmadd_ps(_va1, _vb1, _sum0);
_sum0 = _mm256_fmadd_ps(_va2, _vb2, _sum0);
_sum1 = _mm256_fmadd_ps(_va2, _vb0, _sum1);
_sum1 = _mm256_fmadd_ps(_va3, _vb1, _sum1);
_sum1 = _mm256_fmadd_ps(_va4, _vb2, _sum1);
_sum2 = _mm256_fmadd_ps(_va4, _vb0, _sum2);
_sum2 = _mm256_fmadd_ps(_va5, _vb1, _sum2);
_sum2 = _mm256_fmadd_ps(_va6, _vb2, _sum2);
_sum3 = _mm256_fmadd_ps(_va6, _vb0, _sum3);
_sum3 = _mm256_fmadd_ps(_va7, _vb1, _sum3);
_sum3 = _mm256_fmadd_ps(_va8, _vb2, _sum3);
_mm256_storeu_ps(otmp, _sum0);
_mm256_storeu_ps(otmp + 8, _sum1);
_mm256_storeu_ps(otmp + 16, _sum2);
_mm256_storeu_ps(otmp + 24, _sum3);
itmp0 += 64;
itmp1 += 64;
itmp2 += 64;
otmp += 32;
}
for (; j + 1 < outw; j += 2)
{
__m256 _sum0 = _mm256_loadu_ps(btmp);
__m256 _sum1 = _mm256_loadu_ps(btmp);
__m256 _va0 = _mm256_loadu_ps(itmp0);
__m256 _va1 = _mm256_loadu_ps(itmp0 + 8);
__m256 _va2 = _mm256_loadu_ps(itmp0 + 16);
__m256 _va3 = _mm256_loadu_ps(itmp0 + 24);
__m256 _va4 = _mm256_loadu_ps(itmp0 + 32);
__m256 _vb0 = _mm256_loadu_ps(ktmp);
__m256 _vb1 = _mm256_loadu_ps(ktmp + 8);
__m256 _vb2 = _mm256_loadu_ps(ktmp + 16);
_sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0);
_sum0 = _mm256_fmadd_ps(_va1, _vb1, _sum0);
_sum0 = _mm256_fmadd_ps(_va2, _vb2, _sum0);
_sum1 = _mm256_fmadd_ps(_va2, _vb0, _sum1);
_sum1 = _mm256_fmadd_ps(_va3, _vb1, _sum1);
_sum1 = _mm256_fmadd_ps(_va4, _vb2, _sum1);
_va0 = _mm256_loadu_ps(itmp1);
_va1 = _mm256_loadu_ps(itmp1 + 8);
_va2 = _mm256_loadu_ps(itmp1 + 16);
_va3 = _mm256_loadu_ps(itmp1 + 24);
_va4 = _mm256_loadu_ps(itmp1 + 32);
_vb0 = _mm256_loadu_ps(ktmp + 24);
_vb1 = _mm256_loadu_ps(ktmp + 32);
_vb2 = _mm256_loadu_ps(ktmp + 40);
_sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0);
_sum0 = _mm256_fmadd_ps(_va1, _vb1, _sum0);
_sum0 = _mm256_fmadd_ps(_va2, _vb2, _sum0);
_sum1 = _mm256_fmadd_ps(_va2, _vb0, _sum1);
_sum1 = _mm256_fmadd_ps(_va3, _vb1, _sum1);
_sum1 = _mm256_fmadd_ps(_va4, _vb2, _sum1);
_va0 = _mm256_loadu_ps(itmp2);
_va1 = _mm256_loadu_ps(itmp2 + 8);
_va2 = _mm256_loadu_ps(itmp2 + 16);
_va3 = _mm256_loadu_ps(itmp2 + 24);
_va4 = _mm256_loadu_ps(itmp2 + 32);
_vb0 = _mm256_loadu_ps(ktmp + 48);
_vb1 = _mm256_loadu_ps(ktmp + 56);
_vb2 = _mm256_loadu_ps(ktmp + 64);
_sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0);
_sum0 = _mm256_fmadd_ps(_va1, _vb1, _sum0);
_sum0 = _mm256_fmadd_ps(_va2, _vb2, _sum0);
_sum1 = _mm256_fmadd_ps(_va2, _vb0, _sum1);
_sum1 = _mm256_fmadd_ps(_va3, _vb1, _sum1);
_sum1 = _mm256_fmadd_ps(_va4, _vb2, _sum1);
_mm256_storeu_ps(otmp, _sum0);
_mm256_storeu_ps(otmp + 8, _sum1);
itmp0 += 32;
itmp1 += 32;
itmp2 += 32;
otmp += 16;
}
for (; j < outw; j++)
{
__m256 _sum0 = _mm256_loadu_ps(btmp);
__m256 _va0 = _mm256_loadu_ps(itmp0);
__m256 _va1 = _mm256_loadu_ps(itmp0 + 8);
__m256 _va2 = _mm256_loadu_ps(itmp0 + 16);
__m256 _vb0 = _mm256_loadu_ps(ktmp);
__m256 _vb1 = _mm256_loadu_ps(ktmp + 8);
__m256 _vb2 = _mm256_loadu_ps(ktmp + 16);
_sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0);
_sum0 = _mm256_fmadd_ps(_va1, _vb1, _sum0);
_sum0 = _mm256_fmadd_ps(_va2, _vb2, _sum0);
_va0 = _mm256_loadu_ps(itmp1);
_va1 = _mm256_loadu_ps(itmp1 + 8);
_va2 = _mm256_loadu_ps(itmp1 + 16);
_vb0 = _mm256_loadu_ps(ktmp + 24);
_vb1 = _mm256_loadu_ps(ktmp + 32);
_vb2 = _mm256_loadu_ps(ktmp + 40);
_sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0);
_sum0 = _mm256_fmadd_ps(_va1, _vb1, _sum0);
_sum0 = _mm256_fmadd_ps(_va2, _vb2, _sum0);
_va0 = _mm256_loadu_ps(itmp2);
_va1 = _mm256_loadu_ps(itmp2 + 8);
_va2 = _mm256_loadu_ps(itmp2 + 16);
_vb0 = _mm256_loadu_ps(ktmp + 48);
_vb1 = _mm256_loadu_ps(ktmp + 56);
_vb2 = _mm256_loadu_ps(ktmp + 64);
_sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0);
_sum0 = _mm256_fmadd_ps(_va1, _vb1, _sum0);
_sum0 = _mm256_fmadd_ps(_va2, _vb2, _sum0);
_mm256_storeu_ps(otmp, _sum0);
itmp0 += 16;
itmp1 += 16;
itmp2 += 16;
otmp += 8;
}
}
}
// load_data
{
for (int i = 0; i < channel_count; i++)
{
float* otmp = output_tmp + i * 8 * outwh;
float* tmp0 = output + i * 8 * outwh;
float* tmp1 = output + i * 8 * outwh + 1 * outwh;
float* tmp2 = output + i * 8 * outwh + 2 * outwh;
float* tmp3 = output + i * 8 * outwh + 3 * outwh;
float* tmp4 = output + i * 8 * outwh + 4 * outwh;
float* tmp5 = output + i * 8 * outwh + 5 * outwh;
float* tmp6 = output + i * 8 * outwh + 6 * outwh;
float* tmp7 = output + i * 8 * outwh + 7 * outwh;
for (int i = 0; i < outwh; i++)
{
tmp0[0] = otmp[0];
tmp1[0] = otmp[1];
tmp2[0] = otmp[2];
tmp3[0] = otmp[3];
tmp4[0] = otmp[4];
tmp5[0] = otmp[5];
tmp6[0] = otmp[6];
tmp7[0] = otmp[7];
otmp += 8;
tmp0++;
tmp1++;
tmp2++;
tmp3++;
tmp4++;
tmp5++;
tmp6++;
tmp7++;
}
}
int i = 0;
for (; i + 3 < channel_remain; i += 4)
{
int ii = channel_count * 8 + i;
float* otmp = output_tmp + ii * outwh;
float* tmp0 = output + ii * outwh;
float* tmp1 = output + ii * outwh + 1 * outwh;
float* tmp2 = output + ii * outwh + 2 * outwh;
float* tmp3 = output + ii * outwh + 3 * outwh;
for (int j = 0; j < outwh; j++)
{
tmp0[0] = otmp[0];
tmp1[0] = otmp[1];
tmp2[0] = otmp[2];
tmp3[0] = otmp[3];
otmp += 8;
tmp0++;
tmp1++;
tmp2++;
tmp3++;
}
}
for (; i < channel_remain; i++)
{
int ii = channel_count * 8 + i;
float* otmp = output_tmp + channel_count * 8 * outwh;
float* tmp0 = output + ii * outwh;
for (int j = 0; j < outwh; j++)
{
tmp0[0] = otmp[i];
otmp += 8;
tmp0++;
}
}
}
sys_free(output_tmp);
sys_free(img_tmp);
sys_free(kernel_tmp);
sys_free(bias_tmp);
}
#elif __SSE2__
// Depthwise 3x3, stride-1 convolution (SSE path).
//
// Strategy: repack the planar (CHW) input, kernel and bias into
// channel-interleaved groups of 4 lanes ("C/4 HW4" layout), evaluate the
// 3x3 window with 4-lane SIMD across channels, then scatter the
// interleaved result back to planar CHW output.
//
// Parameters:
//   output      - planar output buffer, inc * outh * outw floats
//   img_data    - planar input buffer, inc * inh * inw floats
//   kernel_data - 9 floats per channel (3x3 taps, row-major)
//   bias_data   - one float per channel, may be NULL (treated as 0)
//   inc         - channel count (depthwise: in channels == out channels)
//   inh, inw    - input height/width
//   outh, outw  - output height/width
//   num_thread  - unused in this implementation
//
// NOTE(review): the inner loads read rows i..i+2 and columns j..j+2 of the
// input, so this assumes inh >= outh + 2 and inw >= outw + 2 (padding, if
// any, handled by the caller) — TODO confirm against call sites.
static void convdw3x3s1(float* output, float* img_data, float* kernel_data, float* bias_data, int inc, int inh, int inw,
                        int outh, int outw, int num_thread)
{
    int inwh = inw * inh;
    int outwh = outw * outh;
    int channel_count = inc >> 2;                     // number of full groups of 4 channels
    int channel_remain = inc - (channel_count << 2);  // leftover channels (0..3)
    // generate the image tmp
    // Scratch buffers sized for channel_count + 1 groups: the extra group
    // holds the remainder channels so they also get a full 4-lane slot.
    float* img_tmp = ( float* )sys_malloc(4 * inwh * (channel_count + 1) * sizeof(float));
    float* kernel_tmp = ( float* )sys_malloc(4 * 9 * (channel_count + 1) * sizeof(float));
    float* bias_tmp = ( float* )sys_malloc(4 * (channel_count + 1) * sizeof(float));
    // --- Packing: interleave 4 consecutive channels into one SIMD group ---
    {
        for (int i = 0; i < channel_count; i++)
        {
            int ii = i * 4;
            // Source rows for the 4 channels of this group.
            float* k0 = img_data + (ii + 0) * inwh;
            float* k1 = img_data + (ii + 1) * inwh;
            float* k2 = img_data + (ii + 2) * inwh;
            float* k3 = img_data + (ii + 3) * inwh;
            float* f0 = kernel_data + (ii + 0) * 9;
            float* f1 = kernel_data + (ii + 1) * 9;
            float* f2 = kernel_data + (ii + 2) * 9;
            float* f3 = kernel_data + (ii + 3) * 9;
            // NOTE(review): b0..b3 are computed even when bias_data is NULL
            // (technically UB pointer arithmetic); they are only dereferenced
            // below when bias_data is non-null.
            float* b0 = bias_data + (ii + 0);
            float* b1 = bias_data + (ii + 1);
            float* b2 = bias_data + (ii + 2);
            float* b3 = bias_data + (ii + 3);
            float* tmp0 = img_tmp + ii * inwh;
            float* tmp1 = kernel_tmp + ii * 9;
            float* tmp2 = bias_tmp + ii;
            // Interleave pixels: tmp0[4*p + c] = channel c, pixel p.
            for (int j = 0; j < inwh; j++)
            {
                tmp0[0] = k0[0];
                tmp0[1] = k1[0];
                tmp0[2] = k2[0];
                tmp0[3] = k3[0];
                tmp0 += 4;
                k0++;
                k1++;
                k2++;
                k3++;
            }
            // Interleave the 9 kernel taps the same way.
            for (int j = 0; j < 9; j++)
            {
                tmp1[0] = f0[0];
                tmp1[1] = f1[0];
                tmp1[2] = f2[0];
                tmp1[3] = f3[0];
                tmp1 += 4;
                f0++;
                f1++;
                f2++;
                f3++;
            }
            if (bias_data)
            {
                tmp2[0] = b0[0];
                tmp2[1] = b1[0];
                tmp2[2] = b2[0];
                tmp2[3] = b3[0];
            }
            else
            {
                // No bias: accumulators start at 0.
                tmp2[0] = 0;
                tmp2[1] = 0;
                tmp2[2] = 0;
                tmp2[3] = 0;
            }
        }
        // Remainder channels: channel i goes into lane i of the final
        // (partial) group. Unused lanes stay uninitialized; they produce
        // garbage in the compute loop but are never copied to the output.
        for (int i = 0; i < channel_remain; i++)
        {
            int ii = channel_count * 4 + i;
            float* k0 = img_data + ii * inwh;
            float* f0 = kernel_data + ii * 9;
            float* b0 = bias_data + ii;  // see NULL-bias note above
            float* tmp0 = img_tmp + channel_count * 4 * inwh;
            float* tmp1 = kernel_tmp + channel_count * 4 * 9;
            float* tmp2 = bias_tmp + channel_count * 4;
            for (int j = 0; j < inwh; j++)
            {
                tmp0[i] = k0[0];
                tmp0 += 4;
                k0++;
            }
            for (int j = 0; j < 9; j++)
            {
                tmp1[i] = f0[0];
                tmp1 += 4;
                f0++;
            }
            if (bias_data)
            {
                tmp2[i] = b0[0];
            }
            else
            {
                tmp2[i] = 0;
            }
        }
    }
    // --- Compute: one pass per 4-channel group (including the remainder
    // group; when channel_remain == 0 that last pass reads uninitialized
    // scratch, but its result is discarded on unpack). ---
    float* output_tmp = ( float* )sys_malloc(outwh * 4 * (channel_count + 1) * sizeof(float));
    for (int c = 0; c < channel_count + 1; c++)
    {
        float* ktmp = kernel_tmp + c * 4 * 9;  // 9 interleaved taps of this group
        float* btmp = bias_tmp + c * 4;        // 4-lane bias of this group
        for (int i = 0; i < outh; i++)
        {
            int j = 0;
            // Three consecutive input rows feeding output row i (stride 1).
            float* itmp0 = img_tmp + c * 4 * inwh + 4 * i * inw;
            float* itmp1 = img_tmp + c * 4 * inwh + 4 * (i + 1) * inw;
            float* itmp2 = img_tmp + c * 4 * inwh + 4 * (i + 2) * inw;
            float* otmp = output_tmp + c * 4 * outwh + 4 * i * outw;
            // 8-wide column unroll: _sumN accumulates output column j+N;
            // _vaK is input column j+K (4 lanes), _vbK is kernel tap K of
            // the current kernel row.
            for (; j + 7 < outw; j += 8)
            {
#if __SSE__
                __m128 _sum0 = _mm_loadu_ps(btmp);
                __m128 _sum1 = _mm_loadu_ps(btmp);
                __m128 _sum2 = _mm_loadu_ps(btmp);
                __m128 _sum3 = _mm_loadu_ps(btmp);
                __m128 _sum4 = _mm_loadu_ps(btmp);
                __m128 _sum5 = _mm_loadu_ps(btmp);
                __m128 _sum6 = _mm_loadu_ps(btmp);
                __m128 _sum7 = _mm_loadu_ps(btmp);
                // Kernel row 0 against input row i.
                __m128 _va0 = _mm_loadu_ps(itmp0);
                __m128 _va1 = _mm_loadu_ps(itmp0 + 4);
                __m128 _va2 = _mm_loadu_ps(itmp0 + 8);
                __m128 _va3 = _mm_loadu_ps(itmp0 + 12);
                __m128 _va4 = _mm_loadu_ps(itmp0 + 16);
                __m128 _va5 = _mm_loadu_ps(itmp0 + 20);
                __m128 _va6 = _mm_loadu_ps(itmp0 + 24);
                __m128 _va7 = _mm_loadu_ps(itmp0 + 28);
                __m128 _va8 = _mm_loadu_ps(itmp0 + 32);
                __m128 _va9 = _mm_loadu_ps(itmp0 + 36);
                __m128 _vb0 = _mm_loadu_ps(ktmp);
                __m128 _vb1 = _mm_loadu_ps(ktmp + 4);
                __m128 _vb2 = _mm_loadu_ps(ktmp + 8);
                _sum0 = _mm_add_ps(_mm_mul_ps(_va0, _vb0), _sum0);
                _sum1 = _mm_add_ps(_mm_mul_ps(_va1, _vb0), _sum1);
                _sum0 = _mm_add_ps(_mm_mul_ps(_va1, _vb1), _sum0);
                _sum1 = _mm_add_ps(_mm_mul_ps(_va2, _vb1), _sum1);
                _sum0 = _mm_add_ps(_mm_mul_ps(_va2, _vb2), _sum0);
                _sum2 = _mm_add_ps(_mm_mul_ps(_va2, _vb0), _sum2);
                _sum3 = _mm_add_ps(_mm_mul_ps(_va3, _vb0), _sum3);
                _sum1 = _mm_add_ps(_mm_mul_ps(_va3, _vb2), _sum1);
                _sum2 = _mm_add_ps(_mm_mul_ps(_va3, _vb1), _sum2);
                _sum3 = _mm_add_ps(_mm_mul_ps(_va4, _vb1), _sum3);
                _sum4 = _mm_add_ps(_mm_mul_ps(_va4, _vb0), _sum4);
                _sum2 = _mm_add_ps(_mm_mul_ps(_va4, _vb2), _sum2);
                _sum3 = _mm_add_ps(_mm_mul_ps(_va5, _vb2), _sum3);
                _sum5 = _mm_add_ps(_mm_mul_ps(_va5, _vb0), _sum5);
                _sum4 = _mm_add_ps(_mm_mul_ps(_va5, _vb1), _sum4);
                _sum5 = _mm_add_ps(_mm_mul_ps(_va6, _vb1), _sum5);
                _sum4 = _mm_add_ps(_mm_mul_ps(_va6, _vb2), _sum4);
                _sum6 = _mm_add_ps(_mm_mul_ps(_va6, _vb0), _sum6);
                _sum7 = _mm_add_ps(_mm_mul_ps(_va7, _vb0), _sum7);
                _sum5 = _mm_add_ps(_mm_mul_ps(_va7, _vb2), _sum5);
                _sum6 = _mm_add_ps(_mm_mul_ps(_va7, _vb1), _sum6);
                _sum7 = _mm_add_ps(_mm_mul_ps(_va8, _vb1), _sum7);
                _sum6 = _mm_add_ps(_mm_mul_ps(_va8, _vb2), _sum6);
                _sum7 = _mm_add_ps(_mm_mul_ps(_va9, _vb2), _sum7);
                // Kernel row 1 against input row i+1.
                _va0 = _mm_loadu_ps(itmp1);
                _va1 = _mm_loadu_ps(itmp1 + 4);
                _va2 = _mm_loadu_ps(itmp1 + 8);
                _va3 = _mm_loadu_ps(itmp1 + 12);
                _va4 = _mm_loadu_ps(itmp1 + 16);
                _va5 = _mm_loadu_ps(itmp1 + 20);
                _va6 = _mm_loadu_ps(itmp1 + 24);
                _va7 = _mm_loadu_ps(itmp1 + 28);
                _va8 = _mm_loadu_ps(itmp1 + 32);
                _va9 = _mm_loadu_ps(itmp1 + 36);
                _vb0 = _mm_loadu_ps(ktmp + 12);
                _vb1 = _mm_loadu_ps(ktmp + 16);
                _vb2 = _mm_loadu_ps(ktmp + 20);
                _sum0 = _mm_add_ps(_mm_mul_ps(_va0, _vb0), _sum0);
                _sum1 = _mm_add_ps(_mm_mul_ps(_va1, _vb0), _sum1);
                _sum0 = _mm_add_ps(_mm_mul_ps(_va1, _vb1), _sum0);
                _sum1 = _mm_add_ps(_mm_mul_ps(_va2, _vb1), _sum1);
                _sum0 = _mm_add_ps(_mm_mul_ps(_va2, _vb2), _sum0);
                _sum2 = _mm_add_ps(_mm_mul_ps(_va2, _vb0), _sum2);
                _sum3 = _mm_add_ps(_mm_mul_ps(_va3, _vb0), _sum3);
                _sum1 = _mm_add_ps(_mm_mul_ps(_va3, _vb2), _sum1);
                _sum2 = _mm_add_ps(_mm_mul_ps(_va3, _vb1), _sum2);
                _sum3 = _mm_add_ps(_mm_mul_ps(_va4, _vb1), _sum3);
                _sum4 = _mm_add_ps(_mm_mul_ps(_va4, _vb0), _sum4);
                _sum2 = _mm_add_ps(_mm_mul_ps(_va4, _vb2), _sum2);
                _sum3 = _mm_add_ps(_mm_mul_ps(_va5, _vb2), _sum3);
                _sum5 = _mm_add_ps(_mm_mul_ps(_va5, _vb0), _sum5);
                _sum4 = _mm_add_ps(_mm_mul_ps(_va5, _vb1), _sum4);
                _sum5 = _mm_add_ps(_mm_mul_ps(_va6, _vb1), _sum5);
                _sum4 = _mm_add_ps(_mm_mul_ps(_va6, _vb2), _sum4);
                _sum6 = _mm_add_ps(_mm_mul_ps(_va6, _vb0), _sum6);
                _sum7 = _mm_add_ps(_mm_mul_ps(_va7, _vb0), _sum7);
                _sum5 = _mm_add_ps(_mm_mul_ps(_va7, _vb2), _sum5);
                _sum6 = _mm_add_ps(_mm_mul_ps(_va7, _vb1), _sum6);
                _sum7 = _mm_add_ps(_mm_mul_ps(_va8, _vb1), _sum7);
                _sum6 = _mm_add_ps(_mm_mul_ps(_va8, _vb2), _sum6);
                _sum7 = _mm_add_ps(_mm_mul_ps(_va9, _vb2), _sum7);
                // Kernel row 2 against input row i+2.
                _va0 = _mm_loadu_ps(itmp2);
                _va1 = _mm_loadu_ps(itmp2 + 4);
                _va2 = _mm_loadu_ps(itmp2 + 8);
                _va3 = _mm_loadu_ps(itmp2 + 12);
                _va4 = _mm_loadu_ps(itmp2 + 16);
                _va5 = _mm_loadu_ps(itmp2 + 20);
                _va6 = _mm_loadu_ps(itmp2 + 24);
                _va7 = _mm_loadu_ps(itmp2 + 28);
                _va8 = _mm_loadu_ps(itmp2 + 32);
                _va9 = _mm_loadu_ps(itmp2 + 36);
                _vb0 = _mm_loadu_ps(ktmp + 24);
                _vb1 = _mm_loadu_ps(ktmp + 28);
                _vb2 = _mm_loadu_ps(ktmp + 32);
                _sum0 = _mm_add_ps(_mm_mul_ps(_va0, _vb0), _sum0);
                _sum1 = _mm_add_ps(_mm_mul_ps(_va1, _vb0), _sum1);
                _sum0 = _mm_add_ps(_mm_mul_ps(_va1, _vb1), _sum0);
                _sum1 = _mm_add_ps(_mm_mul_ps(_va2, _vb1), _sum1);
                _sum0 = _mm_add_ps(_mm_mul_ps(_va2, _vb2), _sum0);
                _sum2 = _mm_add_ps(_mm_mul_ps(_va2, _vb0), _sum2);
                _sum3 = _mm_add_ps(_mm_mul_ps(_va3, _vb0), _sum3);
                _sum1 = _mm_add_ps(_mm_mul_ps(_va3, _vb2), _sum1);
                _sum2 = _mm_add_ps(_mm_mul_ps(_va3, _vb1), _sum2);
                _sum3 = _mm_add_ps(_mm_mul_ps(_va4, _vb1), _sum3);
                _sum4 = _mm_add_ps(_mm_mul_ps(_va4, _vb0), _sum4);
                _sum2 = _mm_add_ps(_mm_mul_ps(_va4, _vb2), _sum2);
                _sum3 = _mm_add_ps(_mm_mul_ps(_va5, _vb2), _sum3);
                _sum5 = _mm_add_ps(_mm_mul_ps(_va5, _vb0), _sum5);
                _sum4 = _mm_add_ps(_mm_mul_ps(_va5, _vb1), _sum4);
                _sum5 = _mm_add_ps(_mm_mul_ps(_va6, _vb1), _sum5);
                _sum4 = _mm_add_ps(_mm_mul_ps(_va6, _vb2), _sum4);
                _sum6 = _mm_add_ps(_mm_mul_ps(_va6, _vb0), _sum6);
                _sum7 = _mm_add_ps(_mm_mul_ps(_va7, _vb0), _sum7);
                _sum5 = _mm_add_ps(_mm_mul_ps(_va7, _vb2), _sum5);
                _sum6 = _mm_add_ps(_mm_mul_ps(_va7, _vb1), _sum6);
                _sum7 = _mm_add_ps(_mm_mul_ps(_va8, _vb1), _sum7);
                _sum6 = _mm_add_ps(_mm_mul_ps(_va8, _vb2), _sum6);
                _sum7 = _mm_add_ps(_mm_mul_ps(_va9, _vb2), _sum7);
                _mm_storeu_ps(otmp, _sum0);
                _mm_storeu_ps(otmp + 4, _sum1);
                _mm_storeu_ps(otmp + 8, _sum2);
                _mm_storeu_ps(otmp + 12, _sum3);
                _mm_storeu_ps(otmp + 16, _sum4);
                _mm_storeu_ps(otmp + 20, _sum5);
                _mm_storeu_ps(otmp + 24, _sum6);
                _mm_storeu_ps(otmp + 28, _sum7);
#else
                // Plain-C fallback mirroring the SSE path: k indexes the
                // lane (channel within the group), sumN is output column j+N.
                float sum0[4] = {btmp[0], btmp[1], btmp[2], btmp[3]};
                float sum1[4] = {btmp[0], btmp[1], btmp[2], btmp[3]};
                float sum2[4] = {btmp[0], btmp[1], btmp[2], btmp[3]};
                float sum3[4] = {btmp[0], btmp[1], btmp[2], btmp[3]};
                float sum4[4] = {btmp[0], btmp[1], btmp[2], btmp[3]};
                float sum5[4] = {btmp[0], btmp[1], btmp[2], btmp[3]};
                float sum6[4] = {btmp[0], btmp[1], btmp[2], btmp[3]};
                float sum7[4] = {btmp[0], btmp[1], btmp[2], btmp[3]};
                for (int k = 0; k < 4; k++)
                {
                    sum0[k] += itmp0[k] * ktmp[k];
                    sum0[k] += itmp1[k] * ktmp[k + 12];
                    sum0[k] += itmp2[k] * ktmp[k + 24];
                    sum0[k] += itmp0[k + 4] * ktmp[k + 4];
                    sum0[k] += itmp1[k + 4] * ktmp[k + 16];
                    sum0[k] += itmp2[k + 4] * ktmp[k + 28];
                    sum0[k] += itmp0[k + 8] * ktmp[k + 8];
                    sum0[k] += itmp1[k + 8] * ktmp[k + 20];
                    sum0[k] += itmp2[k + 8] * ktmp[k + 32];
                    sum1[k] += itmp0[k + 4] * ktmp[k];
                    sum1[k] += itmp1[k + 4] * ktmp[k + 12];
                    sum1[k] += itmp2[k + 4] * ktmp[k + 24];
                    sum1[k] += itmp0[k + 8] * ktmp[k + 4];
                    sum1[k] += itmp1[k + 8] * ktmp[k + 16];
                    sum1[k] += itmp2[k + 8] * ktmp[k + 28];
                    sum1[k] += itmp0[k + 12] * ktmp[k + 8];
                    sum1[k] += itmp1[k + 12] * ktmp[k + 20];
                    sum1[k] += itmp2[k + 12] * ktmp[k + 32];
                    sum2[k] += itmp0[k + 8] * ktmp[k];
                    sum2[k] += itmp1[k + 8] * ktmp[k + 12];
                    sum2[k] += itmp2[k + 8] * ktmp[k + 24];
                    sum2[k] += itmp0[k + 12] * ktmp[k + 4];
                    sum2[k] += itmp1[k + 12] * ktmp[k + 16];
                    sum2[k] += itmp2[k + 12] * ktmp[k + 28];
                    sum2[k] += itmp0[k + 16] * ktmp[k + 8];
                    sum2[k] += itmp1[k + 16] * ktmp[k + 20];
                    sum2[k] += itmp2[k + 16] * ktmp[k + 32];
                    sum3[k] += itmp0[k + 12] * ktmp[k];
                    sum3[k] += itmp1[k + 12] * ktmp[k + 12];
                    sum3[k] += itmp2[k + 12] * ktmp[k + 24];
                    sum3[k] += itmp0[k + 16] * ktmp[k + 4];
                    sum3[k] += itmp1[k + 16] * ktmp[k + 16];
                    sum3[k] += itmp2[k + 16] * ktmp[k + 28];
                    sum3[k] += itmp0[k + 20] * ktmp[k + 8];
                    sum3[k] += itmp1[k + 20] * ktmp[k + 20];
                    sum3[k] += itmp2[k + 20] * ktmp[k + 32];
                    sum4[k] += itmp0[k + 16] * ktmp[k];
                    sum4[k] += itmp1[k + 16] * ktmp[k + 12];
                    sum4[k] += itmp2[k + 16] * ktmp[k + 24];
                    sum4[k] += itmp0[k + 20] * ktmp[k + 4];
                    sum4[k] += itmp1[k + 20] * ktmp[k + 16];
                    sum4[k] += itmp2[k + 20] * ktmp[k + 28];
                    sum4[k] += itmp0[k + 24] * ktmp[k + 8];
                    sum4[k] += itmp1[k + 24] * ktmp[k + 20];
                    sum4[k] += itmp2[k + 24] * ktmp[k + 32];
                    sum5[k] += itmp0[k + 20] * ktmp[k];
                    sum5[k] += itmp1[k + 20] * ktmp[k + 12];
                    sum5[k] += itmp2[k + 20] * ktmp[k + 24];
                    sum5[k] += itmp0[k + 24] * ktmp[k + 4];
                    sum5[k] += itmp1[k + 24] * ktmp[k + 16];
                    sum5[k] += itmp2[k + 24] * ktmp[k + 28];
                    sum5[k] += itmp0[k + 28] * ktmp[k + 8];
                    sum5[k] += itmp1[k + 28] * ktmp[k + 20];
                    sum5[k] += itmp2[k + 28] * ktmp[k + 32];
                    sum6[k] += itmp0[k + 24] * ktmp[k];
                    sum6[k] += itmp1[k + 24] * ktmp[k + 12];
                    sum6[k] += itmp2[k + 24] * ktmp[k + 24];
                    sum6[k] += itmp0[k + 28] * ktmp[k + 4];
                    sum6[k] += itmp1[k + 28] * ktmp[k + 16];
                    sum6[k] += itmp2[k + 28] * ktmp[k + 28];
                    sum6[k] += itmp0[k + 32] * ktmp[k + 8];
                    sum6[k] += itmp1[k + 32] * ktmp[k + 20];
                    sum6[k] += itmp2[k + 32] * ktmp[k + 32];
                    sum7[k] += itmp0[k + 28] * ktmp[k];
                    sum7[k] += itmp1[k + 28] * ktmp[k + 12];
                    sum7[k] += itmp2[k + 28] * ktmp[k + 24];
                    sum7[k] += itmp0[k + 32] * ktmp[k + 4];
                    sum7[k] += itmp1[k + 32] * ktmp[k + 16];
                    sum7[k] += itmp2[k + 32] * ktmp[k + 28];
                    sum7[k] += itmp0[k + 36] * ktmp[k + 8];
                    sum7[k] += itmp1[k + 36] * ktmp[k + 20];
                    sum7[k] += itmp2[k + 36] * ktmp[k + 32];
                }
                for (int k = 0; k < 4; k++)
                {
                    otmp[k] = sum0[k];
                    otmp[k + 4] = sum1[k];
                    otmp[k + 8] = sum2[k];
                    otmp[k + 12] = sum3[k];
                    otmp[k + 16] = sum4[k];
                    otmp[k + 20] = sum5[k];
                    otmp[k + 24] = sum6[k];
                    otmp[k + 28] = sum7[k];
                }
#endif
                // Advance 8 output columns (stride 1, 4 floats per column).
                itmp0 += 32;
                itmp1 += 32;
                itmp2 += 32;
                otmp += 32;
            }
            // 4-wide column unroll for the remaining columns.
            for (; j + 3 < outw; j += 4)
            {
#if __SSE__
                __m128 _sum0 = _mm_loadu_ps(btmp);
                __m128 _sum1 = _mm_loadu_ps(btmp);
                __m128 _sum2 = _mm_loadu_ps(btmp);
                __m128 _sum3 = _mm_loadu_ps(btmp);
                __m128 _va0 = _mm_loadu_ps(itmp0);
                __m128 _va1 = _mm_loadu_ps(itmp0 + 4);
                __m128 _va2 = _mm_loadu_ps(itmp0 + 8);
                __m128 _va3 = _mm_loadu_ps(itmp0 + 12);
                __m128 _va4 = _mm_loadu_ps(itmp0 + 16);
                __m128 _va5 = _mm_loadu_ps(itmp0 + 20);
                __m128 _vb0 = _mm_loadu_ps(ktmp);
                __m128 _vb1 = _mm_loadu_ps(ktmp + 4);
                __m128 _vb2 = _mm_loadu_ps(ktmp + 8);
                _sum0 = _mm_add_ps(_mm_mul_ps(_va0, _vb0), _sum0);
                _sum1 = _mm_add_ps(_mm_mul_ps(_va1, _vb0), _sum1);
                _sum0 = _mm_add_ps(_mm_mul_ps(_va1, _vb1), _sum0);
                _sum1 = _mm_add_ps(_mm_mul_ps(_va2, _vb1), _sum1);
                _sum0 = _mm_add_ps(_mm_mul_ps(_va2, _vb2), _sum0);
                _sum2 = _mm_add_ps(_mm_mul_ps(_va2, _vb0), _sum2);
                _sum3 = _mm_add_ps(_mm_mul_ps(_va3, _vb0), _sum3);
                _sum1 = _mm_add_ps(_mm_mul_ps(_va3, _vb2), _sum1);
                _sum2 = _mm_add_ps(_mm_mul_ps(_va3, _vb1), _sum2);
                _sum3 = _mm_add_ps(_mm_mul_ps(_va4, _vb1), _sum3);
                _sum2 = _mm_add_ps(_mm_mul_ps(_va4, _vb2), _sum2);
                _sum3 = _mm_add_ps(_mm_mul_ps(_va5, _vb2), _sum3);
                _va0 = _mm_loadu_ps(itmp1);
                _va1 = _mm_loadu_ps(itmp1 + 4);
                _va2 = _mm_loadu_ps(itmp1 + 8);
                _va3 = _mm_loadu_ps(itmp1 + 12);
                _va4 = _mm_loadu_ps(itmp1 + 16);
                _va5 = _mm_loadu_ps(itmp1 + 20);
                _vb0 = _mm_loadu_ps(ktmp + 12);
                _vb1 = _mm_loadu_ps(ktmp + 16);
                _vb2 = _mm_loadu_ps(ktmp + 20);
                _sum0 = _mm_add_ps(_mm_mul_ps(_va0, _vb0), _sum0);
                _sum1 = _mm_add_ps(_mm_mul_ps(_va1, _vb0), _sum1);
                _sum0 = _mm_add_ps(_mm_mul_ps(_va1, _vb1), _sum0);
                _sum1 = _mm_add_ps(_mm_mul_ps(_va2, _vb1), _sum1);
                _sum0 = _mm_add_ps(_mm_mul_ps(_va2, _vb2), _sum0);
                _sum2 = _mm_add_ps(_mm_mul_ps(_va2, _vb0), _sum2);
                _sum3 = _mm_add_ps(_mm_mul_ps(_va3, _vb0), _sum3);
                _sum1 = _mm_add_ps(_mm_mul_ps(_va3, _vb2), _sum1);
                _sum2 = _mm_add_ps(_mm_mul_ps(_va3, _vb1), _sum2);
                _sum3 = _mm_add_ps(_mm_mul_ps(_va4, _vb1), _sum3);
                _sum2 = _mm_add_ps(_mm_mul_ps(_va4, _vb2), _sum2);
                _sum3 = _mm_add_ps(_mm_mul_ps(_va5, _vb2), _sum3);
                _va0 = _mm_loadu_ps(itmp2);
                _va1 = _mm_loadu_ps(itmp2 + 4);
                _va2 = _mm_loadu_ps(itmp2 + 8);
                _va3 = _mm_loadu_ps(itmp2 + 12);
                _va4 = _mm_loadu_ps(itmp2 + 16);
                _va5 = _mm_loadu_ps(itmp2 + 20);
                _vb0 = _mm_loadu_ps(ktmp + 24);
                _vb1 = _mm_loadu_ps(ktmp + 28);
                _vb2 = _mm_loadu_ps(ktmp + 32);
                _sum0 = _mm_add_ps(_mm_mul_ps(_va0, _vb0), _sum0);
                _sum1 = _mm_add_ps(_mm_mul_ps(_va1, _vb0), _sum1);
                _sum0 = _mm_add_ps(_mm_mul_ps(_va1, _vb1), _sum0);
                _sum1 = _mm_add_ps(_mm_mul_ps(_va2, _vb1), _sum1);
                _sum0 = _mm_add_ps(_mm_mul_ps(_va2, _vb2), _sum0);
                _sum2 = _mm_add_ps(_mm_mul_ps(_va2, _vb0), _sum2);
                _sum3 = _mm_add_ps(_mm_mul_ps(_va3, _vb0), _sum3);
                _sum1 = _mm_add_ps(_mm_mul_ps(_va3, _vb2), _sum1);
                _sum2 = _mm_add_ps(_mm_mul_ps(_va3, _vb1), _sum2);
                _sum3 = _mm_add_ps(_mm_mul_ps(_va4, _vb1), _sum3);
                _sum2 = _mm_add_ps(_mm_mul_ps(_va4, _vb2), _sum2);
                _sum3 = _mm_add_ps(_mm_mul_ps(_va5, _vb2), _sum3);
                _mm_storeu_ps(otmp, _sum0);
                _mm_storeu_ps(otmp + 4, _sum1);
                _mm_storeu_ps(otmp + 8, _sum2);
                _mm_storeu_ps(otmp + 12, _sum3);
#else
                // Scalar fallback, same tap/offset schedule as above.
                float sum0[4] = {btmp[0], btmp[1], btmp[2], btmp[3]};
                float sum1[4] = {btmp[0], btmp[1], btmp[2], btmp[3]};
                float sum2[4] = {btmp[0], btmp[1], btmp[2], btmp[3]};
                float sum3[4] = {btmp[0], btmp[1], btmp[2], btmp[3]};
                for (int k = 0; k < 4; k++)
                {
                    sum0[k] += itmp0[k] * ktmp[k];
                    sum0[k] += itmp1[k] * ktmp[k + 12];
                    sum0[k] += itmp2[k] * ktmp[k + 24];
                    sum0[k] += itmp0[k + 4] * ktmp[k + 4];
                    sum0[k] += itmp1[k + 4] * ktmp[k + 16];
                    sum0[k] += itmp2[k + 4] * ktmp[k + 28];
                    sum0[k] += itmp0[k + 8] * ktmp[k + 8];
                    sum0[k] += itmp1[k + 8] * ktmp[k + 20];
                    sum0[k] += itmp2[k + 8] * ktmp[k + 32];
                    sum1[k] += itmp0[k + 4] * ktmp[k];
                    sum1[k] += itmp1[k + 4] * ktmp[k + 12];
                    sum1[k] += itmp2[k + 4] * ktmp[k + 24];
                    sum1[k] += itmp0[k + 8] * ktmp[k + 4];
                    sum1[k] += itmp1[k + 8] * ktmp[k + 16];
                    sum1[k] += itmp2[k + 8] * ktmp[k + 28];
                    sum1[k] += itmp0[k + 12] * ktmp[k + 8];
                    sum1[k] += itmp1[k + 12] * ktmp[k + 20];
                    sum1[k] += itmp2[k + 12] * ktmp[k + 32];
                    sum2[k] += itmp0[k + 8] * ktmp[k];
                    sum2[k] += itmp1[k + 8] * ktmp[k + 12];
                    sum2[k] += itmp2[k + 8] * ktmp[k + 24];
                    sum2[k] += itmp0[k + 12] * ktmp[k + 4];
                    sum2[k] += itmp1[k + 12] * ktmp[k + 16];
                    sum2[k] += itmp2[k + 12] * ktmp[k + 28];
                    sum2[k] += itmp0[k + 16] * ktmp[k + 8];
                    sum2[k] += itmp1[k + 16] * ktmp[k + 20];
                    sum2[k] += itmp2[k + 16] * ktmp[k + 32];
                    sum3[k] += itmp0[k + 12] * ktmp[k];
                    sum3[k] += itmp1[k + 12] * ktmp[k + 12];
                    sum3[k] += itmp2[k + 12] * ktmp[k + 24];
                    sum3[k] += itmp0[k + 16] * ktmp[k + 4];
                    sum3[k] += itmp1[k + 16] * ktmp[k + 16];
                    sum3[k] += itmp2[k + 16] * ktmp[k + 28];
                    sum3[k] += itmp0[k + 20] * ktmp[k + 8];
                    sum3[k] += itmp1[k + 20] * ktmp[k + 20];
                    sum3[k] += itmp2[k + 20] * ktmp[k + 32];
                }
                for (int k = 0; k < 4; k++)
                {
                    otmp[k] = sum0[k];
                    otmp[k + 4] = sum1[k];
                    otmp[k + 8] = sum2[k];
                    otmp[k + 12] = sum3[k];
                }
#endif
                itmp0 += 16;
                itmp1 += 16;
                itmp2 += 16;
                otmp += 16;
            }
            // Scalar-column tail: one output column per iteration.
            for (; j < outw; j++)
            {
#if __SSE__
                __m128 _sum0 = _mm_loadu_ps(btmp);
                __m128 _va0 = _mm_loadu_ps(itmp0);
                __m128 _va1 = _mm_loadu_ps(itmp0 + 4);
                __m128 _va2 = _mm_loadu_ps(itmp0 + 8);
                __m128 _vb0 = _mm_loadu_ps(ktmp);
                __m128 _vb1 = _mm_loadu_ps(ktmp + 4);
                __m128 _vb2 = _mm_loadu_ps(ktmp + 8);
                _sum0 = _mm_add_ps(_mm_mul_ps(_va0, _vb0), _sum0);
                _sum0 = _mm_add_ps(_mm_mul_ps(_va1, _vb1), _sum0);
                _sum0 = _mm_add_ps(_mm_mul_ps(_va2, _vb2), _sum0);
                _va0 = _mm_loadu_ps(itmp1);
                _va1 = _mm_loadu_ps(itmp1 + 4);
                _va2 = _mm_loadu_ps(itmp1 + 8);
                _vb0 = _mm_loadu_ps(ktmp + 12);
                _vb1 = _mm_loadu_ps(ktmp + 16);
                _vb2 = _mm_loadu_ps(ktmp + 20);
                _sum0 = _mm_add_ps(_mm_mul_ps(_va0, _vb0), _sum0);
                _sum0 = _mm_add_ps(_mm_mul_ps(_va1, _vb1), _sum0);
                _sum0 = _mm_add_ps(_mm_mul_ps(_va2, _vb2), _sum0);
                _va0 = _mm_loadu_ps(itmp2);
                _va1 = _mm_loadu_ps(itmp2 + 4);
                _va2 = _mm_loadu_ps(itmp2 + 8);
                _vb0 = _mm_loadu_ps(ktmp + 24);
                _vb1 = _mm_loadu_ps(ktmp + 28);
                _vb2 = _mm_loadu_ps(ktmp + 32);
                _sum0 = _mm_add_ps(_mm_mul_ps(_va0, _vb0), _sum0);
                _sum0 = _mm_add_ps(_mm_mul_ps(_va1, _vb1), _sum0);
                _sum0 = _mm_add_ps(_mm_mul_ps(_va2, _vb2), _sum0);
                _mm_storeu_ps(otmp, _sum0);
#else
                float sum0[4] = {btmp[0], btmp[1], btmp[2], btmp[3]};
                for (int k = 0; k < 4; k++)
                {
                    sum0[k] += itmp0[k] * ktmp[k];
                    sum0[k] += itmp1[k] * ktmp[k + 12];
                    sum0[k] += itmp2[k] * ktmp[k + 24];
                    sum0[k] += itmp0[k + 4] * ktmp[k + 4];
                    sum0[k] += itmp1[k + 4] * ktmp[k + 16];
                    sum0[k] += itmp2[k + 4] * ktmp[k + 28];
                    sum0[k] += itmp0[k + 8] * ktmp[k + 8];
                    sum0[k] += itmp1[k + 8] * ktmp[k + 20];
                    sum0[k] += itmp2[k + 8] * ktmp[k + 32];
                }
                for (int k = 0; k < 4; k++)
                {
                    otmp[k] = sum0[k];
                }
#endif
                itmp0 += 4;
                itmp1 += 4;
                itmp2 += 4;
                otmp += 4;
            }
        }
    }
    // --- Unpack: scatter interleaved 4-lane output back to planar CHW ---
    {
        for (int i = 0; i < channel_count; i++)
        {
            float* otmp = output_tmp + i * 4 * outwh;
            float* tmp0 = output + i * 4 * outwh;
            float* tmp1 = output + i * 4 * outwh + 1 * outwh;
            float* tmp2 = output + i * 4 * outwh + 2 * outwh;
            float* tmp3 = output + i * 4 * outwh + 3 * outwh;
            // NOTE: inner index shadows the outer i; iterates pixels.
            for (int i = 0; i < outwh; i++)
            {
                tmp0[0] = otmp[0];
                tmp1[0] = otmp[1];
                tmp2[0] = otmp[2];
                tmp3[0] = otmp[3];
                otmp += 4;
                tmp0++;
                tmp1++;
                tmp2++;
                tmp3++;
            }
        }
        // Remainder channels: copy only lane i of the last group.
        for (int i = 0; i < channel_remain; i++)
        {
            int ii = channel_count * 4 + i;
            float* otmp = output_tmp + channel_count * 4 * outwh;
            float* tmp0 = output + ii * outwh;
            for (int j = 0; j < outwh; j++)
            {
                tmp0[0] = otmp[i];
                otmp += 4;
                tmp0++;
            }
        }
    }
    sys_free(output_tmp);
    sys_free(img_tmp);
    sys_free(kernel_tmp);
    sys_free(bias_tmp);
}
static void convdw3x3s2(float* output, float* img_data, float* kernel_data, float* bias_data, int inc, int inh, int inw,
int outh, int outw, int num_thread)
{
int inwh = inw * inh;
int outwh = outw * outh;
int channel_count = inc >> 2;
int channel_remain = inc - (channel_count << 2);
// generate the image tmp
float* img_tmp = ( float* )sys_malloc(4 * inwh * (channel_count + 1) * sizeof(float));
float* kernel_tmp = ( float* )sys_malloc(4 * 9 * (channel_count + 1) * sizeof(float));
float* bias_tmp = ( float* )sys_malloc(4 * (channel_count + 1) * sizeof(float));
{
for (int i = 0; i < channel_count; i++)
{
int ii = i * 4;
float* k0 = img_data + (ii + 0) * inwh;
float* k1 = img_data + (ii + 1) * inwh;
float* k2 = img_data + (ii + 2) * inwh;
float* k3 = img_data + (ii + 3) * inwh;
float* f0 = kernel_data + (ii + 0) * 9;
float* f1 = kernel_data + (ii + 1) * 9;
float* f2 = kernel_data + (ii + 2) * 9;
float* f3 = kernel_data + (ii + 3) * 9;
float* b0 = bias_data + (ii + 0);
float* b1 = bias_data + (ii + 1);
float* b2 = bias_data + (ii + 2);
float* b3 = bias_data + (ii + 3);
float* tmp0 = img_tmp + ii * inwh;
float* tmp1 = kernel_tmp + ii * 9;
float* tmp2 = bias_tmp + ii;
for (int j = 0; j < inwh; j++)
{
tmp0[0] = k0[0];
tmp0[1] = k1[0];
tmp0[2] = k2[0];
tmp0[3] = k3[0];
tmp0 += 4;
k0++;
k1++;
k2++;
k3++;
}
for (int j = 0; j < 9; j++)
{
tmp1[0] = f0[0];
tmp1[1] = f1[0];
tmp1[2] = f2[0];
tmp1[3] = f3[0];
tmp1 += 4;
f0++;
f1++;
f2++;
f3++;
}
if (bias_data)
{
tmp2[0] = b0[0];
tmp2[1] = b1[0];
tmp2[2] = b2[0];
tmp2[3] = b3[0];
}
else
{
tmp2[0] = 0;
tmp2[1] = 0;
tmp2[2] = 0;
tmp2[3] = 0;
}
}
for (int i = 0; i < channel_remain; i++)
{
int ii = channel_count * 4 + i;
float* k0 = img_data + ii * inwh;
float* f0 = kernel_data + ii * 9;
float* b0 = bias_data + ii;
float* tmp0 = img_tmp + channel_count * 4 * inwh;
float* tmp1 = kernel_tmp + channel_count * 4 * 9;
float* tmp2 = bias_tmp + channel_count * 4;
for (int j = 0; j < inwh; j++)
{
tmp0[i] = k0[0];
tmp0 += 4;
k0++;
}
for (int j = 0; j < 9; j++)
{
tmp1[i] = f0[0];
tmp1 += 4;
f0++;
}
if (bias_data)
{
tmp2[i] = b0[0];
}
else
{
tmp2[i] = 0;
}
}
}
float* output_tmp = ( float* )sys_malloc(outwh * 4 * (channel_count + 1) * sizeof(float));
for (int c = 0; c < channel_count + 1; c++)
{
float* ktmp = kernel_tmp + c * 4 * 9;
float* btmp = bias_tmp + c * 4;
for (int i = 0; i < outh; i++)
{
int j = 0;
float* itmp0 = img_tmp + c * 4 * inwh + 4 * i * 2 * inw;
float* itmp1 = img_tmp + c * 4 * inwh + 4 * (i * 2 + 1) * inw;
float* itmp2 = img_tmp + c * 4 * inwh + 4 * (i * 2 + 2) * inw;
float* otmp = output_tmp + c * 4 * outwh + 4 * i * outw;
for (; j + 3 < outw; j += 4)
{
#if __SSE__
__m128 _sum0 = _mm_loadu_ps(btmp);
__m128 _sum1 = _mm_loadu_ps(btmp);
__m128 _sum2 = _mm_loadu_ps(btmp);
__m128 _sum3 = _mm_loadu_ps(btmp);
__m128 _va0 = _mm_loadu_ps(itmp0);
__m128 _va1 = _mm_loadu_ps(itmp0 + 4);
__m128 _va2 = _mm_loadu_ps(itmp0 + 8);
__m128 _va3 = _mm_loadu_ps(itmp0 + 12);
__m128 _va4 = _mm_loadu_ps(itmp0 + 16);
__m128 _va5 = _mm_loadu_ps(itmp0 + 20);
__m128 _va6 = _mm_loadu_ps(itmp0 + 24);
__m128 _va7 = _mm_loadu_ps(itmp0 + 28);
__m128 _va8 = _mm_loadu_ps(itmp0 + 32);
__m128 _vb0 = _mm_loadu_ps(ktmp);
__m128 _vb1 = _mm_loadu_ps(ktmp + 4);
__m128 _vb2 = _mm_loadu_ps(ktmp + 8);
_sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_va0, _vb0));
_sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_va1, _vb1));
_sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_va2, _vb2));
_sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_va2, _vb0));
_sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_va3, _vb1));
_sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_va4, _vb2));
_sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_va4, _vb0));
_sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_va5, _vb1));
_sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_va6, _vb2));
_sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_va6, _vb0));
_sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_va7, _vb1));
_sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_va8, _vb2));
_va0 = _mm_loadu_ps(itmp1);
_va1 = _mm_loadu_ps(itmp1 + 4);
_va2 = _mm_loadu_ps(itmp1 + 8);
_va3 = _mm_loadu_ps(itmp1 + 12);
_va4 = _mm_loadu_ps(itmp1 + 16);
_va5 = _mm_loadu_ps(itmp1 + 20);
_va6 = _mm_loadu_ps(itmp1 + 24);
_va7 = _mm_loadu_ps(itmp1 + 28);
_va8 = _mm_loadu_ps(itmp1 + 32);
_vb0 = _mm_loadu_ps(ktmp + 12);
_vb1 = _mm_loadu_ps(ktmp + 16);
_vb2 = _mm_loadu_ps(ktmp + 20);
_sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_va0, _vb0));
_sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_va1, _vb1));
_sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_va2, _vb2));
_sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_va2, _vb0));
_sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_va3, _vb1));
_sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_va4, _vb2));
_sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_va4, _vb0));
_sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_va5, _vb1));
_sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_va6, _vb2));
_sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_va6, _vb0));
_sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_va7, _vb1));
_sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_va8, _vb2));
_va0 = _mm_loadu_ps(itmp2);
_va1 = _mm_loadu_ps(itmp2 + 4);
_va2 = _mm_loadu_ps(itmp2 + 8);
_va3 = _mm_loadu_ps(itmp2 + 12);
_va4 = _mm_loadu_ps(itmp2 + 16);
_va5 = _mm_loadu_ps(itmp2 + 20);
_va6 = _mm_loadu_ps(itmp2 + 24);
_va7 = _mm_loadu_ps(itmp2 + 28);
_va8 = _mm_loadu_ps(itmp2 + 32);
_vb0 = _mm_loadu_ps(ktmp + 24);
_vb1 = _mm_loadu_ps(ktmp + 28);
_vb2 = _mm_loadu_ps(ktmp + 32);
_sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_va0, _vb0));
_sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_va1, _vb1));
_sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_va2, _vb2));
_sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_va2, _vb0));
_sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_va3, _vb1));
_sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_va4, _vb2));
_sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_va4, _vb0));
_sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_va5, _vb1));
_sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_va6, _vb2));
_sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_va6, _vb0));
_sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_va7, _vb1));
_sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_va8, _vb2));
_mm_storeu_ps(otmp, _sum0);
_mm_storeu_ps(otmp + 4, _sum1);
_mm_storeu_ps(otmp + 8, _sum2);
_mm_storeu_ps(otmp + 12, _sum3);
#else
float sum0[4] = {btmp[0], btmp[1], btmp[2], btmp[3]};
float sum1[4] = {btmp[0], btmp[1], btmp[2], btmp[3]};
float sum2[4] = {btmp[0], btmp[1], btmp[2], btmp[3]};
float sum3[4] = {btmp[0], btmp[1], btmp[2], btmp[3]};
for (int k = 0; k < 4; k++)
{
sum0[k] += itmp0[k] * ktmp[k];
sum0[k] += itmp1[k] * ktmp[k + 12];
sum0[k] += itmp2[k] * ktmp[k + 24];
sum0[k] += itmp0[k + 4] * ktmp[k + 4];
sum0[k] += itmp1[k + 4] * ktmp[k + 16];
sum0[k] += itmp2[k + 4] * ktmp[k + 28];
sum0[k] += itmp0[k + 8] * ktmp[k + 8];
sum0[k] += itmp1[k + 8] * ktmp[k + 20];
sum0[k] += itmp2[k + 8] * ktmp[k + 32];
sum1[k] += itmp0[k + 8] * ktmp[k];
sum1[k] += itmp1[k + 8] * ktmp[k + 12];
sum1[k] += itmp2[k + 8] * ktmp[k + 24];
sum1[k] += itmp0[k + 12] * ktmp[k + 4];
sum1[k] += itmp1[k + 12] * ktmp[k + 16];
sum1[k] += itmp2[k + 12] * ktmp[k + 28];
sum1[k] += itmp0[k + 16] * ktmp[k + 8];
sum1[k] += itmp1[k + 16] * ktmp[k + 20];
sum1[k] += itmp2[k + 16] * ktmp[k + 32];
sum2[k] += itmp0[k + 16] * ktmp[k];
sum2[k] += itmp1[k + 16] * ktmp[k + 12];
sum2[k] += itmp2[k + 16] * ktmp[k + 24];
sum2[k] += itmp0[k + 20] * ktmp[k + 4];
sum2[k] += itmp1[k + 20] * ktmp[k + 16];
sum2[k] += itmp2[k + 20] * ktmp[k + 28];
sum2[k] += itmp0[k + 24] * ktmp[k + 8];
sum2[k] += itmp1[k + 24] * ktmp[k + 20];
sum2[k] += itmp2[k + 24] * ktmp[k + 32];
sum3[k] += itmp0[k + 24] * ktmp[k];
sum3[k] += itmp1[k + 24] * ktmp[k + 12];
sum3[k] += itmp2[k + 24] * ktmp[k + 24];
sum3[k] += itmp0[k + 28] * ktmp[k + 4];
sum3[k] += itmp1[k + 28] * ktmp[k + 16];
sum3[k] += itmp2[k + 28] * ktmp[k + 28];
sum3[k] += itmp0[k + 32] * ktmp[k + 8];
sum3[k] += itmp1[k + 32] * ktmp[k + 20];
sum3[k] += itmp2[k + 32] * ktmp[k + 32];
}
for (int k = 0; k < 4; k++)
{
otmp[k] = sum0[k];
otmp[k + 4] = sum1[k];
otmp[k + 8] = sum2[k];
otmp[k + 12] = sum3[k];
}
#endif
itmp0 += 32;
itmp1 += 32;
itmp2 += 32;
otmp += 16;
}
for (; j < outw; j++)
{
#if __SSE__
__m128 _sum0 = _mm_loadu_ps(btmp);
__m128 _va0 = _mm_loadu_ps(itmp0);
__m128 _va1 = _mm_loadu_ps(itmp0 + 4);
__m128 _va2 = _mm_loadu_ps(itmp0 + 8);
__m128 _vb0 = _mm_loadu_ps(ktmp);
__m128 _vb1 = _mm_loadu_ps(ktmp + 4);
__m128 _vb2 = _mm_loadu_ps(ktmp + 8);
_sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_va0, _vb0));
_sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_va1, _vb1));
_sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_va2, _vb2));
_va0 = _mm_loadu_ps(itmp1);
_va1 = _mm_loadu_ps(itmp1 + 4);
_va2 = _mm_loadu_ps(itmp1 + 8);
_vb0 = _mm_loadu_ps(ktmp + 12);
_vb1 = _mm_loadu_ps(ktmp + 16);
_vb2 = _mm_loadu_ps(ktmp + 20);
_sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_va0, _vb0));
_sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_va1, _vb1));
_sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_va2, _vb2));
_va0 = _mm_loadu_ps(itmp2);
_va1 = _mm_loadu_ps(itmp2 + 4);
_va2 = _mm_loadu_ps(itmp2 + 8);
_vb0 = _mm_loadu_ps(ktmp + 24);
_vb1 = _mm_loadu_ps(ktmp + 28);
_vb2 = _mm_loadu_ps(ktmp + 32);
_sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_va0, _vb0));
_sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_va1, _vb1));
_sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_va2, _vb2));
_mm_storeu_ps(otmp, _sum0);
#else
float sum0[4] = {btmp[0], btmp[1], btmp[2], btmp[3]};
for (int k = 0; k < 4; k++)
{
sum0[k] += itmp0[k] * ktmp[k];
sum0[k] += itmp1[k] * ktmp[k + 12];
sum0[k] += itmp2[k] * ktmp[k + 24];
sum0[k] += itmp0[k + 4] * ktmp[k + 4];
sum0[k] += itmp1[k + 4] * ktmp[k + 16];
sum0[k] += itmp2[k + 4] * ktmp[k + 28];
sum0[k] += itmp0[k + 8] * ktmp[k + 8];
sum0[k] += itmp1[k + 8] * ktmp[k + 20];
sum0[k] += itmp2[k + 8] * ktmp[k + 32];
}
for (int k = 0; k < 4; k++)
{
otmp[k] = sum0[k];
}
#endif
itmp0 += 8;
itmp1 += 8;
itmp2 += 8;
otmp += 4;
}
}
}
{
for (int i = 0; i < channel_count; i++)
{
float* otmp = output_tmp + i * 4 * outwh;
float* tmp0 = output + i * 4 * outwh;
float* tmp1 = output + i * 4 * outwh + 1 * outwh;
float* tmp2 = output + i * 4 * outwh + 2 * outwh;
float* tmp3 = output + i * 4 * outwh + 3 * outwh;
for (int i = 0; i < outwh; i++)
{
tmp0[0] = otmp[0];
tmp1[0] = otmp[1];
tmp2[0] = otmp[2];
tmp3[0] = otmp[3];
otmp += 4;
tmp0++;
tmp1++;
tmp2++;
tmp3++;
}
}
for (int i = 0; i < channel_remain; i++)
{
int ii = channel_count * 4 + i;
float* otmp = output_tmp + channel_count * 4 * outwh;
float* tmp0 = output + ii * outwh;
for (int j = 0; j < outwh; j++)
{
tmp0[0] = otmp[i];
otmp += 4;
tmp0++;
}
}
}
sys_free(output_tmp);
sys_free(img_tmp);
sys_free(kernel_tmp);
sys_free(bias_tmp);
}
#else
static void convdw3x3s1(float* output, float* input, float* _kernel, float* _bias, int channel, int in_h, int in_w,
int out_h, int out_w, int num_thread)
{
int w = in_w;
int h = in_h;
int c_step_in = w * h;
int outw = out_w;
int outh = out_h;
int c_step_out = outw * outh;
const int group = channel;
const float* kernel = _kernel;
#pragma omp parallel for num_threads(num_thread)
for (int g = 0; g < group; g++)
{
float* out = output + g * c_step_out;
float* outptr = out;
float* outptr2 = outptr + outw;
const float bias0 = _bias ? _bias[g] : 0.f;
const float* kernel0 = kernel + g * 9;
const float* img0 = input + g * c_step_in;
const float* r0 = img0;
const float* r1 = img0 + w;
const float* r2 = img0 + w * 2;
const float* r3 = img0 + w * 3;
const float* k0 = kernel0;
const float* k1 = kernel0 + 3;
const float* k2 = kernel0 + 6;
int i = 0;
for (; i + 1 < outh; i += 2)
{
int remain = outw;
for (; remain > 0; remain--)
{
float sum = bias0;
sum += r0[0] * k0[0];
sum += r0[1] * k0[1];
sum += r0[2] * k0[2];
sum += r1[0] * k1[0];
sum += r1[1] * k1[1];
sum += r1[2] * k1[2];
sum += r2[0] * k2[0];
sum += r2[1] * k2[1];
sum += r2[2] * k2[2];
float sum2 = bias0;
sum2 += r1[0] * k0[0];
sum2 += r1[1] * k0[1];
sum2 += r1[2] * k0[2];
sum2 += r2[0] * k1[0];
sum2 += r2[1] * k1[1];
sum2 += r2[2] * k1[2];
sum2 += r3[0] * k2[0];
sum2 += r3[1] * k2[1];
sum2 += r3[2] * k2[2];
*outptr = sum;
*outptr2 = sum2;
r0++;
r1++;
r2++;
r3++;
outptr++;
outptr2++;
}
r0 += 2 + w;
r1 += 2 + w;
r2 += 2 + w;
r3 += 2 + w;
outptr += outw;
outptr2 += outw;
}
for (; i < outh; i++)
{
int remain = outw;
for (; remain > 0; remain--)
{
float sum = bias0;
sum += r0[0] * k0[0];
sum += r0[1] * k0[1];
sum += r0[2] * k0[2];
sum += r1[0] * k1[0];
sum += r1[1] * k1[1];
sum += r1[2] * k1[2];
sum += r2[0] * k2[0];
sum += r2[1] * k2[1];
sum += r2[2] * k2[2];
*outptr = sum;
r0++;
r1++;
r2++;
outptr++;
}
r0 += 2;
r1 += 2;
r2 += 2;
}
}
}
/*
 * Direct 3x3 stride-2 depthwise convolution, one filter per channel.
 * Input is pre-padded by the caller; each output pixel (oy, ox) reads
 * the 3x3 window starting at input (2*oy, 2*ox). Channels are
 * distributed over OpenMP threads.
 *
 * output : [channel, out_h, out_w]
 * input  : [channel, in_h, in_w]
 * _kernel: 9 floats per channel
 * _bias  : per-channel bias, may be NULL (treated as 0)
 */
static void convdw3x3s2(float* output, float* input, float* _kernel, float* _bias, int channel, int in_h, int in_w,
                        int out_h, int out_w, int num_thread)
{
    const int in_step = in_h * in_w;
    const int out_step = out_h * out_w;
#pragma omp parallel for num_threads(num_thread)
    for (int g = 0; g < channel; g++)
    {
        const float* kg = _kernel + g * 9;
        const float bias_val = _bias ? _bias[g] : 0.f;
        const float* img = input + g * in_step;
        float* out = output + g * out_step;
        for (int oy = 0; oy < out_h; oy++)
        {
            /* stride 2: window top-left row is 2*oy */
            const float* row0 = img + (oy * 2) * in_w;
            const float* row1 = row0 + in_w;
            const float* row2 = row1 + in_w;
            float* orow = out + oy * out_w;
            for (int ox = 0; ox < out_w; ox++)
            {
                const int ix = ox * 2; /* window top-left column */
                float acc = bias_val;
                acc += row0[ix + 0] * kg[0];
                acc += row0[ix + 1] * kg[1];
                acc += row0[ix + 2] * kg[2];
                acc += row1[ix + 0] * kg[3];
                acc += row1[ix + 1] * kg[4];
                acc += row1[ix + 2] * kg[5];
                acc += row2[ix + 0] * kg[6];
                acc += row2[ix + 1] * kg[7];
                acc += row2[ix + 2] * kg[8];
                orow[ox] = acc;
            }
        }
    }
}
#endif
/*
 * Entry point for the 3x3 depthwise convolution kernel.
 * Zero-pads the input if needed, dispatches to the stride-1 or stride-2
 * direct kernel per batch, then applies the optional activation.
 *
 * Fix: the original batch loop never advanced the input/output pointers
 * by in_chw/out_chw and padded only once before the loop, so for
 * batch_number > 1 every iteration recomputed batch 0. Padding and the
 * conv call are now done per batch with correctly offset pointers.
 *
 * Returns 0 on success.
 */
int conv_dw_run(struct ir_tensor* input_tensor, struct ir_tensor* weight_tensor, struct ir_tensor* bias_tensor,
                struct ir_tensor* output_tensor, struct conv_priv_info* conv_info, struct conv_param* param, int num_thread, int cpu_affinity)
{
    float* input = ( float* )input_tensor->data;
    float* output = ( float* )output_tensor->data;
    float* kernel = ( float* )weight_tensor->data;
    float* biases = NULL;
    if (bias_tensor)
        biases = ( float* )bias_tensor->data;
    /* NCHW layout assumed from the dims[0..3] accesses below */
    int batch_number = input_tensor->dims[0];
    int inc = input_tensor->dims[1];
    int inh = input_tensor->dims[2];
    int inw = input_tensor->dims[3];
    int in_chw = inc * inh * inw;
    int outc = output_tensor->dims[1];
    int outh = output_tensor->dims[2];
    int outw = output_tensor->dims[3];
    int out_hw = outh * outw;
    int out_chw = out_hw * outc;
    int pad_w = param->pad_w0;
    int pad_h = param->pad_h0;
    int stride_h = param->stride_h;
    int group = param->group;
    int activation = param->activation;
    /* symmetric zero padding (pad_h0/pad_w0 applied on both sides) */
    int inh_tmp = inh + pad_h + pad_h;
    int inw_tmp = inw + pad_w + pad_w;
    int need_pad = !(inh_tmp == inh && inw_tmp == inw);
    /* one padded-image buffer, reused for every batch */
    float* input_tmp = NULL;
    if (need_pad)
        input_tmp = ( float* )sys_malloc(inh_tmp * inw_tmp * group * sizeof(float));
    for (int n = 0; n < batch_number; n++)
    {
        float* batch_input = input + n * in_chw;
        float* batch_output = output + n * out_chw;
        float* conv_input = batch_input;
        if (need_pad)
        {
#pragma omp parallel for num_threads(num_thread)
            for (int g = 0; g < group; g++)
            {
                float* pad_in = batch_input + g * inh * inw;
                float* pad_out = input_tmp + g * inh_tmp * inw_tmp;
                pad(pad_in, pad_out, inh, inw, inh_tmp, inw_tmp, pad_h, pad_w, 0.f);
            }
            conv_input = input_tmp;
        }
        if (stride_h == 1)
            convdw3x3s1(batch_output, conv_input, kernel, biases, group, inh_tmp, inw_tmp, outh, outw, num_thread);
        else
            convdw3x3s2(batch_output, conv_input, kernel, biases, group, inh_tmp, inw_tmp, outh, outw, num_thread);
    }
    /* activation >= 0 selects a relu variant — semantics live in relu(); verify against its definition */
    if (activation >= 0)
        relu(output, batch_number * out_chw, activation);
    if (need_pad)
        sys_free(input_tmp);
    return 0;
}
|
ocp_nlp_sqp_rti.c | /*
* Copyright 2019 Gianluca Frison, Dimitris Kouzoupis, Robin Verschueren,
* Andrea Zanelli, Niels van Duijkeren, Jonathan Frey, Tommaso Sartor,
* Branimir Novoselnik, Rien Quirynen, Rezart Qelibari, Dang Doan,
* Jonas Koenemann, Yutao Chen, Tobias Schöls, Jonas Schlagenhauf, Moritz Diehl
*
* This file is part of acados.
*
* The 2-Clause BSD License
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
*/
#include "acados/ocp_nlp/ocp_nlp_sqp_rti.h"
// external
#include <assert.h>
#include <math.h>
#include <string.h>
#include <stdio.h>
#include <stdlib.h>
#if defined(ACADOS_WITH_OPENMP)
#include <omp.h>
#endif
// blasfeo
#include "blasfeo/include/blasfeo_d_aux.h"
#include "blasfeo/include/blasfeo_d_aux_ext_dep.h"
#include "blasfeo/include/blasfeo_d_blas.h"
// acados
#include "acados/ocp_nlp/ocp_nlp_common.h"
#include "acados/ocp_nlp/ocp_nlp_dynamics_cont.h"
#include "acados/ocp_nlp/ocp_nlp_reg_common.h"
#include "acados/ocp_qp/ocp_qp_common.h"
#include "acados/utils/mem.h"
#include "acados/utils/print.h"
#include "acados/utils/timing.h"
#include "acados/utils/types.h"
#include "acados_c/ocp_qp_interface.h"
/************************************************
* options
************************************************/
/* Bytes required for an ocp_nlp_sqp_rti_opts struct plus its nested nlp opts. */
acados_size_t ocp_nlp_sqp_rti_opts_calculate_size(void *config_, void *dims_)
{
    ocp_nlp_config *config = config_;
    ocp_nlp_dims *dims = dims_;

    return sizeof(ocp_nlp_sqp_rti_opts)
           + ocp_nlp_opts_calculate_size(config, dims);
}
/* Carve an ocp_nlp_sqp_rti_opts out of raw_memory; the nested nlp opts
 * live immediately after the struct. Layout must mirror
 * ocp_nlp_sqp_rti_opts_calculate_size(). */
void *ocp_nlp_sqp_rti_opts_assign(void *config_, void *dims_, void *raw_memory)
{
    ocp_nlp_config *config = config_;
    ocp_nlp_dims *dims = dims_;

    char *ptr = (char *) raw_memory;

    ocp_nlp_sqp_rti_opts *opts = (ocp_nlp_sqp_rti_opts *) ptr;
    ptr += sizeof(ocp_nlp_sqp_rti_opts);

    opts->nlp_opts = ocp_nlp_opts_assign(config, dims, ptr);
    ptr += ocp_nlp_opts_calculate_size(config, dims);

    // sanity check: we must not have written past the advertised size
    assert(ptr <= (char *) raw_memory
                  + ocp_nlp_sqp_rti_opts_calculate_size(config, dims));

    return opts;
}
/* Set all SQP-RTI options to their defaults: first delegates to the
 * generic nlp opts initializer, then sets the RTI-specific fields
 * (no external QP residuals, cold-start the first QP, combined
 * preparation+feedback phase, silent printing). */
void ocp_nlp_sqp_rti_opts_initialize_default(void *config_,
    void *dims_, void *opts_)
{
    ocp_nlp_dims *dims = dims_;
    ocp_nlp_config *config = config_;
    ocp_nlp_sqp_rti_opts *opts = opts_;
    ocp_nlp_opts *nlp_opts = opts->nlp_opts;
    // ocp_qp_xcond_solver_config *qp_solver = config->qp_solver;
    // ocp_nlp_dynamics_config **dynamics = config->dynamics;
    // ocp_nlp_constraints_config **constraints = config->constraints;
    // int ii;
    // int N = dims->N;
    // nlp defaults must be set before the RTI-specific overrides below
    ocp_nlp_opts_initialize_default(config, dims, nlp_opts);
    // SQP RTI opts
    opts->ext_qp_res = 0;              // do not compute external QP residuals
    opts->warm_start_first_qp = false; // cold-start the very first QP solve
    opts->rti_phase = 0;               // 0 = preparation + feedback in one call
    opts->print_level = 0;             // no debug printing
    // overwrite default submodules opts
    // do not compute adjoint in dynamics and constraints
    // (kept commented for reference — currently disabled)
    // int compute_adj = 0;
    // // dynamics
    // for (ii = 0; ii < N; ii++)
    // {
    //     dynamics[ii]->opts_set(dynamics[ii],
    //         opts->nlp_opts->dynamics[ii], "compute_adj", &compute_adj);
    // }
    // // constraints
    // for (ii = 0; ii <= N; ii++)
    // {
    //     constraints[ii]->opts_set(constraints[ii],
    //         opts->nlp_opts->constraints[ii], "compute_adj", &compute_adj);
    // }
    return;
}
/* Propagate option changes: only the nested nlp opts need refreshing. */
void ocp_nlp_sqp_rti_opts_update(void *config_, void *dims_, void *opts_)
{
    ocp_nlp_config *config = config_;
    ocp_nlp_dims *dims = dims_;
    ocp_nlp_sqp_rti_opts *opts = opts_;

    ocp_nlp_opts_update(config, dims, opts->nlp_opts);

    return;
}
/* Set a single option by name.
 * Fields prefixed "qp_" are forwarded to the QP module (with
 * qp_warm_start additionally cached in the RTI opts); the RTI-specific
 * fields ext_qp_res / warm_start_first_qp / rti_phase / print_level are
 * handled here; everything else falls through to the generic nlp setter.
 *
 * Fixes: the rti_phase error message named the wrong function
 * (ocp_nlp_sqp_opts_set); the module-prefix copy is now clamped to the
 * module[] buffer size to avoid an overflow on pathological field names. */
void ocp_nlp_sqp_rti_opts_set(void *config_, void *opts_,
    const char *field, void* value)
{
    ocp_nlp_sqp_rti_opts *opts = (ocp_nlp_sqp_rti_opts *) opts_;
    ocp_nlp_config *config = config_;
    ocp_nlp_opts *nlp_opts = opts->nlp_opts;

    int ii;

    char module[MAX_STR_LEN];
    char *ptr_module = NULL;
    int module_length = 0;

    // extract module name: the part of `field` before the first '_'
    char *char_ = strchr(field, '_');
    if (char_!=NULL)
    {
        module_length = char_-field;
        if (module_length > MAX_STR_LEN-1)
            module_length = MAX_STR_LEN-1; // clamp: leave room for '\0'
        for (ii=0; ii<module_length; ii++)
            module[ii] = field[ii];
        module[module_length] = '\0'; // add end of string
        ptr_module = module;
    }

    // pass options to QP module
    if ( ptr_module!=NULL && (!strcmp(ptr_module, "qp")) )
    {
        ocp_nlp_opts_set(config, nlp_opts, field, value);
        // keep a local copy of the warm-start flag: the feedback step
        // may override it for the first QP solve
        if (!strcmp(field, "qp_warm_start"))
        {
            int* i_ptr = (int *) value;
            opts->qp_warm_start = *i_ptr;
        }
    }
    else // nlp opts
    {
        if (!strcmp(field, "ext_qp_res"))
        {
            int* ext_qp_res = (int *) value;
            opts->ext_qp_res = *ext_qp_res;
        }
        else if (!strcmp(field, "warm_start_first_qp"))
        {
            bool* warm_start_first_qp = (bool *) value;
            opts->warm_start_first_qp = *warm_start_first_qp;
        }
        else if (!strcmp(field, "rti_phase"))
        {
            int* rti_phase = (int *) value;
            if (*rti_phase < 0 || *rti_phase > 2) {
                printf("\nerror: ocp_nlp_sqp_rti_opts_set: invalid value for rti_phase field.");
                printf("possible values are: 0, 1, 2\n");
                exit(1);
            } else opts->rti_phase = *rti_phase;
        }
        else if (!strcmp(field, "print_level"))
        {
            int* print_level = (int *) value;
            if (*print_level < 0)
            {
                printf("\nerror: ocp_nlp_sqp_rti_opts_set: invalid value for print_level field, need int >=0, got %d.", *print_level);
                exit(1);
            }
            opts->print_level = *print_level;
        }
        else
        {
            // unknown here: delegate to the generic nlp option setter
            ocp_nlp_opts_set(config, nlp_opts, field, value);
        }
    }

    return;
}
/* Set a per-stage option: thin forwarder to the generic nlp setter. */
void ocp_nlp_sqp_rti_opts_set_at_stage(void *config_, void *opts_, size_t stage, const char *field, void* value)
{
    ocp_nlp_sqp_rti_opts *opts = (ocp_nlp_sqp_rti_opts *) opts_;
    ocp_nlp_config *config = config_;

    ocp_nlp_opts_set_at_stage(config, opts->nlp_opts, stage, field, value);
}
/************************************************
* memory
************************************************/
/* Bytes required for the SQP-RTI memory: the struct itself, the nested
 * nlp memory, and the statistics table (2 rows x 2 columns, plus 4 extra
 * columns when external QP residuals are enabled), 8-byte aligned. */
acados_size_t ocp_nlp_sqp_rti_memory_calculate_size(void *config_,
    void *dims_, void *opts_)
{
    ocp_nlp_config *config = config_;
    ocp_nlp_dims *dims = dims_;
    ocp_nlp_sqp_rti_opts *opts = opts_;

    acados_size_t size = sizeof(ocp_nlp_sqp_rti_memory);

    // nested nlp memory
    size += ocp_nlp_memory_calculate_size(config, dims, opts->nlp_opts);

    // statistics table
    int stat_rows = 2;
    int stat_cols = opts->ext_qp_res ? 2 + 4 : 2;
    size += stat_rows * stat_cols * sizeof(double);

    size += 8;  // initial align
    make_int_multiple_of(8, &size);

    return size;
}
/* Carve the SQP-RTI memory out of raw_memory.
 * The layout (align, struct, nlp memory, stat table) must stay in sync
 * with ocp_nlp_sqp_rti_memory_calculate_size(). */
void *ocp_nlp_sqp_rti_memory_assign(void *config_, void *dims_,
    void *opts_, void *raw_memory)
{
    ocp_nlp_dims *dims = dims_;
    ocp_nlp_config *config = config_;
    ocp_nlp_sqp_rti_opts *opts = opts_;
    ocp_nlp_opts *nlp_opts = opts->nlp_opts;
    // ocp_qp_xcond_solver_config *qp_solver = config->qp_solver;
    // ocp_nlp_dynamics_config **dynamics = config->dynamics;
    // ocp_nlp_cost_config **cost = config->cost;
    // ocp_nlp_constraints_config **constraints = config->constraints;
    char *c_ptr = (char *) raw_memory;
    // int ii;
    // int N = dims->N;
    // int *nx = dims->nx;
    // int *nu = dims->nu;
    // int *nz = dims->nz;
    // initial align
    align_char_to(8, &c_ptr);
    ocp_nlp_sqp_rti_memory *mem = (ocp_nlp_sqp_rti_memory *) c_ptr;
    c_ptr += sizeof(ocp_nlp_sqp_rti_memory);
    // nested nlp memory follows the struct
    mem->nlp_mem = ocp_nlp_memory_assign(config, dims, nlp_opts, c_ptr);
    c_ptr += ocp_nlp_memory_calculate_size(config, dims, nlp_opts);
    // statistics table: 2 rows; 2 columns (+4 when ext_qp_res is on),
    // matching memory_calculate_size
    mem->stat = (double *) c_ptr;
    mem->stat_m = 1+1;
    mem->stat_n = 2;
    if (opts->ext_qp_res)
        mem->stat_n += 4;
    c_ptr += mem->stat_m*mem->stat_n*sizeof(double);
    mem->status = ACADOS_READY;
    // sanity check against the advertised size
    assert((char *) raw_memory+ocp_nlp_sqp_rti_memory_calculate_size(
        config, dims, opts) >= c_ptr);
    return mem;
}
/************************************************
* workspace
************************************************/
/* Bytes required for the SQP-RTI workspace: nlp workspace, a temporary
 * QP in/out pair, and (optionally) QP-residual storage for debugging. */
acados_size_t ocp_nlp_sqp_rti_workspace_calculate_size(void *config_,
    void *dims_, void *opts_)
{
    ocp_nlp_config *config = config_;
    ocp_nlp_dims *dims = dims_;
    ocp_nlp_sqp_rti_opts *opts = opts_;

    acados_size_t bytes = sizeof(ocp_nlp_sqp_rti_workspace);

    // nested nlp workspace
    bytes += ocp_nlp_workspace_calculate_size(config, dims, opts->nlp_opts);

    // temporary QP in / QP out, sized on the original (non-condensed) dims
    bytes += ocp_qp_in_calculate_size(dims->qp_solver->orig_dims);
    bytes += ocp_qp_out_calculate_size(dims->qp_solver->orig_dims);

    if (opts->ext_qp_res)
    {
        // external QP residuals + their workspace
        bytes += ocp_qp_res_calculate_size(dims->qp_solver->orig_dims);
        bytes += ocp_qp_res_workspace_calculate_size(dims->qp_solver->orig_dims);
    }

    return bytes;
}
/* Lay the workspace sub-structures out inside the raw workspace block.
 * The carving order must stay in sync with
 * ocp_nlp_sqp_rti_workspace_calculate_size(). */
static void ocp_nlp_sqp_rti_cast_workspace(
    ocp_nlp_config *config, ocp_nlp_dims *dims, ocp_nlp_sqp_rti_opts *opts,
    ocp_nlp_sqp_rti_memory *mem, ocp_nlp_sqp_rti_workspace *work)
{
    ocp_nlp_opts *nlp_opts = opts->nlp_opts;
    ocp_nlp_memory *nlp_mem = mem->nlp_mem;
    // the workspace struct itself sits at the start of the block
    char *c_ptr = (char *) work;
    c_ptr += sizeof(ocp_nlp_sqp_rti_workspace);
    // nested nlp workspace
    work->nlp_work = ocp_nlp_workspace_assign(
        config, dims, nlp_opts, nlp_mem, c_ptr);
    c_ptr += ocp_nlp_workspace_calculate_size(config, dims, nlp_opts);
    // temporary qp in (original, non-condensed dims)
    work->tmp_qp_in = ocp_qp_in_assign(dims->qp_solver->orig_dims, c_ptr);
    c_ptr += ocp_qp_in_calculate_size(dims->qp_solver->orig_dims);
    // temporary qp out
    work->tmp_qp_out = ocp_qp_out_assign(dims->qp_solver->orig_dims, c_ptr);
    c_ptr += ocp_qp_out_calculate_size(dims->qp_solver->orig_dims);
    if (opts->ext_qp_res)
    {
        // external qp residuals (debugging aid)
        work->qp_res = ocp_qp_res_assign(dims->qp_solver->orig_dims, c_ptr);
        c_ptr += ocp_qp_res_calculate_size(dims->qp_solver->orig_dims);
        // workspace for the residual computation
        work->qp_res_ws = ocp_qp_res_workspace_assign(
            dims->qp_solver->orig_dims, c_ptr);
        c_ptr += ocp_qp_res_workspace_calculate_size(
            dims->qp_solver->orig_dims);
    }
    // sanity check against the advertised workspace size
    assert((char *) work + ocp_nlp_sqp_rti_workspace_calculate_size(config,
        dims, opts) >= c_ptr);
    return;
}
/************************************************
* functions
************************************************/
/* Run one real-time iteration.
 * rti_phase selects the work done: 0 = preparation then feedback,
 * 1 = preparation only, 2 = feedback only. The total wall time is
 * recorded in the memory and in nlp_out. Returns the solver status. */
int ocp_nlp_sqp_rti(void *config_, void *dims_, void *nlp_in_, void *nlp_out_,
    void *opts_, void *mem_, void *work_)
{
    ocp_nlp_out *nlp_out = nlp_out_;
    ocp_nlp_sqp_rti_memory *mem = mem_;
    ocp_nlp_sqp_rti_opts *opts = opts_;

    mem->time_tot = 0.0;

    acados_timer timer;
    acados_tic(&timer);

    const int phase = opts->rti_phase;
    // phase 0 runs both steps; 1 and 2 each run exactly one of them
    if (phase == 0 || phase == 1)
        ocp_nlp_sqp_rti_preparation_step(
            config_, dims_, nlp_in_, nlp_out_, opts_, mem_, work_);
    if (phase == 0 || phase == 2)
        ocp_nlp_sqp_rti_feedback_step(
            config_, dims_, nlp_in_, nlp_out_, opts_, mem_, work_);

    const double elapsed = acados_toc(&timer);
    mem->time_tot = elapsed;
    nlp_out->total_time = elapsed;

    return mem->status;
}
/* RTI preparation phase: wire up the pointer aliases between the nlp
 * memory and the dynamics/cost/constraints/regularization submodules,
 * initialize the QP, and linearize the NLP (QP matrices). Everything
 * that does not need the new state estimate happens here, so the
 * feedback phase can be as short as possible. */
void ocp_nlp_sqp_rti_preparation_step(void *config_, void *dims_,
    void *nlp_in_, void *nlp_out_, void *opts_, void *mem_, void *work_)
{
    acados_timer timer1;
    ocp_nlp_dims *dims = dims_;
    ocp_nlp_config *config = config_;
    ocp_nlp_sqp_rti_opts *opts = opts_;
    ocp_nlp_opts *nlp_opts = opts->nlp_opts;
    ocp_nlp_sqp_rti_memory *mem = mem_;
    ocp_nlp_in *nlp_in = nlp_in_;
    ocp_nlp_out *nlp_out = nlp_out_;
    ocp_nlp_memory *nlp_mem = mem->nlp_mem;
    // ocp_qp_xcond_solver_config *qp_solver = config->qp_solver;
    ocp_nlp_sqp_rti_workspace *work = work_;
    ocp_nlp_sqp_rti_cast_workspace(config, dims, opts, mem, work);
    ocp_nlp_workspace *nlp_work = work->nlp_work;
    // reset per-call timers (time_reg is accumulated further in the feedback step)
    mem->time_lin = 0.0;
    mem->time_reg = 0.0;
    int N = dims->N;
    int ii;
#if defined(ACADOS_WITH_OPENMP)
    // backup number of threads
    int num_threads_bkp = omp_get_num_threads();
    // set number of threads
    omp_set_num_threads(opts->nlp_opts->num_threads);
#pragma omp parallel
    { // beginning of parallel region
#endif
    // alias to dynamics_memory: point each dynamics module at the shared
    // nlp iterate / QP-matrix storage, stages 0..N-1
#if defined(ACADOS_WITH_OPENMP)
#pragma omp for nowait
#endif
    for (ii = 0; ii < N; ii++)
    {
        config->dynamics[ii]->memory_set_ux_ptr(
            nlp_out->ux+ii, nlp_mem->dynamics[ii]);
        config->dynamics[ii]->memory_set_tmp_ux_ptr(
            nlp_work->tmp_nlp_out->ux+ii, nlp_mem->dynamics[ii]);
        config->dynamics[ii]->memory_set_ux1_ptr(
            nlp_out->ux+ii+1, nlp_mem->dynamics[ii]);
        config->dynamics[ii]->memory_set_tmp_ux1_ptr(
            nlp_work->tmp_nlp_out->ux+ii+1, nlp_mem->dynamics[ii]);
        config->dynamics[ii]->memory_set_pi_ptr(
            nlp_out->pi+ii, nlp_mem->dynamics[ii]);
        config->dynamics[ii]->memory_set_tmp_pi_ptr(
            nlp_work->tmp_nlp_out->pi+ii, nlp_mem->dynamics[ii]);
        config->dynamics[ii]->memory_set_BAbt_ptr(
            nlp_mem->qp_in->BAbt+ii, nlp_mem->dynamics[ii]);
        config->dynamics[ii]->memory_set_RSQrq_ptr(
            nlp_mem->qp_in->RSQrq+ii, nlp_mem->dynamics[ii]);
        config->dynamics[ii]->memory_set_dzduxt_ptr(
            nlp_mem->dzduxt+ii, nlp_mem->dynamics[ii]);
        config->dynamics[ii]->memory_set_sim_guess_ptr(
            nlp_mem->sim_guess+ii, nlp_mem->set_sim_guess+ii,
            nlp_mem->dynamics[ii]);
        config->dynamics[ii]->memory_set_z_alg_ptr(
            nlp_mem->z_alg+ii, nlp_mem->dynamics[ii]);
    }
    // alias to cost_memory: stages 0..N
#if defined(ACADOS_WITH_OPENMP)
#pragma omp for nowait
#endif
    for (ii = 0; ii <= N; ii++)
    {
        config->cost[ii]->memory_set_ux_ptr(
            nlp_out->ux+ii, nlp_mem->cost[ii]);
        config->cost[ii]->memory_set_tmp_ux_ptr(
            nlp_work->tmp_nlp_out->ux+ii, nlp_mem->cost[ii]);
        config->cost[ii]->memory_set_z_alg_ptr(
            nlp_mem->z_alg+ii, nlp_mem->cost[ii]);
        config->cost[ii]->memory_set_dzdux_tran_ptr(
            nlp_mem->dzduxt+ii, nlp_mem->cost[ii]);
        config->cost[ii]->memory_set_RSQrq_ptr(
            nlp_mem->qp_in->RSQrq+ii, nlp_mem->cost[ii]);
        config->cost[ii]->memory_set_Z_ptr(
            nlp_mem->qp_in->Z+ii, nlp_mem->cost[ii]);
    }
    // alias to constraints_memory: stages 0..N
#if defined(ACADOS_WITH_OPENMP)
#pragma omp for nowait
#endif
    for (ii = 0; ii <= N; ii++)
    {
        config->constraints[ii]->memory_set_ux_ptr(
            nlp_out->ux+ii, nlp_mem->constraints[ii]);
        config->constraints[ii]->memory_set_tmp_ux_ptr(
            nlp_work->tmp_nlp_out->ux+ii, nlp_mem->constraints[ii]);
        config->constraints[ii]->memory_set_lam_ptr(
            nlp_out->lam+ii, nlp_mem->constraints[ii]);
        config->constraints[ii]->memory_set_tmp_lam_ptr(
            nlp_work->tmp_nlp_out->lam+ii, nlp_mem->constraints[ii]);
        config->constraints[ii]->memory_set_z_alg_ptr(
            nlp_mem->z_alg+ii, nlp_mem->constraints[ii]);
        config->constraints[ii]->memory_set_dzdux_tran_ptr(
            nlp_mem->dzduxt+ii, nlp_mem->constraints[ii]);
        config->constraints[ii]->memory_set_DCt_ptr(
            nlp_mem->qp_in->DCt+ii, nlp_mem->constraints[ii]);
        config->constraints[ii]->memory_set_RSQrq_ptr(
            nlp_mem->qp_in->RSQrq+ii, nlp_mem->constraints[ii]);
        config->constraints[ii]->memory_set_idxb_ptr(
            nlp_mem->qp_in->idxb[ii], nlp_mem->constraints[ii]);
        config->constraints[ii]->memory_set_idxs_rev_ptr(
            nlp_mem->qp_in->idxs_rev[ii], nlp_mem->constraints[ii]);
        config->constraints[ii]->memory_set_idxe_ptr(
            nlp_mem->qp_in->idxe[ii], nlp_mem->constraints[ii]);
    }
    // alias to regularize memory: QP data in, QP solution out
    config->regularize->memory_set_RSQrq_ptr(
        dims->regularize, nlp_mem->qp_in->RSQrq, nlp_mem->regularize_mem);
    config->regularize->memory_set_rq_ptr(
        dims->regularize, nlp_mem->qp_in->rqz, nlp_mem->regularize_mem);
    config->regularize->memory_set_BAbt_ptr(
        dims->regularize, nlp_mem->qp_in->BAbt, nlp_mem->regularize_mem);
    config->regularize->memory_set_b_ptr(
        dims->regularize, nlp_mem->qp_in->b, nlp_mem->regularize_mem);
    config->regularize->memory_set_idxb_ptr(
        dims->regularize, nlp_mem->qp_in->idxb, nlp_mem->regularize_mem);
    config->regularize->memory_set_DCt_ptr(
        dims->regularize, nlp_mem->qp_in->DCt, nlp_mem->regularize_mem);
    config->regularize->memory_set_ux_ptr(
        dims->regularize, nlp_mem->qp_out->ux, nlp_mem->regularize_mem);
    config->regularize->memory_set_pi_ptr(
        dims->regularize, nlp_mem->qp_out->pi, nlp_mem->regularize_mem);
    config->regularize->memory_set_lam_ptr(
        dims->regularize, nlp_mem->qp_out->lam, nlp_mem->regularize_mem);
    // copy sampling times into dynamics model
#if defined(ACADOS_WITH_OPENMP)
#pragma omp for nowait
#endif
    // NOTE(oj): this will lead in an error for irk_gnsf, T must be set in precompute;
    // -> remove here and make sure precompute is called everywhere (e.g. Python interface).
    for (ii = 0; ii < N; ii++)
    {
        config->dynamics[ii]->model_set(config->dynamics[ii], dims->dynamics[ii],
            nlp_in->dynamics[ii], "T", nlp_in->Ts+ii);
    }
#if defined(ACADOS_WITH_OPENMP)
    } // end of parallel region
#endif
    // initialize QP
    ocp_nlp_initialize_qp(config, dims, nlp_in, nlp_out, nlp_opts, nlp_mem, nlp_work);
    /* SQP body */
    int sqp_iter = 0;
    // NOTE(review): stores the address of a stack local — dangling once this
    // function returns; confirm nothing dereferences nlp_mem->sqp_iter later.
    nlp_mem->sqp_iter = &sqp_iter;
    // linearizate NLP and update QP matrices
    acados_tic(&timer1);
    ocp_nlp_approximate_qp_matrices(config, dims, nlp_in,
        nlp_out, nlp_opts, nlp_mem, nlp_work);
    mem->time_lin += acados_toc(&timer1);
#if defined(ACADOS_WITH_OPENMP)
    // restore number of threads
    omp_set_num_threads(num_threads_bkp);
#endif
    return;
}
void ocp_nlp_sqp_rti_feedback_step(void *config_, void *dims_,
void *nlp_in_, void *nlp_out_, void *opts_, void *mem_, void *work_)
{
acados_timer timer1;
ocp_nlp_dims *dims = dims_;
ocp_nlp_config *config = config_;
ocp_nlp_sqp_rti_opts *opts = opts_;
ocp_nlp_opts *nlp_opts = opts->nlp_opts;
ocp_nlp_sqp_rti_memory *mem = mem_;
ocp_nlp_in *nlp_in = nlp_in_;
ocp_nlp_out *nlp_out = nlp_out_;
ocp_nlp_memory *nlp_mem = mem->nlp_mem;
ocp_qp_xcond_solver_config *qp_solver = config->qp_solver;
ocp_nlp_sqp_rti_workspace *work = work_;
ocp_nlp_sqp_rti_cast_workspace(config, dims, opts, mem, work);
ocp_nlp_workspace *nlp_work = work->nlp_work;
int qp_iter = 0;
int qp_status = 0;
double tmp_time;
mem->time_qp_sol = 0.0;
mem->time_qp_solver_call = 0.0;
mem->time_qp_xcond = 0.0;
mem->time_glob = 0.0;
// embed initial value (this actually updates all bounds at stage 0...)
ocp_nlp_embed_initial_value(config, dims, nlp_in,
nlp_out, nlp_opts, nlp_mem, nlp_work);
// update QP rhs for SQP (step prim var, abs dual var)
ocp_nlp_approximate_qp_vectors_sqp(config, dims, nlp_in,
nlp_out, nlp_opts, nlp_mem, nlp_work);
// regularize Hessian
acados_tic(&timer1);
config->regularize->regularize_hessian(config->regularize,
dims->regularize, opts->nlp_opts->regularize, nlp_mem->regularize_mem);
mem->time_reg += acados_toc(&timer1);
if (opts->print_level > 0) {
printf("\n------- qp_in --------\n");
print_ocp_qp_in(nlp_mem->qp_in);
}
if (!opts->warm_start_first_qp)
{
int tmp_int = 0;
config->qp_solver->opts_set(config->qp_solver,
opts->nlp_opts->qp_solver_opts, "warm_start", &tmp_int);
}
// solve qp
acados_tic(&timer1);
qp_status = qp_solver->evaluate(qp_solver, dims->qp_solver,
nlp_mem->qp_in, nlp_mem->qp_out, opts->nlp_opts->qp_solver_opts,
nlp_mem->qp_solver_mem, nlp_work->qp_work);
mem->time_qp_sol += acados_toc(&timer1);
qp_solver->memory_get(qp_solver, nlp_mem->qp_solver_mem, "time_qp_solver_call", &tmp_time);
mem->time_qp_solver_call += tmp_time;
qp_solver->memory_get(qp_solver, nlp_mem->qp_solver_mem, "time_qp_xcond", &tmp_time);
mem->time_qp_xcond += tmp_time;
// compute correct dual solution in case of Hessian regularization
acados_tic(&timer1);
config->regularize->correct_dual_sol(config->regularize,
dims->regularize, opts->nlp_opts->regularize, nlp_mem->regularize_mem);
mem->time_reg += acados_toc(&timer1);
// TODO move into QP solver memory ???
qp_info *qp_info_;
ocp_qp_out_get(nlp_mem->qp_out, "qp_info", &qp_info_);
nlp_out->qp_iter = qp_info_->num_iter;
qp_iter = qp_info_->num_iter;
// compute external QP residuals (for debugging)
if (opts->ext_qp_res)
{
ocp_qp_res_compute(nlp_mem->qp_in, nlp_mem->qp_out,
work->qp_res, work->qp_res_ws);
ocp_qp_res_compute_nrm_inf(work->qp_res, mem->stat+(mem->stat_n*1+2));
// printf("\nsqp_iter %d, res %e %e %e %e\n", sqp_iter,
// inf_norm_qp_res[0], inf_norm_qp_res[1],
// inf_norm_qp_res[2], inf_norm_qp_res[3]);
}
// printf("\n------- qp_out (sqp iter %d) ---------\n", sqp_iter);
// print_ocp_qp_out(nlp_mem->qp_out);
// exit(1);
// save statistics
mem->stat[mem->stat_n*1+0] = qp_status;
mem->stat[mem->stat_n*1+1] = qp_iter;
if ((qp_status!=ACADOS_SUCCESS) & (qp_status!=ACADOS_MAXITER))
{
// print_ocp_qp_in(mem->qp_in);
#ifndef ACADOS_SILENT
printf("QP solver returned error status %d\n", qp_status);
#endif
mem->status = ACADOS_QP_FAILURE;
return;
}
// globalization
acados_tic(&timer1);
double alpha = ocp_nlp_line_search(config, dims, nlp_in, nlp_out, nlp_opts, nlp_mem, nlp_work);
mem->time_glob += acados_toc(&timer1);
// update variables
ocp_nlp_update_variables_sqp(config, dims, nlp_in, nlp_out, nlp_opts, nlp_mem, nlp_work, alpha);
// ocp_nlp_dims_print(nlp_out->dims);
// ocp_nlp_out_print(nlp_out);
// exit(1);
// print_ocp_qp_in(mem->qp_in);
mem->status = ACADOS_SUCCESS;
}
/*
 * Precompute step of the SQP-RTI solver.
 *
 * 1) Sanity-checks that the "ns" (soft-constraint) dimension of every
 *    constraint module matches the NLP dims (aborts on mismatch).
 * 2) Sets the time step T on every dynamics module and runs its precompute.
 *
 * Returns ACADOS_SUCCESS, or the first failing dynamics precompute status.
 */
int ocp_nlp_sqp_rti_precompute(void *config_, void *dims_, void *nlp_in_,
    void *nlp_out_, void *opts_, void *mem_, void *work_)
{
    ocp_nlp_dims *dims = dims_;
    ocp_nlp_config *config = config_;
    ocp_nlp_sqp_rti_opts *opts = opts_;
    ocp_nlp_sqp_rti_memory *mem = mem_;
    ocp_nlp_in *nlp_in = nlp_in_;
    // ocp_nlp_out *nlp_out = nlp_out_;
    ocp_nlp_memory *nlp_mem = mem->nlp_mem;

    ocp_nlp_sqp_rti_workspace *work = work_;
    ocp_nlp_sqp_rti_cast_workspace(config, dims, opts, mem, work);
    ocp_nlp_workspace *nlp_work = work->nlp_work;

    int N = dims->N;
    int status = ACADOS_SUCCESS;
    int ii;

    // TODO(giaf) flag to enable/disable checks
    for (ii = 0; ii <= N; ii++)
    {
        int module_val;
        config->constraints[ii]->dims_get(config->constraints[ii],
            dims->constraints[ii], "ns", &module_val);
        if (dims->ns[ii] != module_val)
        {
            // FIX: adjacent string literals instead of a backslash-continued
            // one — the old form embedded the continuation line's leading
            // whitespace into the message; also terminate with a newline.
            printf("ocp_nlp_sqp_rti_precompute: inconsistent dimension ns "
                "for stage %d with constraint module, got %d, module: %d.\n",
                ii, dims->ns[ii], module_val);
            exit(1);
        }
    }

    // precompute
    for (ii = 0; ii < N; ii++)
    {
        // set T (time step of stage ii)
        config->dynamics[ii]->model_set(config->dynamics[ii],
            dims->dynamics[ii], nlp_in->dynamics[ii], "T", nlp_in->Ts+ii);
        // dynamics precompute
        status = config->dynamics[ii]->precompute(config->dynamics[ii],
            dims->dynamics[ii], nlp_in->dynamics[ii],
            opts->nlp_opts->dynamics[ii],
            nlp_mem->dynamics[ii],
            nlp_work->dynamics[ii]);
        if (status != ACADOS_SUCCESS)
            return status;
    }

    return status;
}
/*
 * Compute parametric sensitivities of the QP solution.
 *
 * Only field "ex" (initial state) at stage 0 is supported: a unit rhs is set
 * on the lbx/ubx bounds at (stage, index) of a zero-rhs copy of the last QP,
 * which is then pushed through the QP solver's sensitivity evaluation.  The
 * result (ux, pi, lam, t) is copied into sens_nlp_out.  Any other
 * field/stage combination is an error and aborts.
 */
void ocp_nlp_sqp_rti_eval_param_sens(void *config_, void *dims_, void *opts_,
    void *mem_, void *work_, char *field, int stage, int index,
    void *sens_nlp_out_)
{
    ocp_nlp_dims *dims = dims_;
    ocp_nlp_config *config = config_;
    ocp_nlp_sqp_rti_opts *opts = opts_;
    ocp_nlp_sqp_rti_memory *mem = mem_;
    ocp_nlp_memory *nlp_mem = mem->nlp_mem;
    ocp_nlp_out *sens_nlp_out = sens_nlp_out_;

    ocp_nlp_sqp_rti_workspace *work = work_;
    ocp_nlp_sqp_rti_cast_workspace(config, dims, opts, mem, work);
    ocp_nlp_workspace *nlp_work = work->nlp_work;

    // copy the last QP and zero out its rhs
    d_ocp_qp_copy_all(nlp_mem->qp_in, work->tmp_qp_in);
    d_ocp_qp_set_rhs_zero(work->tmp_qp_in);

    double one = 1.0;

    // FIX: logical && instead of bitwise & — same value for these 0/1
    // operands, but && states the intent and short-circuits.
    if ((!strcmp("ex", field)) && (stage == 0))
    {
        // unit perturbation of the initial-state bound at the given index
        d_ocp_qp_set_el("lbx", stage, index, &one, work->tmp_qp_in);
        d_ocp_qp_set_el("ubx", stage, index, &one, work->tmp_qp_in);

        config->qp_solver->eval_sens(config->qp_solver, dims->qp_solver,
            work->tmp_qp_in, work->tmp_qp_out, opts->nlp_opts->qp_solver_opts,
            nlp_mem->qp_solver_mem, nlp_work->qp_work);

        /* copy tmp_qp_out into sens_nlp_out */
        int i;
        int N = dims->N;
        int *nv = dims->nv;
        int *nx = dims->nx;
        int *ni = dims->ni;
        for (i = 0; i <= N; i++)
        {
            blasfeo_dveccp(nv[i], work->tmp_qp_out->ux + i, 0,
                sens_nlp_out->ux + i, 0);
            // pi (dynamics multipliers) only exist on the N stage transitions
            if (i < N)
                blasfeo_dveccp(nx[i + 1], work->tmp_qp_out->pi + i, 0,
                    sens_nlp_out->pi + i, 0);
            blasfeo_dveccp(2 * ni[i], work->tmp_qp_out->lam + i, 0,
                sens_nlp_out->lam + i, 0);
            blasfeo_dveccp(2 * ni[i], work->tmp_qp_out->t + i, 0,
                sens_nlp_out->t + i, 0);
        }
    }
    else
    {
        // FIX: adjacent literals — the old backslash-continued string
        // embedded the continuation line's leading whitespace in the message.
        printf("\nerror: field %s at stage %d not available in "
            "ocp_nlp_sqp_rti_eval_param_sens\n", field, stage);
        exit(1);
    }

    return;
}
// TODO rename memory_get ???
/*
 * Generic getter for solver results, timings and internal sub-structures.
 * `field` selects the quantity; `return_value_` must point to storage of the
 * matching type (int, double, or pointer).  Unknown fields abort.
 */
void ocp_nlp_sqp_rti_get(void *config_, void *dims_, void *mem_,
    const char *field, void *return_value_)
{
    ocp_nlp_config *config = config_;
    ocp_nlp_dims *dims = dims_;
    ocp_nlp_sqp_rti_memory *mem = mem_;

    if (!strcmp("sqp_iter", field))
    {
        // RTI always performs exactly one SQP iteration
        int *value = return_value_;
        *value = 1;
    }
    else if (!strcmp("status", field))
    {
        int *value = return_value_;
        *value = mem->status;
    }
    else if (!strcmp("time_tot", field) || !strcmp("tot_time", field))
    {
        double *value = return_value_;
        *value = mem->time_tot;
    }
    else if (!strcmp("time_qp_sol", field) || !strcmp("time_qp", field))
    {
        double *value = return_value_;
        *value = mem->time_qp_sol;
    }
    else if (!strcmp("time_qp_solver", field) || !strcmp("time_qp_solver_call", field))
    {
        double *value = return_value_;
        *value = mem->time_qp_solver_call;
    }
    else if (!strcmp("time_qp_xcond", field))
    {
        double *value = return_value_;
        *value = mem->time_qp_xcond;
    }
    else if (!strcmp("time_lin", field))
    {
        double *value = return_value_;
        *value = mem->time_lin;
    }
    else if (!strcmp("time_reg", field))
    {
        double *value = return_value_;
        *value = mem->time_reg;
    }
    else if (!strcmp("time_glob", field))
    {
        double *value = return_value_;
        *value = mem->time_glob;
    }
    else if (!strcmp("time_sim", field) || !strcmp("time_sim_ad", field) || !strcmp("time_sim_la", field))
    {
        double tmp = 0.0;
        double *ptr = return_value_;
        int N = dims->N;
        int ii;
        // BUG FIX: initialize the output before accumulating; previously the
        // caller-provided (possibly uninitialized) memory was summed into.
        *ptr = 0.0;
        // sum the requested simulation timing over all N dynamics stages
        for (ii=0; ii<N; ii++)
        {
            config->dynamics[ii]->memory_get(config->dynamics[ii], dims->dynamics[ii], mem->nlp_mem->dynamics[ii], field, &tmp);
            *ptr += tmp;
        }
    }
    else if (!strcmp("stat", field))
    {
        double **value = return_value_;
        *value = mem->stat;
    }
    else if (!strcmp("statistics", field))
    {
        // export the stat table in column-major layout, first column = row idx
        int n_row = 2;
        double *value = return_value_;
        for (int ii=0; ii<n_row; ii++)
        {
            value[ii+0] = ii;
            for (int jj=0; jj<mem->stat_n; jj++)
                value[ii+(jj+1)*n_row] = mem->stat[jj+ii*mem->stat_n];
        }
    }
    else if (!strcmp("stat_m", field))
    {
        int *value = return_value_;
        *value = mem->stat_m;
    }
    else if (!strcmp("stat_n", field))
    {
        int *value = return_value_;
        *value = mem->stat_n;
    }
    else if (!strcmp("nlp_mem", field))
    {
        void **value = return_value_;
        *value = mem->nlp_mem;
    }
    else if (!strcmp("qp_xcond_dims", field))
    {
        void **value = return_value_;
        *value = dims->qp_solver->xcond_dims;
    }
    else if (!strcmp("nlp_res", field))
    {
        ocp_nlp_res **value = return_value_;
        *value = mem->nlp_mem->nlp_res;
    }
    else if (!strcmp("qp_xcond_in", field))
    {
        void **value = return_value_;
        *value = mem->nlp_mem->qp_solver_mem->xcond_qp_in;
    }
    else if (!strcmp("qp_xcond_out", field))
    {
        void **value = return_value_;
        *value = mem->nlp_mem->qp_solver_mem->xcond_qp_out;
    }
    else if (!strcmp("qp_in", field))
    {
        void **value = return_value_;
        *value = mem->nlp_mem->qp_in;
    }
    else if (!strcmp("qp_out", field))
    {
        void **value = return_value_;
        *value = mem->nlp_mem->qp_out;
    }
    else if (!strcmp("qp_iter", field))
    {
        // forwarded to the QP solver's own memory getter
        config->qp_solver->memory_get(config->qp_solver,
            mem->nlp_mem->qp_solver_mem, "iter", return_value_);
    }
    else if (!strcmp("res_stat", field))
    {
        double *value = return_value_;
        *value = mem->nlp_mem->nlp_res->inf_norm_res_stat;
    }
    else if (!strcmp("res_eq", field))
    {
        double *value = return_value_;
        *value = mem->nlp_mem->nlp_res->inf_norm_res_eq;
    }
    else if (!strcmp("res_ineq", field))
    {
        double *value = return_value_;
        *value = mem->nlp_mem->nlp_res->inf_norm_res_ineq;
    }
    else if (!strcmp("res_comp", field))
    {
        double *value = return_value_;
        *value = mem->nlp_mem->nlp_res->inf_norm_res_comp;
    }
    else if (!strcmp("cost_value", field))
    {
        double *value = return_value_;
        *value = mem->nlp_mem->cost_value;
    }
    else
    {
        printf("\nerror: field %s not available in ocp_nlp_sqp_rti_get\n", field);
        exit(1);
    }
}
/*
 * Getter for the options sub-structure.  Only field "nlp_opts" is supported;
 * any other field aborts with an error message.
 */
void ocp_nlp_sqp_rti_opts_get(void *config_, void *dims_, void *opts_,
    const char *field, void *return_value_)
{
    // ocp_nlp_config *config = config_;
    ocp_nlp_sqp_rti_opts *opts = opts_;

    // guard clause: bail out on anything but the single supported field
    if (strcmp("nlp_opts", field) != 0)
    {
        printf("\nerror: field %s not available in ocp_nlp_sqp_rti_opts_get\n", field);
        exit(1);
    }

    void **value = return_value_;
    *value = opts->nlp_opts;
}
/*
 * Getter for the workspace sub-structure.  Only field "nlp_work" is
 * supported; any other field aborts with an error message.
 */
void ocp_nlp_sqp_rti_work_get(void *config_, void *dims_, void *work_,
    const char *field, void *return_value_)
{
    // ocp_nlp_config *config = config_;
    ocp_nlp_sqp_rti_workspace *work = work_;

    // guard clause: bail out on anything but the single supported field
    if (strcmp("nlp_work", field) != 0)
    {
        printf("\nerror: field %s not available in ocp_nlp_sqp_rti_work_get\n", field);
        exit(1);
    }

    void **value = return_value_;
    *value = work->nlp_work;
}
/*
 * Populate the generic NLP-solver config vtable with the SQP-RTI
 * implementations.  (Function names decay to pointers; the explicit '&' of
 * the old version was redundant.)
 */
void ocp_nlp_sqp_rti_config_initialize_default(void *config_)
{
    ocp_nlp_config *config = (ocp_nlp_config *) config_;

    // options handling
    config->opts_calculate_size = ocp_nlp_sqp_rti_opts_calculate_size;
    config->opts_assign = ocp_nlp_sqp_rti_opts_assign;
    config->opts_initialize_default = ocp_nlp_sqp_rti_opts_initialize_default;
    config->opts_update = ocp_nlp_sqp_rti_opts_update;
    config->opts_set = ocp_nlp_sqp_rti_opts_set;
    config->opts_set_at_stage = ocp_nlp_sqp_rti_opts_set_at_stage;

    // memory and workspace sizing
    config->memory_calculate_size = ocp_nlp_sqp_rti_memory_calculate_size;
    config->memory_assign = ocp_nlp_sqp_rti_memory_assign;
    config->workspace_calculate_size = ocp_nlp_sqp_rti_workspace_calculate_size;

    // solver entry points
    config->evaluate = ocp_nlp_sqp_rti;
    config->eval_param_sens = ocp_nlp_sqp_rti_eval_param_sens;
    config->config_initialize_default = ocp_nlp_sqp_rti_config_initialize_default;
    config->precompute = ocp_nlp_sqp_rti_precompute;

    // getters
    config->get = ocp_nlp_sqp_rti_get;
    config->opts_get = ocp_nlp_sqp_rti_opts_get;
    config->work_get = ocp_nlp_sqp_rti_work_get;
}
|
GB_binop__eq_bool.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__eq_bool
// A.*B function (eWiseMult): GB_AemultB__eq_bool
// A*D function (colscale): GB_AxD__eq_bool
// D*A function (rowscale): GB_DxB__eq_bool
// C+=B function (dense accum): GB_Cdense_accumB__eq_bool
// C+=b function (dense accum): GB_Cdense_accumb__eq_bool
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__eq_bool
// C=scalar+B GB_bind1st__eq_bool
// C=scalar+B' GB_bind1st_tran__eq_bool
// C=A+scalar GB_bind2nd__eq_bool
// C=A'+scalar GB_bind2nd_tran__eq_bool
// C type: bool
// A type: bool
// B,b type: bool
// BinaryOp: cij = (aij == bij)
#define GB_ATYPE \
bool
#define GB_BTYPE \
bool
#define GB_CTYPE \
bool
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
bool aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
bool bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
bool t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = (x == y) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_EQ || GxB_NO_BOOL || GxB_NO_EQ_BOOL)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where C, A and B are all dense; operator cij = (aij == bij) on
// bool, no accumulator.  The numeric loop lives in the included template,
// specialized through the GB_* macros defined earlier in this file.
GrB_Info GB_Cdense_ewise3_noaccum__eq_bool
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
// operator/type disabled at compile time (see GB_DISABLE above);
// GrB_NO_VALUE tells the caller to fall back to the generic kernel
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B where C is dense and B is sparse; accumulation uses the EQ (==)
// operator on bool.  The slice arrays partition B's entries into `ntasks`
// balanced tasks for the included template.
GrB_Info GB_Cdense_accumB__eq_bool
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
// operator/type disabled at compile time; caller uses the generic kernel
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b where C is dense and b is a scalar; accumulation uses the EQ (==)
// operator on bool.  FIX: the auto-generated original had a second,
// unreachable `return (GrB_SUCCESS)` after the block that already returns.
GrB_Info GB_Cdense_accumb__eq_bool
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
// operator/type disabled at compile time; caller uses the generic kernel
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type bool
bool bwork = (*((bool *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D, column scale by the diagonal matrix D, with cij = (aij == djj) on
// bool.  Cx aliases C->x; the numeric loop is in the included meta file.
GrB_Info GB_AxD__eq_bool
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
// operator/type disabled at compile time; caller uses the generic kernel
return (GrB_NO_VALUE) ;
#else
bool *GB_RESTRICT Cx = (bool *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B, row scale by the diagonal matrix D, with cij = (dii == bij) on
// bool.  Cx aliases C->x; the numeric loop is in the included meta file.
GrB_Info GB_DxB__eq_bool
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
// operator/type disabled at compile time; caller uses the generic kernel
return (GrB_NO_VALUE) ;
#else
bool *GB_RESTRICT Cx = (bool *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
#undef GB_FREE_ALL
#define GB_FREE_ALL \
{ \
GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \
GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \
GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \
}
// eWiseAdd (set union): C = A+B or C<M> = A+B with cij = (aij == bij) on
// bool.  TaskList/C_to_* describe the precomputed parallel schedule; the
// numeric phase is GB_add_template.c, driven by the GB_* macros above.
GrB_Info GB_AaddB__eq_bool
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// ek_slice workspaces; presumably populated inside the template — all three
// are released by GB_FREE_ALL (macro defined just above this function)
int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
#include "GB_add_template.c"
GB_FREE_ALL ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult (set intersection): C = A.*B or C<M> = A.*B with
// cij = (aij == bij) on bool.  Same scheduling inputs as GB_AaddB; the
// numeric phase is GB_emult_template.c.
GrB_Info GB_AemultB__eq_bool
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// ek_slice workspaces; released by GB_FREE_ALL (macro defined above)
int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
#include "GB_emult_template.c"
GB_FREE_ALL ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [k] = (x == Bx [k]) for every entry k present per the bitmap Bb, with
// the scalar x bound as the first operand.  Entries absent from Bb are left
// untouched in Cx.
GrB_Info GB_bind1st__eq_bool
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *GB_RESTRICT Bb,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *Cx = (bool *) Cx_output ;
    bool *Bx = (bool *) Bx_input ;
    bool x = (*((bool *) x_input)) ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        // only touch entries present in the bitmap
        if (GBB (Bb, k))
        {
            Cx [k] = (x == Bx [k]) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [k] = (Ax [k] == y) for every entry k present per the bitmap Ab, with
// the scalar y bound as the second operand.  Entries absent from Ab are left
// untouched in Cx.
GrB_Info GB_bind2nd__eq_bool
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *GB_RESTRICT Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *Cx = (bool *) Cx_output ;
    bool *Ax = (bool *) Ax_input ;
    bool y = (*((bool *) y_input)) ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        // only touch entries present in the bitmap
        if (GBB (Ab, k))
        {
            Cx [k] = (Ax [k] == y) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
bool aij = Ax [pA] ; \
Cx [pC] = (x == aij) ; \
}
// C = op (x, A'): transpose A and apply cij = (x == aij), with the scalar x
// bound as the first operand.  Uses GB_unop_transpose.c with GB_CAST_OP
// (defined just above) supplying the per-entry operation.
GrB_Info GB_bind1st_tran__eq_bool
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
bool
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool x = (*((const bool *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for the rest of the file
#undef GB_ATYPE
#define GB_ATYPE \
bool
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
bool aij = Ax [pA] ; \
Cx [pC] = (aij == y) ; \
}
// C = op (A', y): transpose A and apply cij = (aij == y), with the scalar y
// bound as the second operand.  Uses GB_unop_transpose.c with GB_CAST_OP
// (redefined just above) supplying the per-entry operation.
GrB_Info GB_bind2nd_tran__eq_bool
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool y = (*((const bool *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
Parser.h | //===--- Parser.h - C Language Parser ---------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the Parser interface.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_PARSE_PARSER_H
#define LLVM_CLANG_PARSE_PARSER_H
#include "clang/AST/Availability.h"
#include "clang/Basic/OpenMPKinds.h"
#include "clang/Basic/OperatorPrecedence.h"
#include "clang/Basic/Specifiers.h"
#include "clang/Lex/CodeCompletionHandler.h"
#include "clang/Lex/Preprocessor.h"
#include "clang/Sema/DeclSpec.h"
#include "clang/Sema/LoopHint.h"
#include "clang/Sema/Sema.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/PrettyStackTrace.h"
#include "llvm/Support/SaveAndRestore.h"
#include <memory>
#include <stack>
namespace clang {
class PragmaHandler;
class Scope;
class BalancedDelimiterTracker;
class CorrectionCandidateCallback;
class DeclGroupRef;
class DiagnosticBuilder;
class Parser;
class ParsingDeclRAIIObject;
class ParsingDeclSpec;
class ParsingDeclarator;
class ParsingFieldDeclarator;
class ColonProtectionRAIIObject;
class InMessageExpressionRAIIObject;
class PoisonSEHIdentifiersRAIIObject;
class VersionTuple;
class OMPClause;
class ObjCTypeParamList;
class ObjCTypeParameter;
/// Parser - This implements a parser for the C family of languages. After
/// parsing units of the grammar, productions are invoked to handle whatever has
/// been read.
///
class Parser : public CodeCompletionHandler {
friend class ColonProtectionRAIIObject;
friend class InMessageExpressionRAIIObject;
friend class PoisonSEHIdentifiersRAIIObject;
friend class ObjCDeclContextSwitch;
friend class ParenBraceBracketBalancer;
friend class BalancedDelimiterTracker;
Preprocessor &PP;
/// Tok - The current token we are peeking ahead. All parsing methods assume
/// that this is valid.
Token Tok;
// PrevTokLocation - The location of the token we previously
// consumed. This token is used for diagnostics where we expected to
// see a token following another token (e.g., the ';' at the end of
// a statement).
SourceLocation PrevTokLocation;
unsigned short ParenCount = 0, BracketCount = 0, BraceCount = 0;
unsigned short MisplacedModuleBeginCount = 0;
/// Actions - These are the callbacks we invoke as we parse various constructs
/// in the file.
Sema &Actions;
DiagnosticsEngine &Diags;
/// ScopeCache - Cache scopes to reduce malloc traffic.
enum { ScopeCacheSize = 16 };
unsigned NumCachedScopes;
Scope *ScopeCache[ScopeCacheSize];
/// Identifiers used for SEH handling in Borland. These are only
/// allowed in particular circumstances
// __except block
IdentifierInfo *Ident__exception_code,
*Ident___exception_code,
*Ident_GetExceptionCode;
// __except filter expression
IdentifierInfo *Ident__exception_info,
*Ident___exception_info,
*Ident_GetExceptionInfo;
// __finally
IdentifierInfo *Ident__abnormal_termination,
*Ident___abnormal_termination,
*Ident_AbnormalTermination;
/// Contextual keywords for Microsoft extensions.
IdentifierInfo *Ident__except;
mutable IdentifierInfo *Ident_sealed;
/// Ident_super - IdentifierInfo for "super", to support fast
/// comparison.
IdentifierInfo *Ident_super;
/// Ident_vector, Ident_bool - cached IdentifierInfos for "vector" and
/// "bool" fast comparison. Only present if AltiVec or ZVector are enabled.
IdentifierInfo *Ident_vector;
IdentifierInfo *Ident_bool;
/// Ident_pixel - cached IdentifierInfos for "pixel" fast comparison.
/// Only present if AltiVec enabled.
IdentifierInfo *Ident_pixel;
/// Objective-C contextual keywords.
mutable IdentifierInfo *Ident_instancetype;
/// \brief Identifier for "introduced".
IdentifierInfo *Ident_introduced;
/// \brief Identifier for "deprecated".
IdentifierInfo *Ident_deprecated;
/// \brief Identifier for "obsoleted".
IdentifierInfo *Ident_obsoleted;
/// \brief Identifier for "unavailable".
IdentifierInfo *Ident_unavailable;
/// \brief Identifier for "message".
IdentifierInfo *Ident_message;
/// \brief Identifier for "strict".
IdentifierInfo *Ident_strict;
/// \brief Identifier for "replacement".
IdentifierInfo *Ident_replacement;
/// Identifiers used by the 'external_source_symbol' attribute.
IdentifierInfo *Ident_language, *Ident_defined_in,
*Ident_generated_declaration;
/// C++0x contextual keywords.
mutable IdentifierInfo *Ident_final;
mutable IdentifierInfo *Ident_GNU_final;
mutable IdentifierInfo *Ident_override;
// C++ type trait keywords that can be reverted to identifiers and still be
// used as type traits.
llvm::SmallDenseMap<IdentifierInfo *, tok::TokenKind> RevertibleTypeTraits;
std::unique_ptr<PragmaHandler> AlignHandler;
std::unique_ptr<PragmaHandler> GCCVisibilityHandler;
std::unique_ptr<PragmaHandler> OptionsHandler;
std::unique_ptr<PragmaHandler> PackHandler;
std::unique_ptr<PragmaHandler> MSStructHandler;
std::unique_ptr<PragmaHandler> UnusedHandler;
std::unique_ptr<PragmaHandler> WeakHandler;
std::unique_ptr<PragmaHandler> RedefineExtnameHandler;
std::unique_ptr<PragmaHandler> FPContractHandler;
std::unique_ptr<PragmaHandler> OpenCLExtensionHandler;
std::unique_ptr<PragmaHandler> OpenMPHandler;
std::unique_ptr<PragmaHandler> MSCommentHandler;
std::unique_ptr<PragmaHandler> MSDetectMismatchHandler;
std::unique_ptr<PragmaHandler> MSPointersToMembers;
std::unique_ptr<PragmaHandler> MSVtorDisp;
std::unique_ptr<PragmaHandler> MSInitSeg;
std::unique_ptr<PragmaHandler> MSDataSeg;
std::unique_ptr<PragmaHandler> MSBSSSeg;
std::unique_ptr<PragmaHandler> MSConstSeg;
std::unique_ptr<PragmaHandler> MSCodeSeg;
std::unique_ptr<PragmaHandler> MSSection;
std::unique_ptr<PragmaHandler> MSRuntimeChecks;
std::unique_ptr<PragmaHandler> MSIntrinsic;
std::unique_ptr<PragmaHandler> CUDAForceHostDeviceHandler;
std::unique_ptr<PragmaHandler> OptimizeHandler;
std::unique_ptr<PragmaHandler> LoopHintHandler;
std::unique_ptr<PragmaHandler> UnrollHintHandler;
std::unique_ptr<PragmaHandler> NoUnrollHintHandler;
std::unique_ptr<PragmaHandler> FPHandler;
std::unique_ptr<PragmaHandler> AttributePragmaHandler;
std::unique_ptr<CommentHandler> CommentSemaHandler;
/// Whether the '>' token acts as an operator or not. This will be
/// true except when we are parsing an expression within a C++
/// template argument list, where the '>' closes the template
/// argument list.
bool GreaterThanIsOperator;
/// ColonIsSacred - When this is false, we aggressively try to recover from
/// code like "foo : bar" as if it were a typo for "foo :: bar". This is not
/// safe in case statements and a few other things. This is managed by the
/// ColonProtectionRAIIObject RAII object.
bool ColonIsSacred;
/// \brief When true, we are directly inside an Objective-C message
/// send expression.
///
/// This is managed by the \c InMessageExpressionRAIIObject class, and
/// should not be set directly.
bool InMessageExpression;
/// The "depth" of the template parameters currently being parsed.
unsigned TemplateParameterDepth;
/// \brief RAII class that manages the template parameter depth.
///
/// Tracks how many levels this scope has added (via ++ or addDepth) and
/// subtracts exactly that amount from the shared depth counter on
/// destruction, restoring the caller's depth.
class TemplateParameterDepthRAII {
  unsigned &Depth;
  unsigned AddedLevels;

public:
  explicit TemplateParameterDepthRAII(unsigned &D) : Depth(D), AddedLevels(0) {}

  ~TemplateParameterDepthRAII() { Depth -= AddedLevels; }

  /// Add a single template parameter level.
  void operator++() {
    AddedLevels += 1;
    Depth += 1;
  }

  /// Add \p D levels at once.
  void addDepth(unsigned D) {
    AddedLevels += D;
    Depth += D;
  }

  unsigned getDepth() const { return Depth; }
};
/// Factory object for creating AttributeList objects.
AttributeFactory AttrFactory;

/// \brief Gathers and cleans up TemplateIdAnnotations when parsing of a
/// top-level declaration is finished.
SmallVector<TemplateIdAnnotation *, 16> TemplateIds;

/// \brief Identifiers which have been declared within a tentative parse.
SmallVector<IdentifierInfo *, 8> TentativelyDeclaredIdentifiers;

IdentifierInfo *getSEHExceptKeyword();

/// True if we are within an Objective-C container while parsing C-like decls.
///
/// This is necessary because Sema thinks we have left the container
/// to parse the C-like decls, meaning Actions.getObjCDeclContext() will
/// be NULL.
bool ParsingInObjCContainer;

/// \brief Whether function bodies should be skipped while parsing.
/// NOTE(review): mirrors the constructor parameter of the same name.
bool SkipFunctionBodies;

/// The location of the expression statement that is being parsed right now.
/// Used to determine if an expression that is being parsed is a statement or
/// just a regular sub-expression.
SourceLocation ExprStatementTokLoc;

public:
Parser(Preprocessor &PP, Sema &Actions, bool SkipFunctionBodies);
~Parser() override;

// Trivial accessors forwarding to the preprocessor and the semantic-analysis
// object this parser drives.
const LangOptions &getLangOpts() const { return PP.getLangOpts(); }
const TargetInfo &getTargetInfo() const { return PP.getTargetInfo(); }
Preprocessor &getPreprocessor() const { return PP; }
Sema &getActions() const { return Actions; }
AttributeFactory &getAttrFactory() { return AttrFactory; }

const Token &getCurToken() const { return Tok; }
Scope *getCurScope() const { return Actions.getCurScope(); }
void incrementMSManglingNumber() const {
  return Actions.incrementMSManglingNumber();
}

Decl *getObjCDeclContext() const { return Actions.getObjCDeclContext(); }

// Type forwarding. All of these are statically 'void*', but they may all be
// different actual classes based on the actions in place.
typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy;
typedef OpaquePtr<TemplateName> TemplateTy;

typedef SmallVector<TemplateParameterList *, 4> TemplateParameterLists;

typedef Sema::FullExprArg FullExprArg;

// Parsing methods.

/// Initialize - Warm up the parser.
///
void Initialize();

/// Parse the first top-level declaration in a translation unit.
bool ParseFirstTopLevelDecl(DeclGroupPtrTy &Result);

/// ParseTopLevelDecl - Parse one top-level declaration. Returns true if
/// the EOF was encountered.
bool ParseTopLevelDecl(DeclGroupPtrTy &Result);

/// Convenience overload that parses one top-level declaration and discards
/// the resulting declaration group.
bool ParseTopLevelDecl() {
  DeclGroupPtrTy Result;
  return ParseTopLevelDecl(Result);
}
/// ConsumeToken - Consume the current 'peek token' and lex the next one.
/// This does not work with special tokens: string literals, code completion
/// and balanced tokens must be handled using the specific consume methods.
/// Returns the location of the consumed token.
SourceLocation ConsumeToken() {
  assert(!isTokenSpecial() &&
         "Should consume special tokens with Consume*Token");
  const SourceLocation ConsumedLoc = Tok.getLocation();
  PrevTokLocation = ConsumedLoc;
  PP.Lex(Tok);
  return ConsumedLoc;
}
/// Consume the current token if (and only if) it has kind \p Expected.
/// \returns true when a token was consumed.
bool TryConsumeToken(tok::TokenKind Expected) {
  if (!Tok.is(Expected))
    return false;
  assert(!isTokenSpecial() &&
         "Should consume special tokens with Consume*Token");
  PrevTokLocation = Tok.getLocation();
  PP.Lex(Tok);
  return true;
}

/// As above, additionally reporting the consumed token's location in \p Loc
/// on success.
bool TryConsumeToken(tok::TokenKind Expected, SourceLocation &Loc) {
  if (TryConsumeToken(Expected)) {
    Loc = PrevTokLocation;
    return true;
  }
  return false;
}
/// Return the source location just past the end of the previously
/// consumed token.
SourceLocation getEndOfPreviousToken() {
  return PP.getLocForEndOfToken(PrevTokLocation);
}

/// Retrieve the underscored keyword (_Nonnull, _Nullable) that corresponds
/// to the given nullability kind.
IdentifierInfo *getNullabilityKeyword(NullabilityKind nullability) {
  return Actions.getNullabilityKeyword(nullability);
}

private:
//===--------------------------------------------------------------------===//
// Low-Level token peeking and consumption methods.
//

/// isTokenParen - Return true if the cur token is '(' or ')'.
bool isTokenParen() const {
  return Tok.getKind() == tok::l_paren || Tok.getKind() == tok::r_paren;
}
/// isTokenBracket - Return true if the cur token is '[' or ']'.
bool isTokenBracket() const {
  return Tok.getKind() == tok::l_square || Tok.getKind() == tok::r_square;
}
/// isTokenBrace - Return true if the cur token is '{' or '}'.
bool isTokenBrace() const {
  return Tok.getKind() == tok::l_brace || Tok.getKind() == tok::r_brace;
}
/// isTokenStringLiteral - True if this token is a string-literal.
bool isTokenStringLiteral() const {
  return tok::isStringLiteral(Tok.getKind());
}
/// isTokenSpecial - True if this token requires special consumption methods.
bool isTokenSpecial() const {
  return isTokenStringLiteral() || isTokenParen() || isTokenBracket() ||
         isTokenBrace() || Tok.is(tok::code_completion);
}

/// \brief Returns true if the current token is '=' or is a type of '='.
/// For typos, give a fixit to '='
bool isTokenEqualOrEqualTypo();

/// \brief Return the current token to the token stream and make the given
/// token the current token.
void UnconsumeToken(Token &Consumed) {
  // Push 'Consumed' then the old current token back into the preprocessor;
  // the first Lex pulls 'Consumed' out as the new current token and the
  // saved token remains queued behind it. Order here is load-bearing.
  Token Next = Tok;
  PP.EnterToken(Consumed);
  PP.Lex(Tok);
  PP.EnterToken(Next);
}
/// ConsumeAnyToken - Dispatch to the right Consume* method based on the
/// current token type. This should only be used in cases where the type of
/// the token really isn't known, e.g. in error recovery.
///
/// If \p ConsumeCodeCompletionTok is true a code-completion token is consumed
/// directly; otherwise it is routed through
/// handleUnexpectedCodeCompletionToken().
SourceLocation ConsumeAnyToken(bool ConsumeCodeCompletionTok = false) {
  if (isTokenParen())
    return ConsumeParen();
  if (isTokenBracket())
    return ConsumeBracket();
  if (isTokenBrace())
    return ConsumeBrace();
  if (isTokenStringLiteral())
    return ConsumeStringToken();
  if (Tok.is(tok::code_completion))
    return ConsumeCodeCompletionTok ? ConsumeCodeCompletionToken()
                                    : handleUnexpectedCodeCompletionToken();
  return ConsumeToken();
}
/// ConsumeParen - This consume method keeps the paren count up-to-date.
///
SourceLocation ConsumeParen() {
  assert(isTokenParen() && "wrong consume method");
  if (Tok.is(tok::l_paren))
    ++ParenCount;
  else if (ParenCount != 0)
    --ParenCount; // Don't let unbalanced )'s drive the count negative.
  const SourceLocation Loc = Tok.getLocation();
  PrevTokLocation = Loc;
  PP.Lex(Tok);
  return Loc;
}
/// ConsumeBracket - This consume method keeps the bracket count up-to-date.
///
SourceLocation ConsumeBracket() {
  assert(isTokenBracket() && "wrong consume method");
  if (Tok.is(tok::l_square))
    ++BracketCount;
  else if (BracketCount != 0)
    --BracketCount; // Don't let unbalanced ]'s drive the count negative.
  const SourceLocation Loc = Tok.getLocation();
  PrevTokLocation = Loc;
  PP.Lex(Tok);
  return Loc;
}
/// ConsumeBrace - This consume method keeps the brace count up-to-date.
///
SourceLocation ConsumeBrace() {
  assert(isTokenBrace() && "wrong consume method");
  if (Tok.is(tok::l_brace))
    ++BraceCount;
  else if (BraceCount != 0)
    --BraceCount; // Don't let unbalanced }'s drive the count negative.
  const SourceLocation Loc = Tok.getLocation();
  PrevTokLocation = Loc;
  PP.Lex(Tok);
  return Loc;
}
/// ConsumeStringToken - Consume the current 'peek token', lexing a new one
/// and returning the token kind. This method is specific to strings, as it
/// handles string literal concatenation, as per C99 5.1.1.2, translation
/// phase #6.
SourceLocation ConsumeStringToken() {
  assert(isTokenStringLiteral() &&
         "Should only consume string literals with this method");
  PrevTokLocation = Tok.getLocation();
  PP.Lex(Tok);
  return PrevTokLocation;
}

/// \brief Consume the current code-completion token.
///
/// This routine can be called to consume the code-completion token and
/// continue processing in special cases where \c cutOffParsing() isn't
/// desired, such as token caching or completion with lookahead.
SourceLocation ConsumeCodeCompletionToken() {
  assert(Tok.is(tok::code_completion));
  PrevTokLocation = Tok.getLocation();
  PP.Lex(Tok);
  return PrevTokLocation;
}
/// \brief When we are consuming a code-completion token without having
/// matched specific position in the grammar, provide code-completion results
/// based on context.
///
/// \returns the source location of the code-completion token.
SourceLocation handleUnexpectedCodeCompletionToken();

/// \brief Abruptly cut off parsing; mainly used when we have reached the
/// code-completion point.
void cutOffParsing() {
  if (PP.isCodeCompletionEnabled())
    PP.setCodeCompletionReached();
  // Cut off parsing by acting as if we reached the end-of-file.
  Tok.setKind(tok::eof);
}
/// \brief Determine if we're at the end of the file or at a transition
/// between modules.
bool isEofOrEom() {
tok::TokenKind Kind = Tok.getKind();
return Kind == tok::eof || Kind == tok::annot_module_begin ||
Kind == tok::annot_module_end || Kind == tok::annot_module_include;
}
/// \brief Initialize all pragma handlers.
void initializePragmaHandlers();

/// \brief Destroy and reset all pragma handlers.
void resetPragmaHandlers();

/// \brief Handle the annotation token produced for #pragma unused(...)
void HandlePragmaUnused();

/// \brief Handle the annotation token produced for
/// #pragma GCC visibility...
void HandlePragmaVisibility();

/// \brief Handle the annotation token produced for
/// #pragma pack...
void HandlePragmaPack();

/// \brief Handle the annotation token produced for
/// #pragma ms_struct...
void HandlePragmaMSStruct();

/// \brief Handle the annotation token produced for
/// #pragma comment...
void HandlePragmaMSComment();

// Handlers for further Microsoft-specific pragma annotation tokens.
void HandlePragmaMSPointersToMembers();
void HandlePragmaMSVtorDisp();
void HandlePragmaMSPragma();
bool HandlePragmaMSSection(StringRef PragmaName,
                           SourceLocation PragmaLocation);
bool HandlePragmaMSSegment(StringRef PragmaName,
                           SourceLocation PragmaLocation);
bool HandlePragmaMSInitSeg(StringRef PragmaName,
                           SourceLocation PragmaLocation);

/// \brief Handle the annotation token produced for
/// #pragma align...
void HandlePragmaAlign();

/// \brief Handle the annotation token produced for
/// #pragma clang __debug dump...
void HandlePragmaDump();

/// \brief Handle the annotation token produced for
/// #pragma weak id...
void HandlePragmaWeak();

/// \brief Handle the annotation token produced for
/// #pragma weak id = id...
void HandlePragmaWeakAlias();

/// \brief Handle the annotation token produced for
/// #pragma redefine_extname...
void HandlePragmaRedefineExtname();

/// \brief Handle the annotation token produced for
/// #pragma STDC FP_CONTRACT...
void HandlePragmaFPContract();

/// \brief Handle the annotation token produced for
/// #pragma clang fp ...
void HandlePragmaFP();

/// \brief Handle the annotation token produced for
/// #pragma OPENCL EXTENSION...
void HandlePragmaOpenCLExtension();

/// \brief Handle the annotation token produced for
/// #pragma clang __debug captured
StmtResult HandlePragmaCaptured();

/// \brief Handle the annotation token produced for
/// #pragma clang loop and #pragma unroll.
bool HandlePragmaLoopHint(LoopHint &Hint);

// Subject-match-rule parsing and handling for #pragma clang attribute.
bool ParsePragmaAttributeSubjectMatchRuleSet(
    attr::ParsedSubjectMatchRuleSet &SubjectMatchRules,
    SourceLocation &AnyLoc, SourceLocation &LastMatchRuleEndLoc);
void HandlePragmaAttribute();
/// GetLookAheadToken - This peeks ahead N tokens and returns that token
/// without consuming any tokens. LookAhead(0) returns 'Tok', LookAhead(1)
/// returns the token after Tok, etc.
///
/// Note that this differs from the Preprocessor's LookAhead method, because
/// the Parser always has one token lexed that the preprocessor doesn't.
///
const Token &GetLookAheadToken(unsigned N) {
  if (N == 0 || Tok.is(tok::eof)) return Tok;
  return PP.LookAhead(N-1);
}

public:
/// NextToken - This peeks ahead one token and returns it without
/// consuming it.
const Token &NextToken() {
  return PP.LookAhead(0);
}
/// getTypeAnnotation - Read a parsed type out of an annotation token.
static ParsedType getTypeAnnotation(Token &Tok) {
  return ParsedType::getFromOpaquePtr(Tok.getAnnotationValue());
}

private:
/// setTypeAnnotation - Store a parsed type into an annotation token.
static void setTypeAnnotation(Token &Tok, ParsedType T) {
  Tok.setAnnotationValue(T.getAsOpaquePtr());
}

/// \brief Read an already-translated primary expression out of an annotation
/// token.
static ExprResult getExprAnnotation(Token &Tok) {
  return ExprResult::getFromOpaquePointer(Tok.getAnnotationValue());
}

/// \brief Set the primary expression corresponding to the given annotation
/// token.
static void setExprAnnotation(Token &Tok, ExprResult ER) {
  Tok.setAnnotationValue(ER.getAsOpaquePointer());
}

public:
// If NeedType is true, then TryAnnotateTypeOrScopeToken will try harder to
// find a type name by attempting typo correction.
bool TryAnnotateTypeOrScopeToken();
bool TryAnnotateTypeOrScopeTokenAfterScopeSpec(CXXScopeSpec &SS,
                                               bool IsNewScope);
bool TryAnnotateCXXScopeToken(bool EnteringContext = false);
private:
/// Possible outcomes of annotating an identifier via TryAnnotateName.
enum AnnotatedNameKind {
  /// Annotation has failed and emitted an error.
  ANK_Error,
  /// The identifier is a tentatively-declared name.
  ANK_TentativeDecl,
  /// The identifier is a template name. FIXME: Add an annotation for that.
  ANK_TemplateName,
  /// The identifier can't be resolved.
  ANK_Unresolved,
  /// Annotation was successful.
  ANK_Success
};

AnnotatedNameKind
TryAnnotateName(bool IsAddressOfOperand,
                std::unique_ptr<CorrectionCandidateCallback> CCC = nullptr);

/// Push a tok::annot_cxxscope token onto the token stream.
void AnnotateScopeToken(CXXScopeSpec &SS, bool IsNewAnnotation);
/// TryAltiVecToken - Check for context-sensitive AltiVec identifier tokens,
/// replacing them with the non-context-sensitive keywords. This returns
/// true if the token was replaced.
bool TryAltiVecToken(DeclSpec &DS, SourceLocation Loc,
                     const char *&PrevSpec, unsigned &DiagID,
                     bool &isInvalid) {
  if (!getLangOpts().AltiVec && !getLangOpts().ZVector)
    return false;
  // Only 'vector', 'bool' and (AltiVec-only) 'pixel' are candidates.
  IdentifierInfo *II = Tok.getIdentifierInfo();
  const bool IsCandidate =
      II == Ident_vector || II == Ident_bool ||
      (getLangOpts().AltiVec && II == Ident_pixel);
  if (!IsCandidate)
    return false;
  return TryAltiVecTokenOutOfLine(DS, Loc, PrevSpec, DiagID, isInvalid);
}
/// TryAltiVecVectorToken - Check for context-sensitive AltiVec vector
/// identifier token, replacing it with the non-context-sensitive __vector.
/// This returns true if the token was replaced.
bool TryAltiVecVectorToken() {
  if (!getLangOpts().AltiVec && !getLangOpts().ZVector)
    return false;
  if (Tok.getIdentifierInfo() != Ident_vector)
    return false;
  return TryAltiVecVectorTokenOutOfLine();
}
bool TryAltiVecVectorTokenOutOfLine();
bool TryAltiVecTokenOutOfLine(DeclSpec &DS, SourceLocation Loc,
                              const char *&PrevSpec, unsigned &DiagID,
                              bool &isInvalid);

/// Returns true if the current token is the identifier 'instancetype'.
///
/// Should only be used in Objective-C language modes.
bool isObjCInstancetype() {
  assert(getLangOpts().ObjC1);
  if (Tok.isAnnotation())
    return false;
  // Lazily resolve the 'instancetype' identifier the first time we're asked.
  if (!Ident_instancetype)
    Ident_instancetype = PP.getIdentifierInfo("instancetype");
  return Tok.getIdentifierInfo() == Ident_instancetype;
}
/// TryKeywordIdentFallback - For compatibility with system headers using
/// keywords as identifiers, attempt to convert the current token to an
/// identifier and optionally disable the keyword for the remainder of the
/// translation unit. This returns false if the token was not replaced,
/// otherwise emits a diagnostic and returns true.
bool TryKeywordIdentFallback(bool DisableKeyword);

/// \brief Get the TemplateIdAnnotation from the token.
TemplateIdAnnotation *takeTemplateIdAnnotation(const Token &tok);
/// TentativeParsingAction - An object that is used as a kind of "tentative
/// parsing transaction". It gets instantiated to mark the token position and
/// after the token consumption is done, Commit() or Revert() is called to
/// either "commit the consumed tokens" or revert to the previously marked
/// token position. Example:
///
///   TentativeParsingAction TPA(*this);
///   ConsumeToken();
///   ....
///   TPA.Revert();
///
class TentativeParsingAction {
  Parser &P;
  Token PrevTok;
  size_t PrevTentativelyDeclaredIdentifierCount;
  unsigned short PrevParenCount, PrevBracketCount, PrevBraceCount;
  bool isActive;

public:
  explicit TentativeParsingAction(Parser& p) : P(p) {
    // Snapshot the current token, the tentative-identifier count and the
    // delimiter counts, then ask the preprocessor to start caching tokens
    // so we can rewind.
    PrevTok = P.Tok;
    PrevTentativelyDeclaredIdentifierCount =
        P.TentativelyDeclaredIdentifiers.size();
    PrevParenCount = P.ParenCount;
    PrevBracketCount = P.BracketCount;
    PrevBraceCount = P.BraceCount;
    P.PP.EnableBacktrackAtThisPos();
    isActive = true;
  }
  void Commit() {
    assert(isActive && "Parsing action was finished!");
    // Keep the consumed tokens and delimiter counts; only identifiers
    // tentatively declared during the transaction are dropped.
    P.TentativelyDeclaredIdentifiers.resize(
        PrevTentativelyDeclaredIdentifierCount);
    P.PP.CommitBacktrackedTokens();
    isActive = false;
  }
  void Revert() {
    assert(isActive && "Parsing action was finished!");
    // Rewind the token stream and restore every piece of saved state.
    P.PP.Backtrack();
    P.Tok = PrevTok;
    P.TentativelyDeclaredIdentifiers.resize(
        PrevTentativelyDeclaredIdentifierCount);
    P.ParenCount = PrevParenCount;
    P.BracketCount = PrevBracketCount;
    P.BraceCount = PrevBraceCount;
    isActive = false;
  }
  ~TentativeParsingAction() {
    assert(!isActive && "Forgot to call Commit or Revert!");
  }
};
/// A TentativeParsingAction that automatically reverts in its destructor.
/// Useful for disambiguation parses that will always be reverted.
class RevertingTentativeParsingAction
    : private Parser::TentativeParsingAction {
public:
  RevertingTentativeParsingAction(Parser &P)
      : Parser::TentativeParsingAction(P) {}
  ~RevertingTentativeParsingAction() { Revert(); }
};
class UnannotatedTentativeParsingAction;

/// ObjCDeclContextSwitch - An object used to switch context from
/// an objective-c decl context to its enclosing decl context and
/// back.
class ObjCDeclContextSwitch {
  Parser &P;
  Decl *DC;
  // Flags ParsingInObjCContainer for the lifetime of this object when we
  // actually have an Objective-C container context.
  SaveAndRestore<bool> WithinObjCContainer;

public:
  explicit ObjCDeclContextSwitch(Parser &p)
      : P(p), DC(p.getObjCDeclContext()),
        WithinObjCContainer(P.ParsingInObjCContainer, DC != nullptr) {
    // Temporarily leave the container on entry; re-enter it on destruction.
    if (DC)
      P.Actions.ActOnObjCTemporaryExitContainerContext(cast<DeclContext>(DC));
  }
  ~ObjCDeclContextSwitch() {
    if (DC)
      P.Actions.ActOnObjCReenterContainerContext(cast<DeclContext>(DC));
  }
};
/// ExpectAndConsume - The parser expects that 'ExpectedTok' is next in the
/// input. If so, it is consumed and false is returned.
///
/// If a trivial punctuator misspelling is encountered, a FixIt error
/// diagnostic is issued and false is returned after recovery.
///
/// If the input is malformed, this emits the specified diagnostic and true is
/// returned.
bool ExpectAndConsume(tok::TokenKind ExpectedTok,
                      unsigned Diag = diag::err_expected,
                      StringRef DiagMsg = "");

/// \brief The parser expects a semicolon and, if present, will consume it.
///
/// If the next token is not a semicolon, this emits the specified diagnostic,
/// or, if there's just some closing-delimiter noise (e.g., ')' or ']') prior
/// to the semicolon, consumes that extra token.
bool ExpectAndConsumeSemi(unsigned DiagID);

/// \brief The kind of extra semi diagnostic to emit.
enum ExtraSemiKind {
  OutsideFunction = 0,
  InsideStruct = 1,
  InstanceVariableList = 2,
  AfterMemberFunctionDefinition = 3
};

/// \brief Consume any extra semi-colons until the end of the line.
void ConsumeExtraSemi(ExtraSemiKind Kind, unsigned TST = TST_unspecified);

/// Return false if the next token is an identifier. An 'expected identifier'
/// error is emitted otherwise.
///
/// The parser tries to recover from the error by checking if the next token
/// is a C++ keyword when parsing Objective-C++. Return false if the recovery
/// was successful.
bool expectIdentifier();
public:
//===--------------------------------------------------------------------===//
// Scope manipulation

/// ParseScope - Introduces a new scope for parsing. The kind of
/// scope is determined by ScopeFlags. Objects of this type should
/// be created on the stack to coincide with the position where the
/// parser enters the new scope, and this object's constructor will
/// create that new scope. Similarly, once the object is destroyed
/// the parser will exit the scope.
class ParseScope {
  Parser *Self;
  ParseScope(const ParseScope &) = delete;
  void operator=(const ParseScope &) = delete;

public:
  // ParseScope - Construct a new object to manage a scope in the
  // parser Self where the new Scope is created with the flags
  // ScopeFlags, but only when we aren't about to enter a compound statement.
  ParseScope(Parser *Self, unsigned ScopeFlags, bool EnteredScope = true,
             bool BeforeCompoundStmt = false)
      : Self(Self) {
    if (EnteredScope && !BeforeCompoundStmt)
      Self->EnterScope(ScopeFlags);
    else {
      // No scope is entered; still bump the MS mangling number when we're
      // in front of a compound statement, and mark this object inert so
      // Exit()/the destructor do nothing.
      if (BeforeCompoundStmt)
        Self->incrementMSManglingNumber();
      this->Self = nullptr;
    }
  }

  // Exit - Exit the scope associated with this object now, rather
  // than waiting until the object is destroyed.
  void Exit() {
    if (Self) {
      Self->ExitScope();
      Self = nullptr;
    }
  }

  ~ParseScope() {
    Exit();
  }
};

/// EnterScope - Start a new scope.
void EnterScope(unsigned ScopeFlags);

/// ExitScope - Pop a scope off the scope stack.
void ExitScope();
private:
/// \brief RAII object used to modify the scope flags for the current scope.
class ParseScopeFlags {
  Scope *CurScope;
  unsigned OldFlags;
  ParseScopeFlags(const ParseScopeFlags &) = delete;
  void operator=(const ParseScopeFlags &) = delete;

public:
  ParseScopeFlags(Parser *Self, unsigned ScopeFlags, bool ManageFlags = true);
  ~ParseScopeFlags();
};

//===--------------------------------------------------------------------===//
// Diagnostic Emission and Error recovery.

public:
DiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID);
DiagnosticBuilder Diag(const Token &Tok, unsigned DiagID);
/// Emit \p DiagID at the location of the current token.
DiagnosticBuilder Diag(unsigned DiagID) {
  return Diag(Tok, DiagID);
}
public:
/// \brief Control flags for SkipUntil functions.
enum SkipUntilFlags {
  StopAtSemi = 1 << 0, ///< Stop skipping at semicolon
  /// \brief Stop skipping at specified token, but don't skip the token itself
  StopBeforeMatch = 1 << 1,
  StopAtCodeCompletion = 1 << 2 ///< Stop at code completion
};

/// Combine two SkipUntilFlags values into one.
friend constexpr SkipUntilFlags operator|(SkipUntilFlags L,
                                          SkipUntilFlags R) {
  return static_cast<SkipUntilFlags>(static_cast<unsigned>(L) |
                                     static_cast<unsigned>(R));
}

/// SkipUntil - Read tokens until we get to the specified token, then consume
/// it (unless StopBeforeMatch is specified). Because we cannot guarantee
/// that the token will ever occur, this skips to the next token, or to some
/// likely good stopping point. If Flags has StopAtSemi flag, skipping will
/// stop at a ';' character.
///
/// If SkipUntil finds the specified token, it returns true, otherwise it
/// returns false.
bool SkipUntil(tok::TokenKind T,
               SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)) {
  return SkipUntil(llvm::makeArrayRef(T), Flags);
}
bool SkipUntil(tok::TokenKind T1, tok::TokenKind T2,
               SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)) {
  tok::TokenKind TokArray[] = {T1, T2};
  return SkipUntil(TokArray, Flags);
}
bool SkipUntil(tok::TokenKind T1, tok::TokenKind T2, tok::TokenKind T3,
               SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)) {
  tok::TokenKind TokArray[] = {T1, T2, T3};
  return SkipUntil(TokArray, Flags);
}
bool SkipUntil(ArrayRef<tok::TokenKind> Toks,
               SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0));

/// SkipMalformedDecl - Read tokens until we get to some likely good stopping
/// point for skipping past a simple-declaration.
void SkipMalformedDecl();
private:
//===--------------------------------------------------------------------===//
// Lexing and parsing of C++ inline methods.

struct ParsingClass;

/// [class.mem]p1: "... the class is regarded as complete within
/// - function bodies
/// - default arguments
/// - exception-specifications (TODO: C++0x)
/// - and brace-or-equal-initializers for non-static data members
/// (including such things in nested classes)."
/// LateParsedDeclarations build the tree of those elements so they can
/// be parsed after parsing the top-level class.
class LateParsedDeclaration {
public:
  virtual ~LateParsedDeclaration();

  virtual void ParseLexedMethodDeclarations();
  virtual void ParseLexedMemberInitializers();
  virtual void ParseLexedMethodDefs();
  virtual void ParseLexedAttributes();
};

/// Inner node of the LateParsedDeclaration tree that parses
/// all its members recursively.
class LateParsedClass : public LateParsedDeclaration {
public:
  LateParsedClass(Parser *P, ParsingClass *C);
  ~LateParsedClass() override;

  void ParseLexedMethodDeclarations() override;
  void ParseLexedMemberInitializers() override;
  void ParseLexedMethodDefs() override;
  void ParseLexedAttributes() override;

private:
  Parser *Self;
  ParsingClass *Class;
};

/// Contains the lexed tokens of an attribute with arguments that
/// may reference member variables and so need to be parsed at the
/// end of the class declaration after parsing all other member
/// declarations.
/// FIXME: Perhaps we should change the name of LateParsedDeclaration to
/// LateParsedTokens.
struct LateParsedAttribute : public LateParsedDeclaration {
  Parser *Self;
  CachedTokens Toks;
  IdentifierInfo &AttrName;
  SourceLocation AttrNameLoc;
  SmallVector<Decl*, 2> Decls;

  explicit LateParsedAttribute(Parser *P, IdentifierInfo &Name,
                               SourceLocation Loc)
      : Self(P), AttrName(Name), AttrNameLoc(Loc) {}

  void ParseLexedAttributes() override;

  void addDecl(Decl *D) { Decls.push_back(D); }
};

// A list of late-parsed attributes. Used by ParseGNUAttributes.
class LateParsedAttrList: public SmallVector<LateParsedAttribute *, 2> {
public:
  LateParsedAttrList(bool PSoon = false) : ParseSoon(PSoon) { }

  bool parseSoon() { return ParseSoon; }

private:
  bool ParseSoon; // Are we planning to parse these shortly after creation?
};
/// Contains the lexed tokens of a member function definition
/// which needs to be parsed at the end of the class declaration
/// after parsing all other member declarations.
struct LexedMethod : public LateParsedDeclaration {
  Parser *Self;
  Decl *D;
  CachedTokens Toks;

  /// \brief Whether this member function had an associated template
  /// scope. When true, D is a template declaration.
  /// otherwise, it is a member function declaration.
  bool TemplateScope;

  explicit LexedMethod(Parser* P, Decl *MD)
      : Self(P), D(MD), TemplateScope(false) {}

  void ParseLexedMethodDefs() override;
};

/// LateParsedDefaultArgument - Keeps track of a parameter that may
/// have a default argument that cannot be parsed yet because it
/// occurs within a member function declaration inside the class
/// (C++ [class.mem]p2).
struct LateParsedDefaultArgument {
  explicit LateParsedDefaultArgument(Decl *P,
                                     std::unique_ptr<CachedTokens> Toks = nullptr)
      : Param(P), Toks(std::move(Toks)) { }

  /// Param - The parameter declaration for this parameter.
  Decl *Param;

  /// Toks - The sequence of tokens that comprises the default
  /// argument expression, not including the '=' or the terminating
  /// ')' or ','. This will be NULL for parameters that have no
  /// default argument.
  std::unique_ptr<CachedTokens> Toks;
};

/// LateParsedMethodDeclaration - A method declaration inside a class that
/// contains at least one entity whose parsing needs to be delayed
/// until the class itself is completely-defined, such as a default
/// argument (C++ [class.mem]p2).
struct LateParsedMethodDeclaration : public LateParsedDeclaration {
  explicit LateParsedMethodDeclaration(Parser *P, Decl *M)
      : Self(P), Method(M), TemplateScope(false),
        ExceptionSpecTokens(nullptr) {}

  void ParseLexedMethodDeclarations() override;

  Parser* Self;

  /// Method - The method declaration.
  Decl *Method;

  /// \brief Whether this member function had an associated template
  /// scope. When true, D is a template declaration.
  /// otherwise, it is a member function declaration.
  bool TemplateScope;

  /// DefaultArgs - Contains the parameters of the function and
  /// their default arguments. At least one of the parameters will
  /// have a default argument, but all of the parameters of the
  /// method will be stored so that they can be reintroduced into
  /// scope at the appropriate times.
  SmallVector<LateParsedDefaultArgument, 8> DefaultArgs;

  /// \brief The set of tokens that make up an exception-specification that
  /// has not yet been parsed.
  CachedTokens *ExceptionSpecTokens;
};
/// LateParsedMemberInitializer - An initializer for a non-static class data
/// member whose parsing must be delayed until the class is completely
/// defined (C++11 [class.mem]p2).
struct LateParsedMemberInitializer : public LateParsedDeclaration {
  LateParsedMemberInitializer(Parser *P, Decl *FD)
      : Self(P), Field(FD) { }

  void ParseLexedMemberInitializers() override;

  Parser *Self;

  /// Field - The field declaration.
  Decl *Field;

  /// CachedTokens - The sequence of tokens that comprises the initializer,
  /// including any leading '='.
  CachedTokens Toks;
};

/// LateParsedDeclarationsContainer - During parsing of a top (non-nested)
/// C++ class, its method declarations that contain parts that won't be
/// parsed until after the definition is completed (C++ [class.mem]p2),
/// the method declarations and possibly attached inline definitions
/// will be stored here with the tokens that will be parsed to create those
/// entities.
typedef SmallVector<LateParsedDeclaration*,2> LateParsedDeclarationsContainer;

/// \brief Representation of a class that has been parsed, including
/// any member function declarations or definitions that need to be
/// parsed after the corresponding top-level class is complete.
struct ParsingClass {
  ParsingClass(Decl *TagOrTemplate, bool TopLevelClass, bool IsInterface)
      : TopLevelClass(TopLevelClass), TemplateScope(false),
        IsInterface(IsInterface), TagOrTemplate(TagOrTemplate) { }

  /// \brief Whether this is a "top-level" class, meaning that it is
  /// not nested within another class.
  bool TopLevelClass : 1;

  /// \brief Whether this class had an associated template
  /// scope. When true, TagOrTemplate is a template declaration;
  /// otherwise, it is a tag declaration.
  bool TemplateScope : 1;

  /// \brief Whether this class is an __interface.
  bool IsInterface : 1;

  /// \brief The class or class template whose definition we are parsing.
  Decl *TagOrTemplate;

  /// LateParsedDeclarations - Method declarations, inline definitions and
  /// nested classes that contain pieces whose parsing will be delayed until
  /// the top-level class is fully defined.
  LateParsedDeclarationsContainer LateParsedDeclarations;
};
/// \brief The stack of classes that is currently being
/// parsed. Nested and local classes will be pushed onto this stack
/// when they are parsed, and removed afterward.
std::stack<ParsingClass *> ClassStack;

/// Return the innermost class currently being parsed; requires a
/// non-empty ClassStack.
ParsingClass &getCurrentClass() {
  assert(!ClassStack.empty() && "No lexed method stacks!");
  return *ClassStack.top();
}

/// \brief RAII object used to manage the parsing of a class definition.
class ParsingClassDefinition {
  Parser &P;
  bool Popped;
  Sema::ParsingClassState State;

public:
  ParsingClassDefinition(Parser &P, Decl *TagOrTemplate, bool TopLevelClass,
                         bool IsInterface)
      : P(P), Popped(false),
        State(P.PushParsingClass(TagOrTemplate, TopLevelClass, IsInterface)) {
  }

  /// \brief Pop this class off the stack.
  void Pop() {
    assert(!Popped && "Nested class has already been popped");
    Popped = true;
    P.PopParsingClass(State);
  }

  ~ParsingClassDefinition() {
    // Pop automatically unless Pop() was called explicitly.
    if (!Popped)
      P.PopParsingClass(State);
  }
};
/// \brief Contains information about any template-specific
/// information that has been parsed prior to parsing declaration
/// specifiers.
struct ParsedTemplateInfo {
  /// Default-construct as "not a template".
  ParsedTemplateInfo()
      : Kind(NonTemplate), TemplateParams(nullptr), TemplateLoc() { }

  /// Construct for a template declaration or explicit specialization.
  ParsedTemplateInfo(TemplateParameterLists *TemplateParams,
                     bool isSpecialization,
                     bool lastParameterListWasEmpty = false)
      : Kind(isSpecialization? ExplicitSpecialization : Template),
        TemplateParams(TemplateParams),
        LastParameterListWasEmpty(lastParameterListWasEmpty) { }

  /// Construct for an explicit instantiation.
  explicit ParsedTemplateInfo(SourceLocation ExternLoc,
                              SourceLocation TemplateLoc)
      : Kind(ExplicitInstantiation), TemplateParams(nullptr),
        ExternLoc(ExternLoc), TemplateLoc(TemplateLoc),
        LastParameterListWasEmpty(false){ }

  /// \brief The kind of template we are parsing.
  enum {
    /// \brief We are not parsing a template at all.
    NonTemplate = 0,
    /// \brief We are parsing a template declaration.
    Template,
    /// \brief We are parsing an explicit specialization.
    ExplicitSpecialization,
    /// \brief We are parsing an explicit instantiation.
    ExplicitInstantiation
  } Kind;

  /// \brief The template parameter lists, for template declarations
  /// and explicit specializations.
  TemplateParameterLists *TemplateParams;

  /// \brief The location of the 'extern' keyword, if any, for an explicit
  /// instantiation
  SourceLocation ExternLoc;

  /// \brief The location of the 'template' keyword, for an explicit
  /// instantiation.
  SourceLocation TemplateLoc;

  /// \brief Whether the last template parameter list was empty.
  bool LastParameterListWasEmpty;

  SourceRange getSourceRange() const LLVM_READONLY;
};
void LexTemplateFunctionForLateParsing(CachedTokens &Toks);
void ParseLateTemplatedFuncDef(LateParsedTemplate &LPT);
static void LateTemplateParserCallback(void *P, LateParsedTemplate &LPT);
static void LateTemplateParserCleanupCallback(void *P);
Sema::ParsingClassState
PushParsingClass(Decl *TagOrTemplate, bool TopLevelClass, bool IsInterface);
void DeallocateParsedClasses(ParsingClass *Class);
void PopParsingClass(Sema::ParsingClassState);
enum CachedInitKind {
CIK_DefaultArgument,
CIK_DefaultInitializer
};
NamedDecl *ParseCXXInlineMethodDef(AccessSpecifier AS,
AttributeList *AccessAttrs,
ParsingDeclarator &D,
const ParsedTemplateInfo &TemplateInfo,
const VirtSpecifiers& VS,
SourceLocation PureSpecLoc);
void ParseCXXNonStaticMemberInitializer(Decl *VarD);
void ParseLexedAttributes(ParsingClass &Class);
void ParseLexedAttributeList(LateParsedAttrList &LAs, Decl *D,
bool EnterScope, bool OnDefinition);
void ParseLexedAttribute(LateParsedAttribute &LA,
bool EnterScope, bool OnDefinition);
void ParseLexedMethodDeclarations(ParsingClass &Class);
void ParseLexedMethodDeclaration(LateParsedMethodDeclaration &LM);
void ParseLexedMethodDefs(ParsingClass &Class);
void ParseLexedMethodDef(LexedMethod &LM);
void ParseLexedMemberInitializers(ParsingClass &Class);
void ParseLexedMemberInitializer(LateParsedMemberInitializer &MI);
void ParseLexedObjCMethodDefs(LexedMethod &LM, bool parseMethod);
bool ConsumeAndStoreFunctionPrologue(CachedTokens &Toks);
bool ConsumeAndStoreInitializer(CachedTokens &Toks, CachedInitKind CIK);
bool ConsumeAndStoreConditional(CachedTokens &Toks);
/// Convenience overload of the two-kind ConsumeAndStoreUntil below that
/// stops at a single token kind, by forwarding \p T1 as both stop kinds.
bool ConsumeAndStoreUntil(tok::TokenKind T1,
                          CachedTokens &Toks,
                          bool StopAtSemi = true,
                          bool ConsumeFinalToken = true) {
  return ConsumeAndStoreUntil(T1, T1, Toks, StopAtSemi, ConsumeFinalToken);
}
bool ConsumeAndStoreUntil(tok::TokenKind T1, tok::TokenKind T2,
CachedTokens &Toks,
bool StopAtSemi = true,
bool ConsumeFinalToken = true);
//===--------------------------------------------------------------------===//
// C99 6.9: External Definitions.
/// A ParsedAttributes list that additionally records the source range
/// covered by the attributes (e.g. queried by ProhibitAttributes to decide
/// whether any attributes were actually parsed).
struct ParsedAttributesWithRange : ParsedAttributes {
  ParsedAttributesWithRange(AttributeFactory &factory)
    : ParsedAttributes(factory) {}
  /// Drop all attributes and reset Range to the default (invalid) range.
  void clear() {
    ParsedAttributes::clear();
    Range = SourceRange();
  }
  SourceRange Range; // source range spanned by the parsed attributes
};
DeclGroupPtrTy ParseExternalDeclaration(ParsedAttributesWithRange &attrs,
ParsingDeclSpec *DS = nullptr);
bool isDeclarationAfterDeclarator();
bool isStartOfFunctionDefinition(const ParsingDeclarator &Declarator);
DeclGroupPtrTy ParseDeclarationOrFunctionDefinition(
ParsedAttributesWithRange &attrs,
ParsingDeclSpec *DS = nullptr,
AccessSpecifier AS = AS_none);
DeclGroupPtrTy ParseDeclOrFunctionDefInternal(ParsedAttributesWithRange &attrs,
ParsingDeclSpec &DS,
AccessSpecifier AS);
void SkipFunctionBody();
Decl *ParseFunctionDefinition(ParsingDeclarator &D,
const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
LateParsedAttrList *LateParsedAttrs = nullptr);
void ParseKNRParamDeclarations(Declarator &D);
// EndLoc, if non-NULL, is filled with the location of the last token of
// the simple-asm.
ExprResult ParseSimpleAsm(SourceLocation *EndLoc = nullptr);
ExprResult ParseAsmStringLiteral();
// Objective-C External Declarations
void MaybeSkipAttributes(tok::ObjCKeywordKind Kind);
DeclGroupPtrTy ParseObjCAtDirectives();
DeclGroupPtrTy ParseObjCAtClassDeclaration(SourceLocation atLoc);
Decl *ParseObjCAtInterfaceDeclaration(SourceLocation AtLoc,
ParsedAttributes &prefixAttrs);
class ObjCTypeParamListScope;
ObjCTypeParamList *parseObjCTypeParamList();
ObjCTypeParamList *parseObjCTypeParamListOrProtocolRefs(
ObjCTypeParamListScope &Scope, SourceLocation &lAngleLoc,
SmallVectorImpl<IdentifierLocPair> &protocolIdents,
SourceLocation &rAngleLoc, bool mayBeProtocolList = true);
void HelperActionsForIvarDeclarations(Decl *interfaceDecl, SourceLocation atLoc,
BalancedDelimiterTracker &T,
SmallVectorImpl<Decl *> &AllIvarDecls,
bool RBraceMissing);
void ParseObjCClassInstanceVariables(Decl *interfaceDecl,
tok::ObjCKeywordKind visibility,
SourceLocation atLoc);
bool ParseObjCProtocolReferences(SmallVectorImpl<Decl *> &P,
SmallVectorImpl<SourceLocation> &PLocs,
bool WarnOnDeclarations,
bool ForObjCContainer,
SourceLocation &LAngleLoc,
SourceLocation &EndProtoLoc,
bool consumeLastToken);
/// Parse the first angle-bracket-delimited clause for an
/// Objective-C object or object pointer type, which may be either
/// type arguments or protocol qualifiers.
void parseObjCTypeArgsOrProtocolQualifiers(
ParsedType baseType,
SourceLocation &typeArgsLAngleLoc,
SmallVectorImpl<ParsedType> &typeArgs,
SourceLocation &typeArgsRAngleLoc,
SourceLocation &protocolLAngleLoc,
SmallVectorImpl<Decl *> &protocols,
SmallVectorImpl<SourceLocation> &protocolLocs,
SourceLocation &protocolRAngleLoc,
bool consumeLastToken,
bool warnOnIncompleteProtocols);
/// Parse either Objective-C type arguments or protocol qualifiers; if the
/// former, also parse protocol qualifiers afterward.
void parseObjCTypeArgsAndProtocolQualifiers(
ParsedType baseType,
SourceLocation &typeArgsLAngleLoc,
SmallVectorImpl<ParsedType> &typeArgs,
SourceLocation &typeArgsRAngleLoc,
SourceLocation &protocolLAngleLoc,
SmallVectorImpl<Decl *> &protocols,
SmallVectorImpl<SourceLocation> &protocolLocs,
SourceLocation &protocolRAngleLoc,
bool consumeLastToken);
/// Parse a protocol qualifier type such as '<NSCopying>', which is
/// an anachronistic way of writing 'id<NSCopying>'.
TypeResult parseObjCProtocolQualifierType(SourceLocation &rAngleLoc);
/// Parse Objective-C type arguments and protocol qualifiers, extending the
/// current type with the parsed result.
TypeResult parseObjCTypeArgsAndProtocolQualifiers(SourceLocation loc,
ParsedType type,
bool consumeLastToken,
SourceLocation &endLoc);
void ParseObjCInterfaceDeclList(tok::ObjCKeywordKind contextKey,
Decl *CDecl);
DeclGroupPtrTy ParseObjCAtProtocolDeclaration(SourceLocation atLoc,
ParsedAttributes &prefixAttrs);
/// RAII object active while parsing an Objective-C @implementation.
/// On construction it installs itself as Parser::CurParsedObjCImpl;
/// method bodies encountered inside are stashed in LateParsedObjCMethods
/// for later parsing.
struct ObjCImplParsingDataRAII {
  Parser &P;            // the owning parser
  Decl *Dcl;            // the @implementation declaration being parsed
  bool HasCFunction;    // whether a C function appeared inside the @implementation
  typedef SmallVector<LexedMethod*, 8> LateParsedObjCMethodContainer;
  LateParsedObjCMethodContainer LateParsedObjCMethods; // deferred method bodies
  ObjCImplParsingDataRAII(Parser &parser, Decl *D)
    : P(parser), Dcl(D), HasCFunction(false) {
    // Make this the parser's active implementation context.
    P.CurParsedObjCImpl = this;
    Finished = false;
  }
  ~ObjCImplParsingDataRAII(); // defined out of line
  void finish(SourceRange AtEnd);
  bool isFinished() const { return Finished; }
private:
  bool Finished; // presumably set by finish() — only read here via isFinished()
};
ObjCImplParsingDataRAII *CurParsedObjCImpl;
void StashAwayMethodOrFunctionBodyTokens(Decl *MDecl);
DeclGroupPtrTy ParseObjCAtImplementationDeclaration(SourceLocation AtLoc);
DeclGroupPtrTy ParseObjCAtEndDeclaration(SourceRange atEnd);
Decl *ParseObjCAtAliasDeclaration(SourceLocation atLoc);
Decl *ParseObjCPropertySynthesize(SourceLocation atLoc);
Decl *ParseObjCPropertyDynamic(SourceLocation atLoc);
IdentifierInfo *ParseObjCSelectorPiece(SourceLocation &MethodLocation);
// Definitions for Objective-c context sensitive keywords recognition.
// Objective-C context-sensitive type qualifiers; values index the
// ObjCTypeQuals identifier table declared below.
enum ObjCTypeQual {
  objc_in=0, objc_out, objc_inout, objc_oneway, objc_bycopy, objc_byref,
  objc_nonnull, objc_nullable, objc_null_unspecified,
  objc_NumQuals // count of qualifiers; sizes ObjCTypeQuals
};
IdentifierInfo *ObjCTypeQuals[objc_NumQuals];
bool isTokIdentifier_in() const;
ParsedType ParseObjCTypeName(ObjCDeclSpec &DS, Declarator::TheContext Ctx,
ParsedAttributes *ParamAttrs);
void ParseObjCMethodRequirement();
Decl *ParseObjCMethodPrototype(
tok::ObjCKeywordKind MethodImplKind = tok::objc_not_keyword,
bool MethodDefinition = true);
Decl *ParseObjCMethodDecl(SourceLocation mLoc, tok::TokenKind mType,
tok::ObjCKeywordKind MethodImplKind = tok::objc_not_keyword,
bool MethodDefinition=true);
void ParseObjCPropertyAttribute(ObjCDeclSpec &DS);
Decl *ParseObjCMethodDefinition();
public:
//===--------------------------------------------------------------------===//
// C99 6.5: Expressions.
/// TypeCastState - State whether an expression is or may be a type cast.
enum TypeCastState {
  NotTypeCast = 0, // known not to be a type cast
  MaybeTypeCast,   // might be a type cast
  IsTypeCast       // known to be a type cast
};
ExprResult ParseExpression(TypeCastState isTypeCast = NotTypeCast);
ExprResult ParseConstantExpression(TypeCastState isTypeCast = NotTypeCast);
ExprResult ParseConstraintExpression();
// Expr that doesn't include commas.
ExprResult ParseAssignmentExpression(TypeCastState isTypeCast = NotTypeCast);
ExprResult ParseMSAsmIdentifier(llvm::SmallVectorImpl<Token> &LineToks,
unsigned &NumLineToksConsumed,
void *Info,
bool IsUnevaluated);
private:
ExprResult ParseExpressionWithLeadingAt(SourceLocation AtLoc);
ExprResult ParseExpressionWithLeadingExtension(SourceLocation ExtLoc);
ExprResult ParseRHSOfBinaryExpression(ExprResult LHS,
prec::Level MinPrec);
ExprResult ParseCastExpression(bool isUnaryExpression,
bool isAddressOfOperand,
bool &NotCastExpr,
TypeCastState isTypeCast);
ExprResult ParseCastExpression(bool isUnaryExpression,
bool isAddressOfOperand = false,
TypeCastState isTypeCast = NotTypeCast);
/// Returns true if the next token cannot start an expression.
bool isNotExpressionStart();
/// Returns true if the next token would start a postfix-expression
/// suffix.
bool isPostfixExpressionSuffixStart() {
tok::TokenKind K = Tok.getKind();
return (K == tok::l_square || K == tok::l_paren ||
K == tok::period || K == tok::arrow ||
K == tok::plusplus || K == tok::minusminus);
}
ExprResult ParsePostfixExpressionSuffix(ExprResult LHS);
ExprResult ParseUnaryExprOrTypeTraitExpression();
ExprResult ParseBuiltinPrimaryExpression();
ExprResult ParseExprAfterUnaryExprOrTypeTrait(const Token &OpTok,
bool &isCastExpr,
ParsedType &CastTy,
SourceRange &CastRange);
typedef SmallVector<Expr*, 20> ExprListTy;
typedef SmallVector<SourceLocation, 20> CommaLocsTy;
/// ParseExpressionList - Used for C/C++ (argument-)expression-list.
bool ParseExpressionList(SmallVectorImpl<Expr *> &Exprs,
SmallVectorImpl<SourceLocation> &CommaLocs,
std::function<void()> Completer = nullptr);
/// ParseSimpleExpressionList - A simple comma-separated list of expressions,
/// used for misc language extensions.
bool ParseSimpleExpressionList(SmallVectorImpl<Expr*> &Exprs,
SmallVectorImpl<SourceLocation> &CommaLocs);
/// ParenParseOption - Control what ParseParenExpression will parse.
/// Each successive value also permits everything the earlier values do.
enum ParenParseOption {
  SimpleExpr,      // Only parse '(' expression ')'
  CompoundStmt,    // Also allow '(' compound-statement ')'
  CompoundLiteral, // Also allow '(' type-name ')' '{' ... '}'
  CastExpr         // Also allow '(' type-name ')' <anything>
};
ExprResult ParseParenExpression(ParenParseOption &ExprType,
bool stopIfCastExpr,
bool isTypeCast,
ParsedType &CastTy,
SourceLocation &RParenLoc);
ExprResult ParseCXXAmbiguousParenExpression(
ParenParseOption &ExprType, ParsedType &CastTy,
BalancedDelimiterTracker &Tracker, ColonProtectionRAIIObject &ColonProt);
ExprResult ParseCompoundLiteralExpression(ParsedType Ty,
SourceLocation LParenLoc,
SourceLocation RParenLoc);
ExprResult ParseStringLiteralExpression(bool AllowUserDefinedLiteral = false);
ExprResult ParseGenericSelectionExpression();
ExprResult ParseObjCBoolLiteral();
ExprResult ParseFoldExpression(ExprResult LHS, BalancedDelimiterTracker &T);
//===--------------------------------------------------------------------===//
// C++ Expressions
ExprResult tryParseCXXIdExpression(CXXScopeSpec &SS, bool isAddressOfOperand,
Token &Replacement);
ExprResult ParseCXXIdExpression(bool isAddressOfOperand = false);
bool areTokensAdjacent(const Token &A, const Token &B);
void CheckForTemplateAndDigraph(Token &Next, ParsedType ObjectTypePtr,
bool EnteringContext, IdentifierInfo &II,
CXXScopeSpec &SS);
bool ParseOptionalCXXScopeSpecifier(CXXScopeSpec &SS,
ParsedType ObjectType,
bool EnteringContext,
bool *MayBePseudoDestructor = nullptr,
bool IsTypename = false,
IdentifierInfo **LastII = nullptr);
//===--------------------------------------------------------------------===//
// C++0x 5.1.2: Lambda expressions
// [...] () -> type {...}
ExprResult ParseLambdaExpression();
ExprResult TryParseLambdaExpression();
Optional<unsigned> ParseLambdaIntroducer(LambdaIntroducer &Intro,
bool *SkippedInits = nullptr);
bool TryParseLambdaIntroducer(LambdaIntroducer &Intro);
ExprResult ParseLambdaExpressionAfterIntroducer(
LambdaIntroducer &Intro);
//===--------------------------------------------------------------------===//
// C++ 5.2p1: C++ Casts
ExprResult ParseCXXCasts();
//===--------------------------------------------------------------------===//
// C++ 5.2p1: C++ Type Identification
ExprResult ParseCXXTypeid();
//===--------------------------------------------------------------------===//
// C++ : Microsoft __uuidof Expression
ExprResult ParseCXXUuidof();
//===--------------------------------------------------------------------===//
// C++ 5.2.4: C++ Pseudo-Destructor Expressions
ExprResult ParseCXXPseudoDestructor(Expr *Base, SourceLocation OpLoc,
tok::TokenKind OpKind,
CXXScopeSpec &SS,
ParsedType ObjectType);
//===--------------------------------------------------------------------===//
// C++ 9.3.2: C++ 'this' pointer
ExprResult ParseCXXThis();
//===--------------------------------------------------------------------===//
// C++ 15: C++ Throw Expression
ExprResult ParseThrowExpression();
ExceptionSpecificationType tryParseExceptionSpecification(
bool Delayed,
SourceRange &SpecificationRange,
SmallVectorImpl<ParsedType> &DynamicExceptions,
SmallVectorImpl<SourceRange> &DynamicExceptionRanges,
ExprResult &NoexceptExpr,
CachedTokens *&ExceptionSpecTokens);
// EndLoc is filled with the location of the last token of the specification.
ExceptionSpecificationType ParseDynamicExceptionSpecification(
SourceRange &SpecificationRange,
SmallVectorImpl<ParsedType> &Exceptions,
SmallVectorImpl<SourceRange> &Ranges);
//===--------------------------------------------------------------------===//
// C++0x 8: Function declaration trailing-return-type
TypeResult ParseTrailingReturnType(SourceRange &Range);
//===--------------------------------------------------------------------===//
// C++ 2.13.5: C++ Boolean Literals
ExprResult ParseCXXBoolLiteral();
//===--------------------------------------------------------------------===//
// C++ 5.2.3: Explicit type conversion (functional notation)
ExprResult ParseCXXTypeConstructExpression(const DeclSpec &DS);
/// ParseCXXSimpleTypeSpecifier - [C++ 7.1.5.2] Simple type specifiers.
/// This should only be called when the current token is known to be part of
/// simple-type-specifier.
void ParseCXXSimpleTypeSpecifier(DeclSpec &DS);
bool ParseCXXTypeSpecifierSeq(DeclSpec &DS);
//===--------------------------------------------------------------------===//
// C++ 5.3.4 and 5.3.5: C++ new and delete
bool ParseExpressionListOrTypeId(SmallVectorImpl<Expr*> &Exprs,
Declarator &D);
void ParseDirectNewDeclarator(Declarator &D);
ExprResult ParseCXXNewExpression(bool UseGlobal, SourceLocation Start);
ExprResult ParseCXXDeleteExpression(bool UseGlobal,
SourceLocation Start);
//===--------------------------------------------------------------------===//
// C++ if/switch/while condition expression.
Sema::ConditionResult ParseCXXCondition(StmtResult *InitStmt,
SourceLocation Loc,
Sema::ConditionKind CK);
//===--------------------------------------------------------------------===//
// C++ Coroutines
ExprResult ParseCoyieldExpression();
//===--------------------------------------------------------------------===//
// C99 6.7.8: Initialization.
/// ParseInitializer
/// initializer: [C99 6.7.8]
///   assignment-expression
///   '{' ...
/// Dispatches on the current token: a '{' starts a braced initializer,
/// anything else is an assignment-expression.
ExprResult ParseInitializer() {
  if (Tok.is(tok::l_brace))
    return ParseBraceInitializer();
  return ParseAssignmentExpression();
}
bool MayBeDesignationStart();
ExprResult ParseBraceInitializer();
ExprResult ParseInitializerWithPotentialDesignator();
//===--------------------------------------------------------------------===//
// clang Expressions
ExprResult ParseBlockLiteralExpression(); // ^{...}
//===--------------------------------------------------------------------===//
// Objective-C Expressions
ExprResult ParseObjCAtExpression(SourceLocation AtLocation);
ExprResult ParseObjCStringLiteral(SourceLocation AtLoc);
ExprResult ParseObjCCharacterLiteral(SourceLocation AtLoc);
ExprResult ParseObjCNumericLiteral(SourceLocation AtLoc);
ExprResult ParseObjCBooleanLiteral(SourceLocation AtLoc, bool ArgValue);
ExprResult ParseObjCArrayLiteral(SourceLocation AtLoc);
ExprResult ParseObjCDictionaryLiteral(SourceLocation AtLoc);
ExprResult ParseObjCBoxedExpr(SourceLocation AtLoc);
ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc);
ExprResult ParseObjCSelectorExpression(SourceLocation AtLoc);
ExprResult ParseObjCProtocolExpression(SourceLocation AtLoc);
bool isSimpleObjCMessageExpression();
ExprResult ParseObjCMessageExpression();
ExprResult ParseObjCMessageExpressionBody(SourceLocation LBracloc,
SourceLocation SuperLoc,
ParsedType ReceiverType,
Expr *ReceiverExpr);
ExprResult ParseAssignmentExprWithObjCMessageExprStart(
SourceLocation LBracloc, SourceLocation SuperLoc,
ParsedType ReceiverType, Expr *ReceiverExpr);
bool ParseObjCXXMessageReceiver(bool &IsExpr, void *&TypeOrExpr);
//===--------------------------------------------------------------------===//
// C99 6.8: Statements and Blocks.
/// A SmallVector of statements, with stack size 32 (as that is the only one
/// used.)
typedef SmallVector<Stmt*, 32> StmtVector;
/// A SmallVector of expressions, with stack size 12 (the maximum used.)
typedef SmallVector<Expr*, 12> ExprVector;
/// A SmallVector of types.
typedef SmallVector<ParsedType, 12> TypeVector;
StmtResult ParseStatement(SourceLocation *TrailingElseLoc = nullptr,
bool AllowOpenMPStandalone = false);
/// Which constructs are permitted at a statement position.
/// (NOTE: the identifier's "Contsructs" spelling is historical and is
/// referenced throughout this header — do not rename in isolation.)
enum AllowedContsructsKind {
  /// \brief Allow any declarations, statements, OpenMP directives.
  ACK_Any,
  /// \brief Allow only statements and non-standalone OpenMP directives.
  ACK_StatementsOpenMPNonStandalone,
  /// \brief Allow statements and all executable OpenMP directives
  ACK_StatementsOpenMPAnyExecutable
};
StmtResult
ParseStatementOrDeclaration(StmtVector &Stmts, AllowedContsructsKind Allowed,
SourceLocation *TrailingElseLoc = nullptr);
StmtResult ParseStatementOrDeclarationAfterAttributes(
StmtVector &Stmts,
AllowedContsructsKind Allowed,
SourceLocation *TrailingElseLoc,
ParsedAttributesWithRange &Attrs);
StmtResult ParseExprStatement();
StmtResult ParseLabeledStatement(ParsedAttributesWithRange &attrs);
StmtResult ParseCaseStatement(bool MissingCase = false,
ExprResult Expr = ExprResult());
StmtResult ParseDefaultStatement();
StmtResult ParseCompoundStatement(bool isStmtExpr = false);
StmtResult ParseCompoundStatement(bool isStmtExpr,
unsigned ScopeFlags);
void ParseCompoundStatementLeadingPragmas();
StmtResult ParseCompoundStatementBody(bool isStmtExpr = false);
bool ParseParenExprOrCondition(StmtResult *InitStmt,
Sema::ConditionResult &CondResult,
SourceLocation Loc,
Sema::ConditionKind CK);
StmtResult ParseIfStatement(SourceLocation *TrailingElseLoc);
StmtResult ParseSwitchStatement(SourceLocation *TrailingElseLoc);
StmtResult ParseWhileStatement(SourceLocation *TrailingElseLoc);
StmtResult ParseDoStatement();
StmtResult ParseForStatement(SourceLocation *TrailingElseLoc);
StmtResult ParseGotoStatement();
StmtResult ParseContinueStatement();
StmtResult ParseBreakStatement();
StmtResult ParseReturnStatement();
StmtResult ParseAsmStatement(bool &msAsm);
StmtResult ParseMicrosoftAsmStatement(SourceLocation AsmLoc);
StmtResult ParsePragmaLoopHint(StmtVector &Stmts,
AllowedContsructsKind Allowed,
SourceLocation *TrailingElseLoc,
ParsedAttributesWithRange &Attrs);
/// \brief Describes the behavior that should be taken for an __if_exists
/// block (a Microsoft extension).
enum IfExistsBehavior {
  /// \brief Parse the block; this code is always used.
  IEB_Parse,
  /// \brief Skip the block entirely; this code is never used.
  IEB_Skip,
  /// \brief Parse the block as a dependent block, which may be used in
  /// some template instantiations but not others.
  IEB_Dependent
};
/// \brief Describes the condition of a Microsoft __if_exists or
/// __if_not_exists block.
struct IfExistsCondition {
  /// \brief The location of the initial keyword.
  SourceLocation KeywordLoc;
  /// \brief Whether this is an __if_exists block (rather than an
  /// __if_not_exists block).
  bool IsIfExists;
  /// \brief Nested-name-specifier preceding the name.
  CXXScopeSpec SS;
  /// \brief The name we're looking for.
  UnqualifiedId Name;
  /// \brief The behavior that this __if_exists or __if_not_exists block
  /// should have.
  IfExistsBehavior Behavior;
};
bool ParseMicrosoftIfExistsCondition(IfExistsCondition& Result);
void ParseMicrosoftIfExistsStatement(StmtVector &Stmts);
void ParseMicrosoftIfExistsExternalDeclaration();
void ParseMicrosoftIfExistsClassDeclaration(DeclSpec::TST TagType,
AccessSpecifier& CurAS);
bool ParseMicrosoftIfExistsBraceInitializer(ExprVector &InitExprs,
bool &InitExprsOk);
bool ParseAsmOperandsOpt(SmallVectorImpl<IdentifierInfo *> &Names,
SmallVectorImpl<Expr *> &Constraints,
SmallVectorImpl<Expr *> &Exprs);
//===--------------------------------------------------------------------===//
// C++ 6: Statements and Blocks
StmtResult ParseCXXTryBlock();
StmtResult ParseCXXTryBlockCommon(SourceLocation TryLoc, bool FnTry = false);
StmtResult ParseCXXCatchBlock(bool FnCatch = false);
//===--------------------------------------------------------------------===//
// MS: SEH Statements and Blocks
StmtResult ParseSEHTryBlock();
StmtResult ParseSEHExceptBlock(SourceLocation Loc);
StmtResult ParseSEHFinallyBlock(SourceLocation Loc);
StmtResult ParseSEHLeaveStatement();
//===--------------------------------------------------------------------===//
// Objective-C Statements
StmtResult ParseObjCAtStatement(SourceLocation atLoc);
StmtResult ParseObjCTryStmt(SourceLocation atLoc);
StmtResult ParseObjCThrowStmt(SourceLocation atLoc);
StmtResult ParseObjCSynchronizedStmt(SourceLocation atLoc);
StmtResult ParseObjCAutoreleasePoolStmt(SourceLocation atLoc);
//===--------------------------------------------------------------------===//
// C99 6.7: Declarations.
/// A context for parsing declaration specifiers (passed to
/// ParseDeclarationSpecifiers and related routines). TODO: flesh this
/// out, there are other significant restrictions on specifiers than
/// would be best implemented in the parser.
enum DeclSpecContext {
  DSC_normal, // normal context
  DSC_class, // class context, enables 'friend'
  DSC_type_specifier, // C++ type-specifier-seq or C specifier-qualifier-list
  DSC_trailing, // C++11 trailing-type-specifier in a trailing return type
  DSC_alias_declaration, // C++11 type-specifier-seq in an alias-declaration
  DSC_top_level, // top-level/namespace declaration context
  DSC_template_type_arg, // template type argument context
  DSC_objc_method_result, // ObjC method result context, enables 'instancetype'
  DSC_condition // condition declaration context
};
/// Is this a context in which we are parsing just a type-specifier (or
/// trailing-type-specifier)?
static bool isTypeSpecifier(DeclSpecContext DSC) {
  // Fully covered switch: the compiler will warn if an enumerator is added
  // without updating this function.
  switch (DSC) {
  case DSC_type_specifier:
  case DSC_trailing:
  case DSC_alias_declaration:
  case DSC_template_type_arg:
    return true;
  case DSC_normal:
  case DSC_class:
  case DSC_top_level:
  case DSC_objc_method_result:
  case DSC_condition:
    return false;
  }
  llvm_unreachable("Missing DeclSpecContext case");
}
/// Information on a C++0x for-range-initializer found while parsing a
/// declaration which turns out to be a for-range-declaration.
struct ForRangeInit {
  SourceLocation ColonLoc; // location of the ':' in the for-range-declaration
  ExprResult RangeExpr;    // the range expression following the ':'
  /// True when a for-range-declaration (i.e. its ':') was actually seen.
  bool ParsedForRangeDecl() { return ColonLoc.isValid(); }
};
DeclGroupPtrTy ParseDeclaration(unsigned Context, SourceLocation &DeclEnd,
ParsedAttributesWithRange &attrs);
DeclGroupPtrTy ParseSimpleDeclaration(unsigned Context,
SourceLocation &DeclEnd,
ParsedAttributesWithRange &attrs,
bool RequireSemi,
ForRangeInit *FRI = nullptr);
bool MightBeDeclarator(unsigned Context);
DeclGroupPtrTy ParseDeclGroup(ParsingDeclSpec &DS, unsigned Context,
SourceLocation *DeclEnd = nullptr,
ForRangeInit *FRI = nullptr);
Decl *ParseDeclarationAfterDeclarator(Declarator &D,
const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo());
bool ParseAsmAttributesAfterDeclarator(Declarator &D);
Decl *ParseDeclarationAfterDeclaratorAndAttributes(
Declarator &D,
const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
ForRangeInit *FRI = nullptr);
Decl *ParseFunctionStatementBody(Decl *Decl, ParseScope &BodyScope);
Decl *ParseFunctionTryBlock(Decl *Decl, ParseScope &BodyScope);
/// \brief When in code-completion, skip parsing of the function/method body
/// unless the body contains the code-completion point.
///
/// \returns true if the function body was skipped.
bool trySkippingFunctionBody();
bool ParseImplicitInt(DeclSpec &DS, CXXScopeSpec *SS,
const ParsedTemplateInfo &TemplateInfo,
AccessSpecifier AS, DeclSpecContext DSC,
ParsedAttributesWithRange &Attrs);
DeclSpecContext getDeclSpecContextFromDeclaratorContext(unsigned Context);
void ParseDeclarationSpecifiers(DeclSpec &DS,
const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
AccessSpecifier AS = AS_none,
DeclSpecContext DSC = DSC_normal,
LateParsedAttrList *LateAttrs = nullptr);
bool DiagnoseMissingSemiAfterTagDefinition(DeclSpec &DS, AccessSpecifier AS,
DeclSpecContext DSContext,
LateParsedAttrList *LateAttrs = nullptr);
void ParseSpecifierQualifierList(DeclSpec &DS, AccessSpecifier AS = AS_none,
DeclSpecContext DSC = DSC_normal);
void ParseObjCTypeQualifierList(ObjCDeclSpec &DS,
Declarator::TheContext Context);
void ParseEnumSpecifier(SourceLocation TagLoc, DeclSpec &DS,
const ParsedTemplateInfo &TemplateInfo,
AccessSpecifier AS, DeclSpecContext DSC);
void ParseEnumBody(SourceLocation StartLoc, Decl *TagDecl);
void ParseStructUnionBody(SourceLocation StartLoc, unsigned TagType,
Decl *TagDecl);
void ParseStructDeclaration(
ParsingDeclSpec &DS,
llvm::function_ref<void(ParsingFieldDeclarator &)> FieldsCallback);
bool isDeclarationSpecifier(bool DisambiguatingWithExpression = false);
bool isTypeSpecifierQualifier();
/// isKnownToBeTypeSpecifier - Return true if we know that the specified token
/// is definitely a type-specifier. Return false if it isn't part of a type
/// specifier or if we're not sure.
bool isKnownToBeTypeSpecifier(const Token &Tok) const;
/// \brief Return true if we know that we are definitely looking at a
/// decl-specifier, and isn't part of an expression such as a function-style
/// cast. Return false if it's not a decl-specifier, or we're not sure.
bool isKnownToBeDeclarationSpecifier() {
  if (getLangOpts().CPlusPlus)
    return isCXXDeclarationSpecifier() == TPResult::True;
  return isDeclarationSpecifier(true);
}
/// isDeclarationStatement - Disambiguates between a declaration or an
/// expression statement, when parsing function bodies.
/// Returns true for declaration, false for expression.
bool isDeclarationStatement() {
  return getLangOpts().CPlusPlus ? isCXXDeclarationStatement()
                                 : isDeclarationSpecifier(true);
}
/// isForInitDeclaration - Disambiguates between a declaration or an
/// expression in the context of the C 'clause-1' or the C++
/// 'for-init-statement' part of a 'for' statement.
/// Returns true for declaration, false for expression.
bool isForInitDeclaration() {
  if (getLangOpts().CPlusPlus)
    return isCXXSimpleDeclaration(/*AllowForRangeDecl=*/true);
  return isDeclarationSpecifier(true);
}
/// \brief Determine whether this is a C++1z for-range-identifier.
bool isForRangeIdentifier();
/// \brief Determine whether we are currently at the start of an Objective-C
/// class message that appears to be missing the open bracket '['.
bool isStartOfObjCClassMessageMissingOpenBracket();
/// \brief Starting with a scope specifier, identifier, or
/// template-id that refers to the current class, determine whether
/// this is a constructor declarator.
bool isConstructorDeclarator(bool Unqualified);
/// \brief Specifies the context in which type-id/expression
/// disambiguation will occur.
enum TentativeCXXTypeIdContext {
TypeIdInParens,
TypeIdUnambiguous,
TypeIdAsTemplateArgument
};
/// isTypeIdInParens - Assumes that a '(' was parsed and now we want to know
/// whether the parens contain an expression or a type-id.
/// Returns true for a type-id and false for an expression.
bool isTypeIdInParens(bool &isAmbiguous) {
  // In C there is no ambiguity: decide on specifier-qualifiers alone.
  if (!getLangOpts().CPlusPlus) {
    isAmbiguous = false;
    return isTypeSpecifierQualifier();
  }
  return isCXXTypeId(TypeIdInParens, isAmbiguous);
}
/// Convenience overload that discards the ambiguity flag.
bool isTypeIdInParens() {
  bool IsAmbiguousIgnored;
  return isTypeIdInParens(IsAmbiguousIgnored);
}
/// \brief Checks if the current tokens form a type-id or an expression.
/// Similar to isTypeIdInParens but does not assume the type-id is
/// enclosed in parentheses.
bool isTypeIdUnambiguously() {
  if (getLangOpts().CPlusPlus) {
    bool IsAmbiguous; // result unused; required by isCXXTypeId's signature
    return isCXXTypeId(TypeIdUnambiguous, IsAmbiguous);
  }
  return isTypeSpecifierQualifier();
}
/// isCXXDeclarationStatement - C++-specialized function that disambiguates
/// between a declaration or an expression statement, when parsing function
/// bodies. Returns true for declaration, false for expression.
bool isCXXDeclarationStatement();
/// isCXXSimpleDeclaration - C++-specialized function that disambiguates
/// between a simple-declaration or an expression-statement.
/// If during the disambiguation process a parsing error is encountered,
/// the function returns true to let the declaration parsing code handle it.
/// Returns false if the statement is disambiguated as expression.
bool isCXXSimpleDeclaration(bool AllowForRangeDecl);
/// isCXXFunctionDeclarator - Disambiguates between a function declarator or
/// a constructor-style initializer, when parsing declaration statements.
/// Returns true for function declarator and false for constructor-style
/// initializer. Sets 'IsAmbiguous' to true to indicate that this declaration
/// might be a constructor-style initializer.
/// If during the disambiguation process a parsing error is encountered,
/// the function returns true to let the declaration parsing code handle it.
bool isCXXFunctionDeclarator(bool *IsAmbiguous = nullptr);
struct ConditionDeclarationOrInitStatementState;
/// The result of disambiguating the tokens after 'if (' or 'switch ('
/// (see isCXXConditionDeclarationOrInitStatement below).
enum class ConditionOrInitStatement {
  Expression, ///< Disambiguated as an expression (either kind).
  ConditionDecl, ///< Disambiguated as the declaration form of condition.
  InitStmtDecl, ///< Disambiguated as a simple-declaration init-statement.
  Error ///< Can't be any of the above!
};
/// \brief Disambiguates between the different kinds of things that can happen
/// after 'if (' or 'switch ('. This could be one of two different kinds of
/// declaration (depending on whether there is a ';' later) or an expression.
ConditionOrInitStatement
isCXXConditionDeclarationOrInitStatement(bool CanBeInitStmt);
bool isCXXTypeId(TentativeCXXTypeIdContext Context, bool &isAmbiguous);
bool isCXXTypeId(TentativeCXXTypeIdContext Context) {
bool isAmbiguous;
return isCXXTypeId(Context, isAmbiguous);
}
/// TPResult - Used as the result value for functions whose purpose is to
/// disambiguate C++ constructs by "tentatively parsing" them.
/// True/False mean the ambiguity was resolved; Ambiguous means further
/// tentative parsing is needed; Error means a parse error was hit.
enum class TPResult {
  True, False, Ambiguous, Error
};
/// \brief Based only on the given token kind, determine whether we know that
/// we're at the start of an expression or a type-specifier-seq (which may
/// be an expression, in C++).
///
/// This routine does not attempt to resolve any of the trick cases, e.g.,
/// those involving lookup of identifiers.
///
/// \returns \c TPR_true if this token starts an expression, \c TPR_false if
/// this token starts a type-specifier-seq, or \c TPR_ambiguous if it cannot
/// tell.
TPResult isExpressionOrTypeSpecifierSimple(tok::TokenKind Kind);
/// isCXXDeclarationSpecifier - Returns TPResult::True if it is a
/// declaration specifier, TPResult::False if it is not,
/// TPResult::Ambiguous if it could be either a decl-specifier or a
/// function-style cast, and TPResult::Error if a parsing error was
/// encountered. If it could be a braced C++11 function-style cast, returns
/// BracedCastResult.
/// Doesn't consume tokens.
TPResult
isCXXDeclarationSpecifier(TPResult BracedCastResult = TPResult::False,
bool *HasMissingTypename = nullptr);
/// Given that isCXXDeclarationSpecifier returns \c TPResult::True or
/// \c TPResult::Ambiguous, determine whether the decl-specifier would be
/// a type-specifier other than a cv-qualifier.
bool isCXXDeclarationSpecifierAType();
/// \brief Determine whether an identifier has been tentatively declared as a
/// non-type. Such tentative declarations should not be found to name a type
/// during a tentative parse, but also should not be annotated as a non-type.
bool isTentativelyDeclared(IdentifierInfo *II);
// "Tentative parsing" functions, used for disambiguation. If a parsing error
// is encountered they will return TPResult::Error.
// Returning TPResult::True/False indicates that the ambiguity was
// resolved and tentative parsing may stop. TPResult::Ambiguous indicates
// that more tentative parsing is necessary for disambiguation.
// They all consume tokens, so backtracking should be used after calling them.
TPResult TryParseSimpleDeclaration(bool AllowForRangeDecl);
TPResult TryParseTypeofSpecifier();
TPResult TryParseProtocolQualifiers();
TPResult TryParsePtrOperatorSeq();
TPResult TryParseOperatorId();
TPResult TryParseInitDeclaratorList();
TPResult TryParseDeclarator(bool mayBeAbstract, bool mayHaveIdentifier=true);
TPResult
TryParseParameterDeclarationClause(bool *InvalidAsDeclaration = nullptr,
bool VersusTemplateArg = false);
TPResult TryParseFunctionDeclarator();
TPResult TryParseBracketDeclarator();
TPResult TryConsumeDeclarationSpecifier();
public:
TypeResult ParseTypeName(SourceRange *Range = nullptr,
Declarator::TheContext Context
= Declarator::TypeNameContext,
AccessSpecifier AS = AS_none,
Decl **OwnedType = nullptr,
ParsedAttributes *Attrs = nullptr);
private:
void ParseBlockId(SourceLocation CaretLoc);
// Check for the start of a C++11 attribute-specifier-seq in a context where
// an attribute is not allowed.
/// If the '[' we are looking at opens a C++11 attribute-specifier-seq in a
/// context where attributes are not allowed, emit a diagnostic.
/// \returns true if a prohibited attribute was diagnosed.
bool CheckProhibitedCXX11Attribute() {
assert(Tok.is(tok::l_square));
// Only a '[[' sequence in C++11 mode can start an attribute specifier.
bool LooksLikeAttribute =
getLangOpts().CPlusPlus11 && NextToken().is(tok::l_square);
return LooksLikeAttribute ? DiagnoseProhibitedCXX11Attribute() : false;
}
bool DiagnoseProhibitedCXX11Attribute();
/// In C++11 mode, if the current token begins an attribute specifier
/// ('[[' or 'alignas') in a misplaced position, diagnose it and suggest
/// moving it to \p CorrectLocation.
void CheckMisplacedCXX11Attribute(ParsedAttributesWithRange &Attrs,
SourceLocation CorrectLocation) {
if (!getLangOpts().CPlusPlus11)
return;
bool AtAttributeStart = Tok.is(tok::l_square) && NextToken().is(tok::l_square);
if (AtAttributeStart || Tok.is(tok::kw_alignas))
DiagnoseMisplacedCXX11Attribute(Attrs, CorrectLocation);
}
void DiagnoseMisplacedCXX11Attribute(ParsedAttributesWithRange &Attrs,
SourceLocation CorrectLocation);
void stripTypeAttributesOffDeclSpec(ParsedAttributesWithRange &Attrs,
DeclSpec &DS, Sema::TagUseKind TUK);
/// Diagnose any attributes that were collected in \p attrs and then discard
/// them. A no-op when no attributes were parsed (invalid range).
void ProhibitAttributes(ParsedAttributesWithRange &attrs) {
if (attrs.Range.isValid()) {
DiagnoseProhibitedAttributes(attrs);
attrs.clear();
}
}
void DiagnoseProhibitedAttributes(ParsedAttributesWithRange &attrs);
// Forbid C++11 attributes that appear on certain syntactic
// locations which standard permits but we don't supported yet,
// for example, attributes appertain to decl specifiers.
void ProhibitCXX11Attributes(ParsedAttributesWithRange &Attrs,
unsigned DiagID);
/// \brief Skip C++11 attributes and return the end location of the last one.
/// \returns SourceLocation() if there are no attributes.
SourceLocation SkipCXX11Attributes();
/// \brief Diagnose and skip C++11 attributes that appear in syntactic
/// locations where attributes are not allowed.
void DiagnoseAndSkipCXX11Attributes();
/// \brief Parses syntax-generic attribute arguments for attributes which are
/// known to the implementation, and adds them to the given ParsedAttributes
/// list with the given attribute syntax. Returns the number of arguments
/// parsed for the attribute.
unsigned
ParseAttributeArgsCommon(IdentifierInfo *AttrName, SourceLocation AttrNameLoc,
ParsedAttributes &Attrs, SourceLocation *EndLoc,
IdentifierInfo *ScopeName, SourceLocation ScopeLoc,
AttributeList::Syntax Syntax);
/// If the next token starts a GNU __attribute__ specifier, parse the
/// attribute list and attach the result to the declarator \p D.
void MaybeParseGNUAttributes(Declarator &D,
LateParsedAttrList *LateAttrs = nullptr) {
if (!Tok.is(tok::kw___attribute))
return;
ParsedAttributes GNUAttrs(AttrFactory);
SourceLocation AttrsEndLoc;
ParseGNUAttributes(GNUAttrs, &AttrsEndLoc, LateAttrs, &D);
D.takeAttributes(GNUAttrs, AttrsEndLoc);
}
/// Parse a GNU __attribute__ specifier into \p attrs if one starts here;
/// otherwise do nothing.
void MaybeParseGNUAttributes(ParsedAttributes &attrs,
SourceLocation *endLoc = nullptr,
LateParsedAttrList *LateAttrs = nullptr) {
if (!Tok.is(tok::kw___attribute))
return;
ParseGNUAttributes(attrs, endLoc, LateAttrs);
}
void ParseGNUAttributes(ParsedAttributes &attrs,
SourceLocation *endLoc = nullptr,
LateParsedAttrList *LateAttrs = nullptr,
Declarator *D = nullptr);
void ParseGNUAttributeArgs(IdentifierInfo *AttrName,
SourceLocation AttrNameLoc,
ParsedAttributes &Attrs,
SourceLocation *EndLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc,
AttributeList::Syntax Syntax,
Declarator *D);
IdentifierLoc *ParseIdentifierLoc();
unsigned
ParseClangAttributeArgs(IdentifierInfo *AttrName, SourceLocation AttrNameLoc,
ParsedAttributes &Attrs, SourceLocation *EndLoc,
IdentifierInfo *ScopeName, SourceLocation ScopeLoc,
AttributeList::Syntax Syntax);
/// In C++11 mode, if an attribute-specifier-seq starts here, parse it and
/// attach the attributes to the declarator \p D.
void MaybeParseCXX11Attributes(Declarator &D) {
if (!getLangOpts().CPlusPlus11 || !isCXX11AttributeSpecifier())
return;
ParsedAttributesWithRange CXX11Attrs(AttrFactory);
SourceLocation AttrsEndLoc;
ParseCXX11Attributes(CXX11Attrs, &AttrsEndLoc);
D.takeAttributes(CXX11Attrs, AttrsEndLoc);
}
/// In C++11 mode, parse an attribute-specifier-seq (if one starts here)
/// into a ranged temporary and move the attributes into \p attrs.
void MaybeParseCXX11Attributes(ParsedAttributes &attrs,
SourceLocation *endLoc = nullptr) {
if (!getLangOpts().CPlusPlus11 || !isCXX11AttributeSpecifier())
return;
ParsedAttributesWithRange RangedAttrs(AttrFactory);
ParseCXX11Attributes(RangedAttrs, endLoc);
attrs.takeAllFrom(RangedAttrs);
}
/// In C++11 mode, parse an attribute-specifier-seq into \p attrs if one
/// starts here. \p OuterMightBeMessageSend is forwarded to the
/// disambiguation check.
void MaybeParseCXX11Attributes(ParsedAttributesWithRange &attrs,
SourceLocation *endLoc = nullptr,
bool OuterMightBeMessageSend = false) {
if (!getLangOpts().CPlusPlus11)
return;
if (isCXX11AttributeSpecifier(false, OuterMightBeMessageSend))
ParseCXX11Attributes(attrs, endLoc);
}
void ParseCXX11AttributeSpecifier(ParsedAttributes &attrs,
SourceLocation *EndLoc = nullptr);
void ParseCXX11Attributes(ParsedAttributesWithRange &attrs,
SourceLocation *EndLoc = nullptr);
/// \brief Parses a C++-style attribute argument list. Returns true if this
/// results in adding an attribute to the ParsedAttributes list.
bool ParseCXX11AttributeArgs(IdentifierInfo *AttrName,
SourceLocation AttrNameLoc,
ParsedAttributes &Attrs, SourceLocation *EndLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc);
IdentifierInfo *TryParseCXX11AttributeIdentifier(SourceLocation &Loc);
/// Under -fms-extensions, parse a Microsoft '[...]' attribute block into
/// \p attrs if one starts at the current token.
void MaybeParseMicrosoftAttributes(ParsedAttributes &attrs,
SourceLocation *endLoc = nullptr) {
if (!getLangOpts().MicrosoftExt)
return;
if (Tok.is(tok::l_square))
ParseMicrosoftAttributes(attrs, endLoc);
}
void ParseMicrosoftUuidAttributeArgs(ParsedAttributes &Attrs);
void ParseMicrosoftAttributes(ParsedAttributes &attrs,
SourceLocation *endLoc = nullptr);
/// If the __declspec keyword is enabled and starts here, parse the
/// declspec attribute list into \p Attrs.
void MaybeParseMicrosoftDeclSpecs(ParsedAttributes &Attrs,
SourceLocation *End = nullptr) {
if (getLangOpts().DeclSpecKeyword && Tok.is(tok::kw___declspec))
ParseMicrosoftDeclSpecs(Attrs, End);
}
void ParseMicrosoftDeclSpecs(ParsedAttributes &Attrs,
SourceLocation *End = nullptr);
bool ParseMicrosoftDeclSpecArgs(IdentifierInfo *AttrName,
SourceLocation AttrNameLoc,
ParsedAttributes &Attrs);
void ParseMicrosoftTypeAttributes(ParsedAttributes &attrs);
void DiagnoseAndSkipExtendedMicrosoftTypeAttributes();
SourceLocation SkipExtendedMicrosoftTypeAttributes();
void ParseMicrosoftInheritanceClassAttributes(ParsedAttributes &attrs);
void ParseBorlandTypeAttributes(ParsedAttributes &attrs);
void ParseOpenCLKernelAttributes(ParsedAttributes &attrs);
void ParseOpenCLQualifiers(ParsedAttributes &Attrs);
/// \brief Parses opencl_unroll_hint attribute if language is OpenCL v2.0
/// or higher.
/// \return false if error happens.
bool MaybeParseOpenCLUnrollHintAttribute(ParsedAttributes &Attrs) {
// Outside OpenCL there is nothing to parse; report success.
if (!getLangOpts().OpenCL)
return true;
return ParseOpenCLUnrollHintAttribute(Attrs);
}
/// \brief Parses opencl_unroll_hint attribute.
/// \return false if error happens.
bool ParseOpenCLUnrollHintAttribute(ParsedAttributes &Attrs);
void ParseNullabilityTypeSpecifiers(ParsedAttributes &attrs);
VersionTuple ParseVersionTuple(SourceRange &Range);
void ParseAvailabilityAttribute(IdentifierInfo &Availability,
SourceLocation AvailabilityLoc,
ParsedAttributes &attrs,
SourceLocation *endLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc,
AttributeList::Syntax Syntax);
Optional<AvailabilitySpec> ParseAvailabilitySpec();
ExprResult ParseAvailabilityCheckExpr(SourceLocation StartLoc);
void ParseExternalSourceSymbolAttribute(IdentifierInfo &ExternalSourceSymbol,
SourceLocation Loc,
ParsedAttributes &Attrs,
SourceLocation *EndLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc,
AttributeList::Syntax Syntax);
void ParseObjCBridgeRelatedAttribute(IdentifierInfo &ObjCBridgeRelated,
SourceLocation ObjCBridgeRelatedLoc,
ParsedAttributes &attrs,
SourceLocation *endLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc,
AttributeList::Syntax Syntax);
void ParseTypeTagForDatatypeAttribute(IdentifierInfo &AttrName,
SourceLocation AttrNameLoc,
ParsedAttributes &Attrs,
SourceLocation *EndLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc,
AttributeList::Syntax Syntax);
void ParseSwiftNewtypeAttribute(IdentifierInfo &SwiftNewtype,
SourceLocation SwiftNewtypeLoc,
ParsedAttributes &attrs,
SourceLocation *endLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc,
AttributeList::Syntax Syntax);
void ParseAttributeWithTypeArg(IdentifierInfo &AttrName,
SourceLocation AttrNameLoc,
ParsedAttributes &Attrs,
SourceLocation *EndLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc,
AttributeList::Syntax Syntax);
void ParseTypeofSpecifier(DeclSpec &DS);
SourceLocation ParseDecltypeSpecifier(DeclSpec &DS);
void AnnotateExistingDecltypeSpecifier(const DeclSpec &DS,
SourceLocation StartLoc,
SourceLocation EndLoc);
void ParseUnderlyingTypeSpecifier(DeclSpec &DS);
void ParseAtomicSpecifier(DeclSpec &DS);
ExprResult ParseAlignArgument(SourceLocation Start,
SourceLocation &EllipsisLoc);
void ParseAlignmentSpecifier(ParsedAttributes &Attrs,
SourceLocation *endLoc = nullptr);
VirtSpecifiers::Specifier isCXX11VirtSpecifier(const Token &Tok) const;
/// Convenience overload: classify the *current* token as a C++11
/// virt-specifier (override/final) by forwarding to the Token overload.
VirtSpecifiers::Specifier isCXX11VirtSpecifier() const {
return isCXX11VirtSpecifier(Tok);
}
void ParseOptionalCXX11VirtSpecifierSeq(VirtSpecifiers &VS, bool IsInterface,
SourceLocation FriendLoc);
bool isCXX11FinalKeyword() const;
/// DeclaratorScopeObj - RAII object used in Parser::ParseDirectDeclarator to
/// enter a new C++ declarator scope and exit it when the function is
/// finished.
class DeclaratorScopeObj {
Parser &P;
CXXScopeSpec &SS;            // the C++ scope spec whose scope we may enter
bool EnteredScope;           // true once ActOnCXXEnterDeclaratorScope succeeded
bool CreatedScope;           // true once P.EnterScope was called (independent of the above)
public:
DeclaratorScopeObj(Parser &p, CXXScopeSpec &ss)
: P(p), SS(ss), EnteredScope(false), CreatedScope(false) {}
void EnterDeclaratorScope() {
assert(!EnteredScope && "Already entered the scope!");
assert(SS.isSet() && "C++ scope was not set!");
CreatedScope = true;
P.EnterScope(0); // Not a decl scope.
// NOTE(review): a false return from ActOnCXXEnterDeclaratorScope appears
// to indicate success here (EnteredScope is set on false) — the exit call
// in the destructor is only made in that case.
if (!P.Actions.ActOnCXXEnterDeclaratorScope(P.getCurScope(), SS))
EnteredScope = true;
}
~DeclaratorScopeObj() {
// Unwind in reverse order: leave the declarator scope (if entered)
// before popping the parser scope created above.
if (EnteredScope) {
assert(SS.isSet() && "C++ scope was cleared ?");
P.Actions.ActOnCXXExitDeclaratorScope(P.getCurScope(), SS);
}
if (CreatedScope)
P.ExitScope();
}
};
/// ParseDeclarator - Parse and verify a newly-initialized declarator.
void ParseDeclarator(Declarator &D);
/// A function that parses a variant of direct-declarator.
typedef void (Parser::*DirectDeclParseFunction)(Declarator&);
void ParseDeclaratorInternal(Declarator &D,
DirectDeclParseFunction DirectDeclParser);
enum AttrRequirements {
AR_NoAttributesParsed = 0, ///< No attributes are diagnosed.
AR_GNUAttributesParsedAndRejected = 1 << 0, ///< Diagnose GNU attributes.
AR_GNUAttributesParsed = 1 << 1, ///< GNU __attribute__ specifiers are parsed.
AR_CXX11AttributesParsed = 1 << 2, ///< C++11 [[...]] attributes are parsed.
AR_DeclspecAttributesParsed = 1 << 3, ///< Microsoft __declspec attributes are parsed.
///< All three attribute syntaxes are accepted.
AR_AllAttributesParsed = AR_GNUAttributesParsed |
AR_CXX11AttributesParsed |
AR_DeclspecAttributesParsed,
///< Vendor (GNU + declspec) syntaxes only; C++11 attributes excluded.
AR_VendorAttributesParsed = AR_GNUAttributesParsed |
AR_DeclspecAttributesParsed
};
void ParseTypeQualifierListOpt(
DeclSpec &DS, unsigned AttrReqs = AR_AllAttributesParsed,
bool AtomicAllowed = true, bool IdentifierRequired = false,
Optional<llvm::function_ref<void()>> CodeCompletionHandler = None);
void ParseDirectDeclarator(Declarator &D);
void ParseDecompositionDeclarator(Declarator &D);
void ParseParenDeclarator(Declarator &D);
void ParseFunctionDeclarator(Declarator &D,
ParsedAttributes &attrs,
BalancedDelimiterTracker &Tracker,
bool IsAmbiguous,
bool RequiresArg = false);
bool ParseRefQualifier(bool &RefQualifierIsLValueRef,
SourceLocation &RefQualifierLoc);
bool isFunctionDeclaratorIdentifierList();
void ParseFunctionDeclaratorIdentifierList(
Declarator &D,
SmallVectorImpl<DeclaratorChunk::ParamInfo> &ParamInfo);
void ParseParameterDeclarationClause(
Declarator &D,
ParsedAttributes &attrs,
SmallVectorImpl<DeclaratorChunk::ParamInfo> &ParamInfo,
SourceLocation &EllipsisLoc);
void ParseBracketDeclarator(Declarator &D);
void ParseMisplacedBracketDeclarator(Declarator &D);
//===--------------------------------------------------------------------===//
// C++ 7: Declarations [dcl.dcl]
/// The kind of attribute specifier we have found.
enum CXX11AttributeKind {
/// This is not an attribute specifier.
CAK_NotAttributeSpecifier,
/// This should be treated as an attribute-specifier.
CAK_AttributeSpecifier,
/// The next tokens are '[[', but this is not an attribute-specifier. This
/// is ill-formed by C++11 [dcl.attr.grammar]p6.
CAK_InvalidAttributeSpecifier
};
CXX11AttributeKind
isCXX11AttributeSpecifier(bool Disambiguate = false,
bool OuterMightBeMessageSend = false);
void DiagnoseUnexpectedNamespace(NamedDecl *Context);
DeclGroupPtrTy ParseNamespace(unsigned Context, SourceLocation &DeclEnd,
SourceLocation InlineLoc = SourceLocation());
void ParseInnerNamespace(std::vector<SourceLocation>& IdentLoc,
std::vector<IdentifierInfo*>& Ident,
std::vector<SourceLocation>& NamespaceLoc,
unsigned int index, SourceLocation& InlineLoc,
ParsedAttributes& attrs,
BalancedDelimiterTracker &Tracker);
Decl *ParseLinkage(ParsingDeclSpec &DS, unsigned Context);
Decl *ParseExportDeclaration();
DeclGroupPtrTy ParseUsingDirectiveOrDeclaration(
unsigned Context, const ParsedTemplateInfo &TemplateInfo,
SourceLocation &DeclEnd, ParsedAttributesWithRange &attrs);
Decl *ParseUsingDirective(unsigned Context,
SourceLocation UsingLoc,
SourceLocation &DeclEnd,
ParsedAttributes &attrs);
/// Pieces of a single using-declarator, filled in by ParseUsingDeclarator.
struct UsingDeclarator {
SourceLocation TypenameLoc;  // location of 'typename', if present
CXXScopeSpec SS;             // nested-name-specifier qualifying the name
SourceLocation TemplateKWLoc; // location of 'template', if present
UnqualifiedId Name;          // the declared name itself
SourceLocation EllipsisLoc;  // location of '...' for a pack, if present
// Reset all fields so the object can be reused for the next declarator.
void clear() {
TypenameLoc = TemplateKWLoc = EllipsisLoc = SourceLocation();
SS.clear();
Name.clear();
}
};
bool ParseUsingDeclarator(unsigned Context, UsingDeclarator &D);
DeclGroupPtrTy ParseUsingDeclaration(unsigned Context,
const ParsedTemplateInfo &TemplateInfo,
SourceLocation UsingLoc,
SourceLocation &DeclEnd,
AccessSpecifier AS = AS_none);
Decl *ParseAliasDeclarationAfterDeclarator(
const ParsedTemplateInfo &TemplateInfo, SourceLocation UsingLoc,
UsingDeclarator &D, SourceLocation &DeclEnd, AccessSpecifier AS,
ParsedAttributes &Attrs, Decl **OwnedType = nullptr);
Decl *ParseStaticAssertDeclaration(SourceLocation &DeclEnd);
Decl *ParseNamespaceAlias(SourceLocation NamespaceLoc,
SourceLocation AliasLoc, IdentifierInfo *Alias,
SourceLocation &DeclEnd);
//===--------------------------------------------------------------------===//
// C++ 9: classes [class] and C structs/unions.
bool isValidAfterTypeSpecifier(bool CouldBeBitfield);
void ParseClassSpecifier(tok::TokenKind TagTokKind, SourceLocation TagLoc,
DeclSpec &DS, const ParsedTemplateInfo &TemplateInfo,
AccessSpecifier AS, bool EnteringContext,
DeclSpecContext DSC,
ParsedAttributesWithRange &Attributes);
void SkipCXXMemberSpecification(SourceLocation StartLoc,
SourceLocation AttrFixitLoc,
unsigned TagType,
Decl *TagDecl);
void ParseCXXMemberSpecification(SourceLocation StartLoc,
SourceLocation AttrFixitLoc,
ParsedAttributesWithRange &Attrs,
unsigned TagType,
Decl *TagDecl);
ExprResult ParseCXXMemberInitializer(Decl *D, bool IsFunction,
SourceLocation &EqualLoc);
bool ParseCXXMemberDeclaratorBeforeInitializer(Declarator &DeclaratorInfo,
VirtSpecifiers &VS,
ExprResult &BitfieldSize,
LateParsedAttrList &LateAttrs);
void MaybeParseAndDiagnoseDeclSpecAfterCXX11VirtSpecifierSeq(Declarator &D,
VirtSpecifiers &VS);
DeclGroupPtrTy ParseCXXClassMemberDeclaration(
AccessSpecifier AS, AttributeList *Attr,
const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
ParsingDeclRAIIObject *DiagsFromTParams = nullptr);
DeclGroupPtrTy ParseCXXClassMemberDeclarationWithPragmas(
AccessSpecifier &AS, ParsedAttributesWithRange &AccessAttrs,
DeclSpec::TST TagType, Decl *Tag);
void ParseConstructorInitializer(Decl *ConstructorDecl);
MemInitResult ParseMemInitializer(Decl *ConstructorDecl);
void HandleMemberFunctionDeclDelays(Declarator& DeclaratorInfo,
Decl *ThisDecl);
//===--------------------------------------------------------------------===//
// C++ 10: Derived classes [class.derived]
TypeResult ParseBaseTypeSpecifier(SourceLocation &BaseLoc,
SourceLocation &EndLocation);
void ParseBaseClause(Decl *ClassDecl);
BaseResult ParseBaseSpecifier(Decl *ClassDecl);
AccessSpecifier getAccessSpecifierIfPresent() const;
bool ParseUnqualifiedIdTemplateId(CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
IdentifierInfo *Name,
SourceLocation NameLoc,
bool EnteringContext,
ParsedType ObjectType,
UnqualifiedId &Id,
bool AssumeTemplateId);
bool ParseUnqualifiedIdOperator(CXXScopeSpec &SS, bool EnteringContext,
ParsedType ObjectType,
UnqualifiedId &Result);
//===--------------------------------------------------------------------===//
// OpenMP: Directives and clauses.
/// Parse clauses for '#pragma omp declare simd'.
DeclGroupPtrTy ParseOMPDeclareSimdClauses(DeclGroupPtrTy Ptr,
CachedTokens &Toks,
SourceLocation Loc);
/// \brief Parses declarative OpenMP directives.
DeclGroupPtrTy ParseOpenMPDeclarativeDirectiveWithExtDecl(
AccessSpecifier &AS, ParsedAttributesWithRange &Attrs,
DeclSpec::TST TagType = DeclSpec::TST_unspecified,
Decl *TagDecl = nullptr);
/// \brief Parse 'omp declare reduction' construct.
DeclGroupPtrTy ParseOpenMPDeclareReductionDirective(AccessSpecifier AS);
/// \brief Parses simple list of variables.
///
/// \param Kind Kind of the directive.
/// \param Callback Callback function to be called for the list elements.
/// \param AllowScopeSpecifier true, if the variables can have fully
/// qualified names.
///
bool ParseOpenMPSimpleVarList(
OpenMPDirectiveKind Kind,
const llvm::function_ref<void(CXXScopeSpec &, DeclarationNameInfo)> &
Callback,
bool AllowScopeSpecifier);
/// \brief Parses declarative or executable directive.
///
/// \param Allowed ACK_Any, if any directives are allowed,
/// ACK_StatementsOpenMPAnyExecutable - if any executable directives are
/// allowed, ACK_StatementsOpenMPNonStandalone - if only non-standalone
/// executable directives are allowed.
///
StmtResult
ParseOpenMPDeclarativeOrExecutableDirective(AllowedContsructsKind Allowed);
/// \brief Parses clause of kind \a CKind for directive of a kind \a Kind.
///
/// \param DKind Kind of current directive.
/// \param CKind Kind of current clause.
/// \param FirstClause true, if this is the first clause of a kind \a CKind
/// in current directive.
///
OMPClause *ParseOpenMPClause(OpenMPDirectiveKind DKind,
OpenMPClauseKind CKind, bool FirstClause);
/// \brief Parses clause with a single expression of a kind \a Kind.
///
/// \param Kind Kind of current clause.
///
OMPClause *ParseOpenMPSingleExprClause(OpenMPClauseKind Kind);
/// \brief Parses simple clause of a kind \a Kind.
///
/// \param Kind Kind of current clause.
///
OMPClause *ParseOpenMPSimpleClause(OpenMPClauseKind Kind);
/// \brief Parses clause with a single expression and an additional argument
/// of a kind \a Kind.
///
/// \param Kind Kind of current clause.
///
OMPClause *ParseOpenMPSingleExprWithArgClause(OpenMPClauseKind Kind);
/// \brief Parses clause without any additional arguments.
///
/// \param Kind Kind of current clause.
///
OMPClause *ParseOpenMPClause(OpenMPClauseKind Kind);
/// \brief Parses clause with the list of variables of a kind \a Kind.
///
/// \param Kind Kind of current clause.
///
OMPClause *ParseOpenMPVarListClause(OpenMPDirectiveKind DKind,
OpenMPClauseKind Kind);
public:
/// Parses simple expression in parens for single-expression clauses of OpenMP
/// constructs.
/// \param RLoc Returned location of right paren.
ExprResult ParseOpenMPParensExpr(StringRef ClauseName, SourceLocation &RLoc);
/// Data used for parsing list of variables in OpenMP clauses.
struct OpenMPVarListDataTy {
Expr *TailExpr = nullptr;            // trailing expression after ':' (clause-dependent)
SourceLocation ColonLoc;             // location of ':' separating list and tail
CXXScopeSpec ReductionIdScopeSpec;   // qualifier for the reduction identifier
DeclarationNameInfo ReductionId;     // reduction operator/function name
OpenMPDependClauseKind DepKind = OMPC_DEPEND_unknown;   // 'depend' kind; unknown until parsed
OpenMPLinearClauseKind LinKind = OMPC_LINEAR_val;       // 'linear' modifier; defaults to val
OpenMPMapClauseKind MapTypeModifier = OMPC_MAP_unknown; // 'map' modifier (e.g. always) — unknown by default
OpenMPMapClauseKind MapType = OMPC_MAP_unknown;         // 'map' type; unknown until parsed
bool IsMapTypeImplicit = false;      // true when no explicit map-type was written
SourceLocation DepLinMapLoc;         // location of the depend/linear/map specifier
};
/// Parses clauses with list.
bool ParseOpenMPVarList(OpenMPDirectiveKind DKind, OpenMPClauseKind Kind,
SmallVectorImpl<Expr *> &Vars,
OpenMPVarListDataTy &Data);
bool ParseUnqualifiedId(CXXScopeSpec &SS, bool EnteringContext,
bool AllowDestructorName,
bool AllowConstructorName,
ParsedType ObjectType,
SourceLocation& TemplateKWLoc,
UnqualifiedId &Result);
private:
//===--------------------------------------------------------------------===//
// C++ 14: Templates [temp]
// C++ 14.1: Template Parameters [temp.param]
Decl *ParseDeclarationStartingWithTemplate(unsigned Context,
SourceLocation &DeclEnd,
AccessSpecifier AS = AS_none,
AttributeList *AccessAttrs = nullptr);
Decl *ParseTemplateDeclarationOrSpecialization(unsigned Context,
SourceLocation &DeclEnd,
AccessSpecifier AS,
AttributeList *AccessAttrs);
Decl *ParseSingleDeclarationAfterTemplate(
unsigned Context,
const ParsedTemplateInfo &TemplateInfo,
ParsingDeclRAIIObject &DiagsFromParams,
SourceLocation &DeclEnd,
AccessSpecifier AS=AS_none,
AttributeList *AccessAttrs = nullptr);
bool ParseTemplateParameters(unsigned Depth,
SmallVectorImpl<Decl*> &TemplateParams,
SourceLocation &LAngleLoc,
SourceLocation &RAngleLoc);
bool ParseTemplateParameterList(unsigned Depth,
SmallVectorImpl<Decl*> &TemplateParams);
bool isStartOfTemplateTypeParameter();
Decl *ParseTemplateParameter(unsigned Depth, unsigned Position);
Decl *ParseTypeParameter(unsigned Depth, unsigned Position);
Decl *ParseTemplateTemplateParameter(unsigned Depth, unsigned Position);
Decl *ParseNonTypeTemplateParameter(unsigned Depth, unsigned Position);
void DiagnoseMisplacedEllipsis(SourceLocation EllipsisLoc,
SourceLocation CorrectLoc,
bool AlreadyHasEllipsis,
bool IdentifierHasName);
void DiagnoseMisplacedEllipsisInDeclarator(SourceLocation EllipsisLoc,
Declarator &D);
// C++ 14.3: Template arguments [temp.arg]
typedef SmallVector<ParsedTemplateArgument, 16> TemplateArgList;
bool ParseGreaterThanInTemplateList(SourceLocation &RAngleLoc,
bool ConsumeLastToken,
bool ObjCGenericList);
bool ParseTemplateIdAfterTemplateName(TemplateTy Template,
SourceLocation TemplateNameLoc,
const CXXScopeSpec &SS,
bool ConsumeLastToken,
SourceLocation &LAngleLoc,
TemplateArgList &TemplateArgs,
SourceLocation &RAngleLoc);
bool AnnotateTemplateIdToken(TemplateTy Template, TemplateNameKind TNK,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
UnqualifiedId &TemplateName,
bool AllowTypeAnnotation = true);
void AnnotateTemplateIdTokenAsType();
bool IsTemplateArgumentList(unsigned Skip = 0);
bool ParseTemplateArgumentList(TemplateArgList &TemplateArgs);
ParsedTemplateArgument ParseTemplateTemplateArgument();
ParsedTemplateArgument ParseTemplateArgument();
Decl *ParseExplicitInstantiation(unsigned Context,
SourceLocation ExternLoc,
SourceLocation TemplateLoc,
SourceLocation &DeclEnd,
AccessSpecifier AS = AS_none);
//===--------------------------------------------------------------------===//
// Modules
DeclGroupPtrTy ParseModuleDecl();
DeclGroupPtrTy ParseModuleImport(SourceLocation AtLoc);
bool parseMisplacedModuleImport();
bool tryParseMisplacedModuleImport() {
tok::TokenKind Kind = Tok.getKind();
if (Kind == tok::annot_module_begin || Kind == tok::annot_module_end ||
Kind == tok::annot_module_include)
return parseMisplacedModuleImport();
return false;
}
bool ParseModuleName(
SourceLocation UseLoc,
SmallVectorImpl<std::pair<IdentifierInfo *, SourceLocation>> &Path,
bool IsImport);
//===--------------------------------------------------------------------===//
// C++11/G++: Type Traits [Type-Traits.html in the GCC manual]
ExprResult ParseTypeTrait();
/// Parse the given string as a type.
///
/// This is a dangerous utility function currently employed only by API notes.
/// It is not a general entry-point for safely parsing types from strings.
///
/// \param typeStr The string to be parsed as a type.
/// \param context The name of the context in which this string is being
/// parsed, which will be used in diagnostics.
/// \param includeLoc The location at which this parse was triggered.
TypeResult parseTypeFromString(StringRef typeStr, StringRef context,
SourceLocation includeLoc);
//===--------------------------------------------------------------------===//
// Embarcadero: Arary and Expression Traits
ExprResult ParseArrayTypeTrait();
ExprResult ParseExpressionTrait();
//===--------------------------------------------------------------------===//
// Preprocessor code-completion pass-through
void CodeCompleteDirective(bool InConditional) override;
void CodeCompleteInConditionalExclusion() override;
void CodeCompleteMacroName(bool IsDefinition) override;
void CodeCompletePreprocessorExpression() override;
void CodeCompleteMacroArgument(IdentifierInfo *Macro, MacroInfo *MacroInfo,
unsigned ArgumentIndex) override;
void CodeCompleteNaturalLanguage() override;
};
} // end namespace clang
#endif
|
GB_unaryop__ainv_uint16_fp32.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__ainv_uint16_fp32
// op(A') function: GB_tran__ainv_uint16_fp32
// C type: uint16_t
// A type: float
// cast: uint16_t cij ; GB_CAST_UNSIGNED(cij,aij,16)
// unaryop: cij = -aij
#define GB_ATYPE \
float
#define GB_CTYPE \
uint16_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
float aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = -x ;
// casting
#define GB_CASTING(z, aij) \
uint16_t z ; GB_CAST_UNSIGNED(z,aij,16) ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (z, aij) ; \
GB_OP (GB_CX (pC), z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_AINV || GxB_NO_UINT16 || GxB_NO_FP32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = (uint16_t) (-Ax [p]) for all p, in parallel. Cast and negation
// are supplied by the GB_CAST_OP macro defined above in this file.
GrB_Info GB_unop__ainv_uint16_fp32
(
uint16_t *Cx, // Cx and Ax may be aliased
float *Ax,
int64_t anz, // number of entries to process
int nthreads // number of OpenMP threads to use
)
{
#if GB_DISABLE
// operator disabled at compile time; caller falls back to the generic case
return (GrB_NO_VALUE) ;
#else
int64_t p ;
// each iteration is independent, so a static schedule is sufficient
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): the transpose kernel body is textually included from
// the shared template GB_unaryop_transpose.c, specialized by the GB_* macros
// defined above in this file.
GrB_Info GB_tran__ainv_uint16_fp32
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts, // per-slice row counts (see template)
GBI_single_iterator Iter,        // iterator over the vectors of A
const int64_t *GB_RESTRICT A_slice, // slice boundaries of A
int naslice                      // number of slices
)
{
#if GB_DISABLE
// operator disabled at compile time; caller falls back to the generic case
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
3d25pt.lbpar.c | #include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
/*
* Order-2, 3D 25 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
#ifndef min
#define min(x,y) ((x) < (y)? (x) : (y))
#endif
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Compute *RESULT = *X - *Y for `struct timeval' values.
 *
 * NOTE: *Y is used as scratch space and is mutated by the carry
 * normalization below (same behavior as the classic glibc-manual version).
 * Returns 1 if the difference is negative, otherwise 0.
 */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  /* Borrow whole seconds into y when x has fewer microseconds, so the
   * microsecond subtraction below cannot go negative. */
  if (x->tv_usec < y->tv_usec)
  {
    int carry = (y->tv_usec - x->tv_usec) / 1000000 + 1;
    y->tv_usec -= 1000000 * carry;
    y->tv_sec += carry;
  }
  /* Conversely, push excess microseconds of the difference into seconds. */
  if (x->tv_usec - y->tv_usec > 1000000)
  {
    int carry = (x->tv_usec - y->tv_usec) / 1000000;
    y->tv_usec += 1000000 * carry;
    y->tv_sec -= carry;
  }
  /* After normalization the microsecond difference is non-negative. */
  result->tv_sec = x->tv_sec - y->tv_sec;
  result->tv_usec = x->tv_usec - y->tv_usec;
  /* Negative overall difference shows up purely in the seconds field. */
  return x->tv_sec < y->tv_sec;
}
int main(int argc, char *argv[])
{
int t, i, j, k, test;
int Nx, Ny, Nz, Nt;
if (argc > 3) {
Nx = atoi(argv[1])+8;
Ny = atoi(argv[2])+8;
Nz = atoi(argv[3])+8;
}
if (argc > 4)
Nt = atoi(argv[4]);
double ****A = (double ****) malloc(sizeof(double***)*2);
double ***roc2 = (double ***) malloc(sizeof(double**));
A[0] = (double ***) malloc(sizeof(double**)*Nz);
A[1] = (double ***) malloc(sizeof(double**)*Nz);
roc2 = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[0][i] = (double**) malloc(sizeof(double*)*Ny);
A[1][i] = (double**) malloc(sizeof(double*)*Ny);
roc2[i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
roc2[i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
// tile size information, including extra element to decide the list length
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 16;
tile_size[1] = 16;
tile_size[2] = 24;
tile_size[3] = 64;
tile_size[4] = -1;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
// initialize variables
//
srand(42);
for (i = 1; i < Nz; i++) {
for (j = 1; j < Ny; j++) {
for (k = 1; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
roc2[i][j][k] = 2.0 * (rand() % BASE);
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
const double coef0 = -0.28472;
const double coef1 = 0.16000;
const double coef2 = -0.02000;
const double coef3 = 0.00254;
const double coef4 = -0.00018;
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - Addition: 6 && Multiplication: 2
/* Copyright (C) 1991-2014 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
/* This header is separate from features.h so that the compiler can
include it implicitly at the start of every compilation. It must
not itself include <features.h> or any other header that includes
<features.h> because the implicit include comes before any feature
test macros that may be defined in a source file before it first
explicitly includes a system header. GCC knows the name of this
header in order to preinclude it. */
/* glibc's intent is to support the IEC 559 math functionality, real
and complex. If the GCC (4.9 and later) predefined macros
specifying compiler intent are available, use them to determine
whether the overall intent is to support these features; otherwise,
presume an older compiler has intent to support these features and
define these macros by default. */
/* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) /
Unicode 6.0. */
/* We do not support C11 <threads.h>. */
int t1, t2, t3, t4, t5, t6, t7, t8;
int lb, ub, lbp, ubp, lb2, ub2;
register int lbv, ubv;
/* Start of CLooG code */
if ((Nt >= 1) && (Nx >= 9) && (Ny >= 9) && (Nz >= 9)) {
for (t1=-1;t1<=floord(Nt-1,2);t1++) {
lbp=max(ceild(t1,2),ceild(4*t1-Nt+2,4));
ubp=min(floord(4*Nt+Nz-9,16),floord(8*t1+Nz+2,16));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
for (t2=lbp;t2<=ubp;t2++) {
for (t3=max(max(max(0,ceild(t1-2,3)),ceild(2*t1-2*t2-1,3)),ceild(16*t2-Nz-11,24));t3<=min(min(min(floord(4*Nt+Ny-9,24),floord(8*t1+Ny+7,24)),floord(16*t2+Ny+3,24)),floord(16*t1-16*t2+Nz+Ny+5,24));t3++) {
for (t4=max(max(max(0,ceild(t1-7,8)),ceild(16*t2-Nz-51,64)),ceild(24*t3-Ny-51,64));t4<=min(min(min(min(floord(4*Nt+Nx-9,64),floord(8*t1+Nx+7,64)),floord(16*t2+Nx+3,64)),floord(24*t3+Nx+11,64)),floord(16*t1-16*t2+Nz+Nx+5,64));t4++) {
for (t5=max(max(max(max(max(0,ceild(16*t2-Nz+5,4)),ceild(24*t3-Ny+5,4)),ceild(64*t4-Nx+5,4)),2*t1),4*t1-4*t2+1);t5<=min(min(min(min(min(floord(16*t1-16*t2+Nz+10,4),Nt-1),2*t1+3),4*t2+2),6*t3+4),16*t4+14);t5++) {
for (t6=max(max(16*t2,4*t5+4),-16*t1+16*t2+8*t5-15);t6<=min(min(16*t2+15,-16*t1+16*t2+8*t5),4*t5+Nz-5);t6++) {
for (t7=max(24*t3,4*t5+4);t7<=min(24*t3+23,4*t5+Ny-5);t7++) {
lbv=max(64*t4,4*t5+4);
ubv=min(64*t4+63,4*t5+Nx-5);
#pragma ivdep
#pragma vector always
for (t8=lbv;t8<=ubv;t8++) {
A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] = (((2.0 * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) - A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (roc2[ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (((((coef0 * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (coef1 * (((((A[ t5 % 2][ (-4*t5+t6) - 1][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 1][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 1][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 1][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 1]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 1]))) + (coef2 * (((((A[ t5 % 2][ (-4*t5+t6) - 2][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 2][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 2][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 2][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 2]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 2]))) + (coef3 * (((((A[ t5 % 2][ (-4*t5+t6) - 3][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 3][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 3][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 3][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 3]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 3]))) + (coef4 * (((((A[ t5 % 2][ (-4*t5+t6) - 4][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 4][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 4][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 4][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 4]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 4])))));;
}
}
}
}
}
}
}
}
}
/* End of CLooG code */
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = MIN(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(4, "constant")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
free(roc2[i][j]);
}
free(A[0][i]);
free(A[1][i]);
free(roc2[i]);
}
free(A[0]);
free(A[1]);
free(roc2);
return 0;
}
|
nowait.c | #include<stdio.h>
#include<omp.h>
#define TAM 1000
int nthrds;
/* Busy-work: sum (i + id) for i in [0, TAM); depends only on `id`. */
int work1(int id){
    int idx = 0, total = 0;
    while (idx < TAM){
        total += idx + id;
        idx++;
    }
    return total;
}
/* Store running prefix sums of (i + cont) into A[0..nthrds-1];
   returns the final accumulated sum. */
int work2(int i,int A[]){
    int k, acc = 0;
    for (k = 0; k < nthrds; ++k){
        acc += i + k;
        A[k] = acc;
    }
    return acc;
}
/* Same computation as work2 but writes the prefix sums into C;
   returns the final accumulated sum. */
int work3(int C[],int i){
    int k, acc = 0;
    for (k = 0; k < nthrds; ++k){
        acc += i + k;
        C[k] = acc;
    }
    return acc;
}
/* Busy-work variant: sum (2*i + id) for i in [0, TAM). */
int work4(int id){
    int n, total = 0;
    for (n = 0; n < TAM; ++n)
        total += 2*n + id;
    return total;
}
/*
 * Demo driver for the OpenMP `nowait` clause: per-thread work1/work4
 * bracket two worksharing loops, the second of which drops its implicit
 * barrier.
 *
 * Bug fix: the original declared `int A[nthrds]` while the global
 * `nthrds` was still 0 — a zero-length VLA is undefined behavior — and
 * only afterwards set `nthrds = omp_get_num_threads()`, which returns 1
 * when called outside a parallel region, so `A[id]` overflowed for any
 * team with more than one thread.  Size A with omp_get_max_threads()
 * (the upper bound on the team size of the upcoming parallel region)
 * *before* declaring the array.
 */
int main(){
    int B[TAM], C[TAM], id, i;
    nthrds = omp_get_max_threads();  /* one slot per possible thread */
    int A[nthrds];
    #pragma omp parallel shared(A, B, C) private(id)
    {
        id = omp_get_thread_num();
        printf("Thread %d executes work1\n", id);
        A[id] = work1(id);
        /* all threads must finish work1 before work2 reads A */
        #pragma omp barrier
        #pragma omp for
        for (i=0; i<TAM; i++){
            C[i]=work2(i,A);
        }
        printf("Thread %d finish work3\n", id);
        /* nowait: threads fall through to work4 without waiting here */
        #pragma omp for nowait
        for (i=0; i<TAM; i++){
            B[i]=work3(C,i);
        }
        printf("Thread %d finish work2\n", id);
        printf("Thread %d executes work4\n", id);
        A[id] = work4(id);
    }
    return 0;
}
|
GB_unop__identity_int64_bool.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__identity_int64_bool
// op(A') function: GB_unop_tran__identity_int64_bool
// C type: int64_t
// A type: bool
// cast: int64_t cij = (int64_t) aij
// unaryop: cij = aij
// input (A) entry type
#define GB_ATYPE \
bool
// output (C) entry type
#define GB_CTYPE \
int64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
bool aij = Ax [pA]
// access the p-th entry of the output array Cx
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
int64_t z = (int64_t) aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
bool aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
int64_t z = (int64_t) aij ; \
Cx [pC] = z ; \
}
// true if operator is the identity op with no typecasting
// (0 here: bool -> int64_t always needs a cast, so the memcpy
// fast path in the apply kernel below is compiled out)
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_INT64 || GxB_NO_BOOL)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = (int64_t) Ax [p] for all entries: apply the identity operator
// with a bool -> int64_t typecast.  Returns GrB_NO_VALUE when the operator
// is disabled at compile time (GB_DISABLE), GrB_SUCCESS otherwise.
GrB_Info GB_unop_apply__identity_int64_bool
(
int64_t *Cx, // Cx and Ax may be aliased
const bool *Ax,
const int8_t *GB_RESTRICT Ab, // A->b if A is bitmap
int64_t anz, // number of positions to process
int nthreads // # of OpenMP threads to use
)
{
#if GB_DISABLE
// operator compiled out; caller falls back to the generic kernel
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
// sparse/hyper/full case: every position p holds a live entry
#if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
GB_memcpy (Cx, Ax, anz * sizeof (bool), nthreads) ;
#else
// a typecast is required, so copy entry by entry
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
bool aij = Ax [p] ;
int64_t z = (int64_t) aij ;
Cx [p] = z ;
}
#endif
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
bool aij = Ax [p] ;
int64_t z = (int64_t) aij ;
Cx [p] = z ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): the transpose kernel itself comes from the included
// template GB_unop_transpose.c, specialized via the GB_* macros above.
GrB_Info GB_unop_tran__identity_int64_bool
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Workspaces, // workspaces used by the template
const int64_t *GB_RESTRICT A_slice, // partition of A across tasks
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
pr39591-3.c | /* PR other/39591 */
/* { dg-do run } */
/* { dg-options "-O2" } */
extern void abort (void);
int err, a[40];
/* Spawn a task that checks the array for the 0x55555555 sentinel and bumps
   the global `err` on any mismatch.  NOTE(review): `array` is a pointer
   parameter, so `sizeof array / sizeof array[0]` is sizeof(int *) /
   sizeof(int) (2 on LP64), NOT the 40 elements of `a`.  This file is a
   compiler regression test (PR other/39591), so the construct appears
   deliberate — confirm against the upstream libgomp testsuite before
   changing it. */
void __attribute__((noinline))
foo (int *array)
{
#pragma omp task
{
int j;
for (j = 0; j < sizeof array / sizeof array[0]; j++)
if (array[j] != 0x55555555)
#pragma omp atomic
err++;
}
}
int
main (void)
{
int k;
for (k = 0; k < sizeof a / sizeof a[0]; k++)
a[k] = 0x55555555;
#pragma omp parallel
{
int i;
#pragma omp for schedule (dynamic)
for (i = 0; i < 50; i++)
foo (a);
}
if (err)
abort ();
return 0;
}
|
psd.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% PPPP SSSSS DDDD %
% P P SS D D %
% PPPP SSS D D %
% P SS D D %
% P SSSSS DDDD %
% %
% %
% Read/Write Adobe Photoshop Image Format %
% %
% Software Design %
% Cristy %
% Leonard Rosenthol %
% July 1992 %
% Dirk Lemstra %
% December 2013 %
% %
% %
% Copyright @ 1999 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Photoshop spec @ https://www.adobe.com/devnet-apps/photoshop/fileformatashtml
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/artifact.h"
#include "MagickCore/attribute.h"
#include "MagickCore/blob.h"
#include "MagickCore/blob-private.h"
#include "MagickCore/cache.h"
#include "MagickCore/channel.h"
#include "MagickCore/colormap.h"
#include "MagickCore/colormap-private.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/constitute.h"
#include "MagickCore/enhance.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/log.h"
#include "MagickCore/magick.h"
#include "MagickCore/memory_.h"
#include "MagickCore/module.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/pixel-private.h"
#include "MagickCore/policy.h"
#include "MagickCore/profile.h"
#include "MagickCore/property.h"
#include "MagickCore/registry.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/static.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#include "coders/coders-private.h"
#ifdef MAGICKCORE_ZLIB_DELEGATE
#include <zlib.h>
#endif
#include "psd-private.h"
/*
Define declarations.
*/
/* Hard upper bound on the channels read from a single layer record. */
#define MaxPSDChannels 56
/* Round a byte count up to the next even number (PSD pads to 2 bytes). */
#define PSDQuantum(x) (((ssize_t) (x)+1) & -2)
/*
Enumerated declarations.
*/
/* Per-channel compression methods defined by the PSD file format. */
typedef enum
{
Raw = 0,
RLE = 1,
ZipWithoutPrediction = 2,
ZipWithPrediction = 3
} PSDCompressionType;
/* Color modes stored in the PSD file header. */
typedef enum
{
BitmapMode = 0,
GrayscaleMode = 1,
IndexedMode = 2,
RGBMode = 3,
CMYKMode = 4,
MultichannelMode = 7,
DuotoneMode = 8,
LabMode = 9
} PSDImageType;
/*
Typedef declarations.
*/
/* One channel record from a layer: the pixel channel it maps to, whether
this reader supports it, and the byte count of its stored data. */
typedef struct _ChannelInfo
{
MagickBooleanType
supported;
PixelChannel
channel;
size_t
size;
} ChannelInfo;
/* A layer's opacity mask: the mask image, its placement rectangle, its
default background value and the PSD mask flag byte. */
typedef struct _MaskInfo
{
Image
*image;
RectangleInfo
page;
unsigned char
background,
flags;
} MaskInfo;
/* Everything parsed from one PSD layer record. */
typedef struct _LayerInfo
{
ChannelInfo
channel_info[MaxPSDChannels];
char
blendkey[4];
Image
*image;
MaskInfo
mask;
Quantum
opacity;
RectangleInfo
page;
size_t
offset_x,
offset_y;
unsigned char
clipping,
flags,
name[257],
visible;
unsigned short
channels;
StringInfo
*info;
} LayerInfo;
/*
Forward declarations.
*/
static MagickBooleanType
WritePSDImage(const ImageInfo *,Image *,ExceptionInfo *);
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s P S D %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsPSD()() returns MagickTrue if the image format type, identified by the
% magick string, is PSD.
%
% The format of the IsPSD method is:
%
% MagickBooleanType IsPSD(const unsigned char *magick,const size_t length)
%
% A description of each parameter follows:
%
% o magick: compare image format pattern against these bytes.
%
% o length: Specifies the length of the magick string.
%
*/
/*
  Return MagickTrue when the magick byte signature begins with the PSD
  "8BPS" header, MagickFalse otherwise (including too-short buffers).
*/
static MagickBooleanType IsPSD(const unsigned char *magick,const size_t length)
{
  MagickBooleanType
    match;

  match=MagickFalse;
  if ((length >= 4) &&
      (LocaleNCompare((const char *) magick,"8BPS",4) == 0))
    match=MagickTrue;
  return(match);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e a d P S D I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReadPSDImage() reads an Adobe Photoshop image file and returns it. It
% allocates the memory necessary for the new Image structure and returns a
% pointer to the new image.
%
% The format of the ReadPSDImage method is:
%
% Image *ReadPSDImage(image_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Map an ImageMagick composite operator onto the 4-character blend-mode key
  Photoshop stores in a layer record.  When the blob is little-endian the
  key is returned byte-reversed (e.g. "idiv" -> "vidi") so it lands in the
  file in the expected byte order.  Unrecognized operators fall back to
  "norm" (normal/over).
*/
static const char *CompositeOperatorToPSDBlendMode(Image *image)
{
switch (image->compose)
{
case ColorBurnCompositeOp:
return(image->endian == LSBEndian ? "vidi" : "idiv");
case ColorDodgeCompositeOp:
return(image->endian == LSBEndian ? " vid" : "div ");
case ColorizeCompositeOp:
return(image->endian == LSBEndian ? "rloc" : "colr");
case DarkenCompositeOp:
return(image->endian == LSBEndian ? "krad" : "dark");
case DifferenceCompositeOp:
return(image->endian == LSBEndian ? "ffid" : "diff");
case DissolveCompositeOp:
return(image->endian == LSBEndian ? "ssid" : "diss");
case ExclusionCompositeOp:
return(image->endian == LSBEndian ? "dums" : "smud");
case HardLightCompositeOp:
return(image->endian == LSBEndian ? "tiLh" : "hLit");
case HardMixCompositeOp:
return(image->endian == LSBEndian ? "xiMh" : "hMix");
case HueCompositeOp:
return(image->endian == LSBEndian ? " euh" : "hue ");
case LightenCompositeOp:
return(image->endian == LSBEndian ? "etil" : "lite");
case LinearBurnCompositeOp:
return(image->endian == LSBEndian ? "nrbl" : "lbrn");
case LinearDodgeCompositeOp:
return(image->endian == LSBEndian ? "gddl" : "lddg");
case LinearLightCompositeOp:
return(image->endian == LSBEndian ? "tiLl" : "lLit");
case LuminizeCompositeOp:
return(image->endian == LSBEndian ? " mul" : "lum ");
case MultiplyCompositeOp:
return(image->endian == LSBEndian ? " lum" : "mul ");
case OverlayCompositeOp:
return(image->endian == LSBEndian ? "revo" : "over");
case PinLightCompositeOp:
return(image->endian == LSBEndian ? "tiLp" : "pLit");
case SaturateCompositeOp:
return(image->endian == LSBEndian ? " tas" : "sat ");
case ScreenCompositeOp:
return(image->endian == LSBEndian ? "nrcs" : "scrn");
case SoftLightCompositeOp:
return(image->endian == LSBEndian ? "tiLs" : "sLit");
case VividLightCompositeOp:
return(image->endian == LSBEndian ? "tiLv" : "vLit");
case OverCompositeOp:
default:
return(image->endian == LSBEndian ? "mron" : "norm");
}
}
/*
For some reason Photoshop seems to blend semi-transparent pixels with white.
This method reverts the blending. This can be disabled by setting the
option 'psd:alpha-unblend' to off.
*/
/*
  Undo Photoshop's blend-with-white of semi-transparent sRGB pixels (see the
  comment above).  Only runs for blended-alpha sRGB images and can be turned
  off with the image option "psd:alpha-unblend"=off.  Returns MagickTrue on
  success, MagickFalse when any row could not be fetched or synced.
*/
static MagickBooleanType CorrectPSDAlphaBlend(const ImageInfo *image_info,
Image *image,ExceptionInfo* exception)
{
const char
*option;
MagickBooleanType
status;
ssize_t
y;
if ((image->alpha_trait != BlendPixelTrait) ||
(image->colorspace != sRGBColorspace))
return(MagickTrue);
option=GetImageOption(image_info,"psd:alpha-unblend");
if (IsStringFalse(option) != MagickFalse)
return(MagickTrue);
status=MagickTrue;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
Quantum
*magick_restrict q;
ssize_t
x;
if (status == MagickFalse)
continue;
q=GetAuthenticPixels(image,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
double
gamma;
ssize_t
i;
gamma=QuantumScale*GetPixelAlpha(image, q);
/* fully transparent and fully opaque pixels need no correction */
if (gamma != 0.0 && gamma != 1.0)
{
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
if (channel != AlphaPixelChannel)
q[i]=ClampToQuantum((q[i]-((1.0-gamma)*QuantumRange))/gamma);
}
}
q+=GetPixelChannels(image);
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
status=MagickFalse;
}
return(status);
}
/*
  Map a PSD compression tag onto the matching MagickCore CompressionType.
  Both Zip variants collapse to ZipCompression; anything unrecognized is
  reported as NoCompression.
*/
static inline CompressionType ConvertPSDCompression(
PSDCompressionType compression)
{
  if (compression == RLE)
    return RLECompression;
  if ((compression == ZipWithPrediction) ||
      (compression == ZipWithoutPrediction))
    return ZipCompression;
  return NoCompression;
}
/*
  Fold a layer's constant opacity into the image's alpha channel
  (revert==MagickFalse) or divide it back out again (revert!=MagickFalse).
  OpaqueAlpha is a no-op; an image without blended alpha is first given an
  opaque alpha channel.  Returns MagickFalse if any row fails.
*/
static MagickBooleanType ApplyPSDLayerOpacity(Image *image,Quantum opacity,
MagickBooleanType revert,ExceptionInfo *exception)
{
MagickBooleanType
status;
ssize_t
y;
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" applying layer opacity %.20g", (double) opacity);
if (opacity == OpaqueAlpha)
return(MagickTrue);
if (image->alpha_trait != BlendPixelTrait)
(void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
status=MagickTrue;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
Quantum
*magick_restrict q;
ssize_t
x;
if (status == MagickFalse)
continue;
q=GetAuthenticPixels(image,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
/* apply: alpha *= opacity/QuantumRange; revert: alpha /= opacity */
if (revert == MagickFalse)
SetPixelAlpha(image,ClampToQuantum(QuantumScale*
GetPixelAlpha(image,q)*opacity),q);
else if (opacity > 0)
SetPixelAlpha(image,ClampToQuantum((double) QuantumRange*
GetPixelAlpha(image,q)/(MagickRealType) opacity),q);
q+=GetPixelChannels(image);
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
status=MagickFalse;
}
return(status);
}
/*
  Multiply the image's alpha channel by the intensity of the layer's opacity
  mask (revert==MagickFalse), or undo that multiplication
  (revert!=MagickFalse).  The mask is first composited over a solid
  `background` canvas the size of the image, so areas outside the mask's
  page rectangle take the mask's default background value.
*/
static MagickBooleanType ApplyPSDOpacityMask(Image *image,const Image *mask,
Quantum background,MagickBooleanType revert,ExceptionInfo *exception)
{
Image
*complete_mask;
MagickBooleanType
status;
PixelInfo
color;
ssize_t
y;
/* nothing to do when the image carries no alpha channel */
if (image->alpha_trait == UndefinedPixelTrait)
return(MagickTrue);
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" applying opacity mask");
complete_mask=CloneImage(image,0,0,MagickTrue,exception);
if (complete_mask == (Image *) NULL)
return(MagickFalse);
complete_mask->alpha_trait=BlendPixelTrait;
GetPixelInfo(complete_mask,&color);
color.red=(MagickRealType) background;
(void) SetImageColor(complete_mask,&color,exception);
status=CompositeImage(complete_mask,mask,OverCompositeOp,MagickTrue,
mask->page.x-image->page.x,mask->page.y-image->page.y,exception);
if (status == MagickFalse)
{
complete_mask=DestroyImage(complete_mask);
return(status);
}
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
Quantum
*magick_restrict q;
Quantum
*p;
ssize_t
x;
if (status == MagickFalse)
continue;
q=GetAuthenticPixels(image,0,y,image->columns,1,exception);
p=GetAuthenticPixels(complete_mask,0,y,complete_mask->columns,1,exception);
if ((q == (Quantum *) NULL) || (p == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
MagickRealType
alpha,
intensity;
alpha=(MagickRealType) GetPixelAlpha(image,q);
intensity=GetPixelIntensity(complete_mask,p);
/* apply: alpha *= intensity/QuantumRange; revert: divide it out */
if (revert == MagickFalse)
SetPixelAlpha(image,ClampToQuantum(intensity*(QuantumScale*alpha)),q);
else if (intensity > 0)
SetPixelAlpha(image,ClampToQuantum((alpha/intensity)*QuantumRange),q);
q+=GetPixelChannels(image);
p+=GetPixelChannels(complete_mask);
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
status=MagickFalse;
}
complete_mask=DestroyImage(complete_mask);
return(status);
}
/*
  Stash the layer's opacity-mask image in the image registry under a short
  random key, and record that key in the "psd:opacity-mask" artifact so the
  writer can retrieve the mask later.

  Bug fix: the original requested only a 3-byte random key
  (GetRandomKey(random_info,2+1)) yet wrote key[8] and key[9], indexing past
  the allocated datum.  Request a 10-byte key instead so slots 0..7 hold
  random bytes, slot 8 the mask background, and slot 9 the terminating NUL.
*/
static void PreservePSDOpacityMask(Image *image,LayerInfo* layer_info,
  ExceptionInfo *exception)
{
  char
    *key;

  RandomInfo
    *random_info;

  StringInfo
    *key_info;

  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " preserving opacity mask");
  random_info=AcquireRandomInfo();
  key_info=GetRandomKey(random_info,9+1);
  key=(char *) GetStringInfoDatum(key_info);
  key[8]=(char) layer_info->mask.background;
  key[9]='\0';
  /* translate the mask into the layer's coordinate system */
  layer_info->mask.image->page.x+=layer_info->page.x;
  layer_info->mask.image->page.y+=layer_info->page.y;
  (void) SetImageRegistry(ImageRegistryType,(const char *) key,
    layer_info->mask.image,exception);
  (void) SetImageArtifact(layer_info->image,"psd:opacity-mask",
    (const char *) key);
  key_info=DestroyStringInfo(key_info);
  random_info=DestroyRandomInfo(random_info);
}
/*
  Decompress a PackBits-style RLE stream (`compact_pixels`, holding
  `number_compact_pixels` bytes) into `pixels`.  A control byte <= 127
  introduces a literal run of control+1 source bytes; a control byte >= 129
  replicates the next source byte 257-control times; 128 is a no-op.
  `depth` is the bits per sample: 1-, 2- and 4-bit samples are unpacked to
  one output byte each (1-bit samples are additionally inverted, set bit ->
  0, clear bit -> 255).  At most `number_pixels` output bytes are produced;
  the return value is the number of bytes actually written.
*/
static ssize_t DecodePSDPixels(const size_t number_compact_pixels,
  const unsigned char *compact_pixels,const ssize_t depth,
  const size_t number_pixels,unsigned char *pixels)
{
  int
    byte;

  ssize_t
    j,
    remaining,
    run,
    written;

  byte=0;
  written=0;
  remaining=(ssize_t) number_compact_pixels;
  while ((remaining > 1) && (written < (ssize_t) number_pixels))
  {
    size_t
      control;

    remaining--;
    control=(size_t) (*compact_pixels++);
    if (control == 128)
      continue;  /* no-op control byte */
    if (control > 128)
      {
        /* replicate run: one source byte, repeated 257-control times */
        run=(ssize_t) (256-control+1);
        if (remaining == 0)
          return(written);
        remaining--;
        byte=(int) (*compact_pixels++);
      }
    else
      run=(ssize_t) (control+1);  /* literal run of control+1 bytes */
    for (j=0; j < run; j++)
    {
      if (control <= 128)
        {
          /* literal run consumes one source byte per iteration */
          if (remaining == 0)
            return(written);
          remaining--;
          byte=(int) (*compact_pixels++);
        }
      switch (depth)
      {
        case 1:
        {
          ssize_t
            bit;

          if ((written+8) > (ssize_t) number_pixels)
            return(written);
          written+=8;
          for (bit=7; bit >= 0; bit--)
            *pixels++=((byte >> bit) & 0x01) ? 0U : 255U;
          break;
        }
        case 2:
        {
          ssize_t
            shift;

          if ((written+4) > (ssize_t) number_pixels)
            return(written);
          written+=4;
          for (shift=6; shift >= 0; shift-=2)
            *pixels++=(unsigned char) ((byte >> shift) & 0x03);
          break;
        }
        case 4:
        {
          if ((written+2) > (ssize_t) number_pixels)
            return(written);
          written+=2;
          *pixels++=(unsigned char) ((byte >> 4) & 0x0f);
          *pixels++=(unsigned char) (byte & 0x0f);
          break;
        }
        default:
        {
          if ((written+1) > (ssize_t) number_pixels)
            return(written);
          written++;
          *pixels++=(unsigned char) byte;
          break;
        }
      }
    }
  }
  return(written);
}
/*
  Release every layer image, mask image and additional-info blob attached
  to the layer_info array, then free the array itself.  Always returns NULL
  so callers can write `layer_info=DestroyLayerInfo(layer_info,n);`.
*/
static inline LayerInfo *DestroyLayerInfo(LayerInfo *layer_info,
  const ssize_t number_layers)
{
  ssize_t
    j;

  for (j=0; j < number_layers; j++)
  {
    LayerInfo
      *layer;

    layer=layer_info+j;
    if (layer->image != (Image *) NULL)
      layer->image=DestroyImage(layer->image);
    if (layer->mask.image != (Image *) NULL)
      layer->mask.image=DestroyImage(layer->mask.image);
    if (layer->info != (StringInfo *) NULL)
      layer->info=DestroyStringInfo(layer->info);
  }
  return (LayerInfo *) RelinquishMagickMemory(layer_info);
}
/*
  Bytes per stored sample for this image: 2 for colormapped images with
  more than 256 colors; otherwise 4, 2 or 1 depending on whether the depth
  exceeds 16, exceeds 8, or is at most 8 bits.
*/
static inline size_t GetPSDPacketSize(const Image *image)
{
  if ((image->storage_class == PseudoClass) && (image->colors > 256))
    return(2);
  if (image->depth > 16)
    return(4);
  return(image->depth > 8 ? 2 : 1);
}
/*
  Read a PSD length field from the blob: 32 bits for version-1 (PSD)
  files, 64 bits for version-2 (PSB) files.
*/
static inline MagickSizeType GetPSDSize(const PSDInfo *psd_info,Image *image)
{
  if (psd_info->version != 1)
    return((MagickSizeType) ReadBlobLongLong(image));
  return((MagickSizeType) ReadBlobLong(image));
}
/*
  Bytes in one uncompressed scanline: 1-bit images pack eight samples per
  byte; every other depth stores one packet per column.
*/
static inline size_t GetPSDRowSize(Image *image)
{
  size_t
    packets;

  packets=(image->depth == 1) ? (image->columns+7)/8 : image->columns;
  return(packets*GetPSDPacketSize(image));
}
/*
  Human-readable name for a PSD color mode (used in debug logging);
  "unknown" for any unlisted value.
*/
static const char *ModeToString(PSDImageType type)
{
  static const struct
  {
    PSDImageType
      mode;

    const char
      *name;
  } modes[] =
  {
    { BitmapMode, "Bitmap" },
    { GrayscaleMode, "Grayscale" },
    { IndexedMode, "Indexed" },
    { RGBMode, "RGB" },
    { CMYKMode, "CMYK" },
    { MultichannelMode, "Multichannel" },
    { DuotoneMode, "Duotone" },
    { LabMode, "L*A*B" }
  };

  size_t
    j;

  for (j=0; j < sizeof(modes)/sizeof(modes[0]); j++)
    if (modes[j].mode == type)
      return modes[j].name;
  return "unknown";
}
/*
  Negate every channel except alpha — presumably because PSD stores CMYK
  samples inverted (confirm against the PSD specification).  The image's
  channel mask is narrowed for the negation and then restored.
*/
static MagickBooleanType NegateCMYK(Image *image,ExceptionInfo *exception)
{
ChannelType
channel_mask;
MagickBooleanType
status;
channel_mask=SetImageChannelMask(image,(ChannelType)(AllChannels &~
AlphaChannel));
status=NegateImage(image,MagickFalse,exception);
(void) SetImageChannelMask(image,channel_mask);
return(status);
}
/*
  Walk the "8BIM" image-resource blocks in blocks[0..length-1], harvesting
  the resolution resource (id 0x03ed) into image->resolution and clearing
  psd_info->has_merged_image when the id 0x0421 resource says no merged
  image is present.  The raw block data is returned as an "8bim" profile;
  NULL is returned for inputs too short to hold a block.
  NOTE(review): the BlobToStringInfo result is used without a NULL check —
  verify it cannot fail here, or guard it.
*/
static StringInfo *ParseImageResourceBlocks(PSDInfo *psd_info,Image *image,
const unsigned char *blocks,size_t length)
{
const unsigned char
*p;
ssize_t
offset;
StringInfo
*profile;
unsigned char
name_length;
unsigned int
count;
unsigned short
id,
short_sans;
if (length < 16)
return((StringInfo *) NULL);
profile=BlobToStringInfo((const unsigned char *) NULL,length);
SetStringInfoDatum(profile,blocks);
SetStringInfoName(profile,"8bim");
for (p=blocks; (p >= blocks) && (p < (blocks+length-7)); )
{
/* each block: "8BIM", 2-byte id, Pascal name (padded even), 4-byte size */
if (LocaleNCompare((const char *) p,"8BIM",4) != 0)
break;
p+=4;
p=PushShortPixel(MSBEndian,p,&id);
p=PushCharPixel(p,&name_length);
if ((name_length % 2) == 0)
name_length++;
p+=name_length;
if (p > (blocks+length-4))
break;
p=PushLongPixel(MSBEndian,p,&count);
offset=(ssize_t) count;
if (((p+offset) < blocks) || ((p+offset) > (blocks+length)))
break;
switch (id)
{
case 0x03ed:
{
unsigned short
resolution;
/*
Resolution info.
*/
if (offset < 16)
break;
p=PushShortPixel(MSBEndian,p,&resolution);
image->resolution.x=(double) resolution;
(void) FormatImageProperty(image,"tiff:XResolution","%*g",
GetMagickPrecision(),image->resolution.x);
p=PushShortPixel(MSBEndian,p,&short_sans);
p=PushShortPixel(MSBEndian,p,&short_sans);
p=PushShortPixel(MSBEndian,p,&short_sans);
p=PushShortPixel(MSBEndian,p,&resolution);
image->resolution.y=(double) resolution;
(void) FormatImageProperty(image,"tiff:YResolution","%*g",
GetMagickPrecision(),image->resolution.y);
p=PushShortPixel(MSBEndian,p,&short_sans);
p=PushShortPixel(MSBEndian,p,&short_sans);
p=PushShortPixel(MSBEndian,p,&short_sans);
image->units=PixelsPerInchResolution;
break;
}
case 0x0421:
{
/* version-info resource: byte 4 flags whether a merged image exists */
if ((offset > 4) && (*(p+4) == 0))
psd_info->has_merged_image=MagickFalse;
p+=offset;
break;
}
default:
{
p+=offset;
break;
}
}
/* resource data is padded to an even byte count */
if ((offset & 0x01) != 0)
p++;
}
return(profile);
}
static CompositeOperator PSDBlendModeToCompositeOperator(const char *mode)
{
if (mode == (const char *) NULL)
return(OverCompositeOp);
if (LocaleNCompare(mode,"norm",4) == 0)
return(OverCompositeOp);
if (LocaleNCompare(mode,"mul ",4) == 0)
return(MultiplyCompositeOp);
if (LocaleNCompare(mode,"diss",4) == 0)
return(DissolveCompositeOp);
if (LocaleNCompare(mode,"diff",4) == 0)
return(DifferenceCompositeOp);
if (LocaleNCompare(mode,"dark",4) == 0)
return(DarkenCompositeOp);
if (LocaleNCompare(mode,"lite",4) == 0)
return(LightenCompositeOp);
if (LocaleNCompare(mode,"hue ",4) == 0)
return(HueCompositeOp);
if (LocaleNCompare(mode,"sat ",4) == 0)
return(SaturateCompositeOp);
if (LocaleNCompare(mode,"colr",4) == 0)
return(ColorizeCompositeOp);
if (LocaleNCompare(mode,"lum ",4) == 0)
return(LuminizeCompositeOp);
if (LocaleNCompare(mode,"scrn",4) == 0)
return(ScreenCompositeOp);
if (LocaleNCompare(mode,"over",4) == 0)
return(OverlayCompositeOp);
if (LocaleNCompare(mode,"hLit",4) == 0)
return(HardLightCompositeOp);
if (LocaleNCompare(mode,"sLit",4) == 0)
return(SoftLightCompositeOp);
if (LocaleNCompare(mode,"smud",4) == 0)
return(ExclusionCompositeOp);
if (LocaleNCompare(mode,"div ",4) == 0)
return(ColorDodgeCompositeOp);
if (LocaleNCompare(mode,"idiv",4) == 0)
return(ColorBurnCompositeOp);
if (LocaleNCompare(mode,"lbrn",4) == 0)
return(LinearBurnCompositeOp);
if (LocaleNCompare(mode,"lddg",4) == 0)
return(LinearDodgeCompositeOp);
if (LocaleNCompare(mode,"lLit",4) == 0)
return(LinearLightCompositeOp);
if (LocaleNCompare(mode,"vLit",4) == 0)
return(VividLightCompositeOp);
if (LocaleNCompare(mode,"pLit",4) == 0)
return(PinLightCompositeOp);
if (LocaleNCompare(mode,"hMix",4) == 0)
return(HardMixCompositeOp);
return(OverCompositeOp);
}
/*
  Read `length` bytes from the blob into p.  When the full count was read
  and the blob is not big-endian, reverse the buffer in place (PSD keys are
  stored most-significant byte first).  Returns the byte count reported by
  ReadBlob.
*/
static inline ssize_t ReadPSDString(Image *image,char *p,const size_t length)
{
  ssize_t
    count;

  count=ReadBlob(image,length,(unsigned char *) p);
  if ((count == (ssize_t) length) && (image->endian != MSBEndian))
    {
      char
        *head,
        *tail,
        swap;

      head=p;
      tail=p+length-1;
      while (head < tail)
      {
        swap=*head;
        *head=*tail;
        *tail=swap;
        head++;
        tail--;
      }
    }
  return(count);
}
/*
  Store one decoded sample into the pixel at q.  For PseudoClass (indexed)
  images a sample on the gray channel is treated as a colormap index
  (rescaled for 1-byte packets, then clamped to the colormap) and a sample
  on the alpha channel updates the resolved colormap entry's alpha; the
  colormap color is then written to the pixel.  For DirectClass images the
  sample is stored directly in the named channel.
*/
static inline void SetPSDPixel(Image *image,const PixelChannel channel,
const size_t packet_size,const Quantum pixel,Quantum *q,
ExceptionInfo *exception)
{
if (image->storage_class == PseudoClass)
{
PixelInfo
*color;
ssize_t
index;
if (channel == GrayPixelChannel)
{
index=(ssize_t) pixel;
if (packet_size == 1)
index=(ssize_t) ScaleQuantumToChar((Quantum) index);
index=ConstrainColormapIndex(image,index,exception);
SetPixelIndex(image,(Quantum) index,q);
}
else
{
/* keep using the index already stored in the pixel */
index=(ssize_t) GetPixelIndex(image,q);
index=ConstrainColormapIndex(image,index,exception);
}
color=image->colormap+index;
if (channel == AlphaPixelChannel)
color->alpha=(MagickRealType) pixel;
SetPixelViaPixelInfo(image,color,q);
}
else
SetPixelChannel(image,channel,pixel,q);
}
/*
  Decode one scanline of raw channel samples (`pixels`) into row `row` of
  the image.  The packet size chooses the sample decoder: 1 byte, 2-byte
  MSB short, or 4-byte MSB float.  For 1-bit images each byte expands into
  up to eight pixels, set bits mapping to 0 (black) and clear bits to
  QuantumRange.  Returns the result of SyncAuthenticPixels.
*/
static MagickBooleanType ReadPSDChannelPixels(Image *image,const ssize_t row,
const PixelChannel channel,const unsigned char *pixels,
ExceptionInfo *exception)
{
Quantum
pixel;
const unsigned char
*p;
Quantum
*q;
ssize_t
x;
size_t
packet_size;
p=pixels;
q=GetAuthenticPixels(image,0,row,image->columns,1,exception);
if (q == (Quantum *) NULL)
return MagickFalse;
packet_size=GetPSDPacketSize(image);
for (x=0; x < (ssize_t) image->columns; x++)
{
if (packet_size == 1)
pixel=ScaleCharToQuantum(*p++);
else
if (packet_size == 2)
{
unsigned short
nibble;
p=PushShortPixel(MSBEndian,p,&nibble);
pixel=ScaleShortToQuantum(nibble);
}
else
{
MagickFloatType
nibble;
p=PushFloatPixel(MSBEndian,p,&nibble);
pixel=ClampToQuantum(((MagickRealType) QuantumRange)*nibble);
}
if (image->depth > 1)
{
SetPSDPixel(image,channel,packet_size,pixel,q,exception);
q+=GetPixelChannels(image);
}
else
{
ssize_t
bit,
number_bits;
/* 1-bit image: expand up to 8 packed samples from this byte */
number_bits=(ssize_t) image->columns-x;
if (number_bits > 8)
number_bits=8;
for (bit = 0; bit < (ssize_t) number_bits; bit++)
{
SetPSDPixel(image,channel,packet_size,(((unsigned char) pixel)
& (0x01 << (7-bit))) != 0 ? 0 : QuantumRange,q,exception);
q+=GetPixelChannels(image);
x++;
}
/* x advanced once per emitted bit; step back one to compensate
for the outer loop's own x++ unless the row is finished */
if (x != (ssize_t) image->columns)
x--;
continue;
}
}
return(SyncAuthenticPixels(image,exception));
}
/*
  Read an uncompressed (Raw) channel: one full scanline per image row is
  read from the blob and decoded via ReadPSDChannelPixels.  Returns
  MagickFalse on a short read, a row-decode failure, or (via
  ThrowBinaryException) an allocation failure.
*/
static MagickBooleanType ReadPSDChannelRaw(Image *image,const PixelChannel channel,
ExceptionInfo *exception)
{
MagickBooleanType
status;
size_t
row_size;
ssize_t
count,
y;
unsigned char
*pixels;
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" layer data is RAW");
row_size=GetPSDRowSize(image);
pixels=(unsigned char *) AcquireQuantumMemory(row_size,sizeof(*pixels));
if (pixels == (unsigned char *) NULL)
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
(void) memset(pixels,0,row_size*sizeof(*pixels));
status=MagickTrue;
for (y=0; y < (ssize_t) image->rows; y++)
{
/* pessimistically mark failure; cleared when the row decodes */
status=MagickFalse;
count=ReadBlob(image,row_size,pixels);
if (count != (ssize_t) row_size)
break;
status=ReadPSDChannelPixels(image,y,channel,pixels,exception);
if (status == MagickFalse)
break;
}
pixels=(unsigned char *) RelinquishMagickMemory(pixels);
return(status);
}
static inline MagickOffsetType *ReadPSDRLESizes(Image *image,
  const PSDInfo *psd_info,const size_t size)
{
  MagickOffsetType
    *sizes;

  ssize_t
    i;

  /*
    Read the table of per-scanline compressed byte counts that precedes RLE
    channel data: 16-bit counts for PSD (version 1), 32-bit counts for PSB
    (version 2).  Returns NULL when the table cannot be allocated.
  */
  sizes=(MagickOffsetType *) AcquireQuantumMemory(size,sizeof(*sizes));
  if (sizes == (MagickOffsetType *) NULL)
    return((MagickOffsetType *) NULL);
  for (i=0; i < (ssize_t) size; i++)
    sizes[i]=(MagickOffsetType) ((psd_info->version == 1) ?
      ReadBlobShort(image) : ReadBlobLong(image));
  return(sizes);
}
static MagickBooleanType ReadPSDChannelRLE(Image *image,
  const PixelChannel channel,MagickOffsetType *sizes,
  ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  size_t
    length,
    row_size;

  ssize_t
    count,
    y;

  unsigned char
    *compact_pixels,
    *pixels;

  /*
    Decode an RLE (PackBits) compressed channel.  `sizes' holds one
    compressed byte count per scanline; each row is read into
    compact_pixels, expanded into pixels, and pushed into the pixel cache.
    Returns MagickFalse when the blob is truncated or a scanline fails to
    decompress to exactly one row.
  */
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " layer data is RLE compressed");
  row_size=GetPSDRowSize(image);
  pixels=(unsigned char *) AcquireQuantumMemory(row_size,sizeof(*pixels));
  if (pixels == (unsigned char *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  /*
    Size the scratch buffer to the largest compressed scanline; reject
    wildly oversized counts before allocating.
  */
  length=0;
  for (y=0; y < (ssize_t) image->rows; y++)
    if ((MagickOffsetType) length < sizes[y])
      length=(size_t) sizes[y];
  if (length > (row_size+2048)) /* arbitrary number */
    {
      pixels=(unsigned char *) RelinquishMagickMemory(pixels);
      ThrowBinaryException(ResourceLimitError,"InvalidLength",image->filename);
    }
  /*
    Fix: size the allocation from *compact_pixels, not *pixels -- the two
    element types happen to match today, but the allocation must track the
    buffer it creates.
  */
  compact_pixels=(unsigned char *) AcquireQuantumMemory(length,
    sizeof(*compact_pixels));
  if (compact_pixels == (unsigned char *) NULL)
    {
      pixels=(unsigned char *) RelinquishMagickMemory(pixels);
      ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
    }
  (void) memset(compact_pixels,0,length*sizeof(*compact_pixels));
  status=MagickTrue;
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    status=MagickFalse;
    count=ReadBlob(image,(size_t) sizes[y],compact_pixels);
    if (count != (ssize_t) sizes[y])
      break;
    count=DecodePSDPixels((size_t) sizes[y],compact_pixels,
      (ssize_t) (image->depth == 1 ? 123456 : image->depth),row_size,pixels);
    if (count != (ssize_t) row_size)
      break;
    status=ReadPSDChannelPixels(image,y,channel,pixels,exception);
    if (status == MagickFalse)
      break;
  }
  compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
  pixels=(unsigned char *) RelinquishMagickMemory(pixels);
  return(status);
}
#ifdef MAGICKCORE_ZLIB_DELEGATE
static void Unpredict8Bit(const Image *image,unsigned char *pixels,
  const size_t count,const size_t row_size)
{
  size_t
    bytes_left;

  unsigned char
    *q;

  /*
    Reverse the horizontal delta predictor on 8-bit samples: each byte in a
    row stores the difference from its left neighbour, so a running
    left-to-right sum restores the original values.  `count' is the total
    buffer size, processed one row_size-byte row at a time.
  */
  q=pixels;
  bytes_left=count;
  while (bytes_left > 0)
  {
    size_t
      run;

    for (run=image->columns-1; run != 0; run--)
    {
      q[1]=(unsigned char) (q[1]+q[0]);
      q++;
    }
    q++;
    bytes_left-=row_size;
  }
}
static void Unpredict16Bit(const Image *image,unsigned char *pixels,
  const size_t count,const size_t row_size)
{
  size_t
    bytes_left;

  unsigned char
    *q;

  /*
    Reverse the horizontal delta predictor on big-endian 16-bit samples.
    The sum is carried out on the high/low byte pair of each sample,
    propagating the low-byte carry into the high byte.
  */
  q=pixels;
  bytes_left=count;
  while (bytes_left > 0)
  {
    size_t
      run;

    for (run=image->columns-1; run != 0; run--)
    {
      /* High byte first (uses the not-yet-updated low bytes for carry). */
      q[2]=(unsigned char) (q[2]+q[0]+((q[1]+q[3]) >> 8));
      q[3]=(unsigned char) (q[3]+q[1]);
      q+=2;
    }
    q+=2;
    bytes_left-=row_size;
  }
}
static void Unpredict32Bit(const Image *image,unsigned char *pixels,
unsigned char *output_pixels,const size_t row_size)
{
unsigned char
*p,
*q;
ssize_t
y;
size_t
offset1,
offset2,
offset3,
remaining;
unsigned char
*start;
offset1=image->columns;
offset2=2*offset1;
offset3=3*offset1;
p=pixels;
q=output_pixels;
for (y=0; y < (ssize_t) image->rows; y++)
{
start=p;
remaining=row_size;
while (--remaining)
{
*(p+1)+=*p;
p++;
}
p=start;
remaining=image->columns;
while (remaining--)
{
*(q++)=*p;
*(q++)=*(p+offset1);
*(q++)=*(p+offset2);
*(q++)=*(p+offset3);
p++;
}
p=start+row_size;
}
}
static MagickBooleanType ReadPSDChannelZip(Image *image,
  const PixelChannel channel,const PSDCompressionType compression,
  const size_t compact_size,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  unsigned char
    *p;

  size_t
    count,
    packet_size,
    row_size;

  ssize_t
    y;

  unsigned char
    *compact_pixels,
    *pixels;

  z_stream
    stream;

  /*
    Decode a ZIP (deflate) compressed channel: inflate compact_size bytes
    into a rows*row_size buffer, optionally undo the per-row delta
    predictor, then push each scanline into the pixel cache.
  */
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " layer data is ZIP compressed");
  if ((MagickSizeType) compact_size > GetBlobSize(image))
    ThrowBinaryException(CorruptImageError,"UnexpectedEndOfFile",
      image->filename);
  compact_pixels=(unsigned char *) AcquireQuantumMemory(compact_size,
    sizeof(*compact_pixels));
  if (compact_pixels == (unsigned char *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  packet_size=GetPSDPacketSize(image);
  row_size=image->columns*packet_size;
  count=image->rows*row_size;
  pixels=(unsigned char *) AcquireQuantumMemory(count,sizeof(*pixels));
  if (pixels == (unsigned char *) NULL)
    {
      compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
      ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
    }
  if (ReadBlob(image,compact_size,compact_pixels) != (ssize_t) compact_size)
    {
      pixels=(unsigned char *) RelinquishMagickMemory(pixels);
      compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
      ThrowBinaryException(CorruptImageError,"UnexpectedEndOfFile",
        image->filename);
    }
  memset(&stream,0,sizeof(stream));
  stream.data_type=Z_BINARY;
  stream.next_in=(Bytef *)compact_pixels;
  stream.avail_in=(uInt) compact_size;
  stream.next_out=(Bytef *)pixels;
  stream.avail_out=(uInt) count;
  if (inflateInit(&stream) == Z_OK)
    {
      int
        ret;

      while (stream.avail_out > 0)
      {
        ret=inflate(&stream,Z_SYNC_FLUSH);
        if ((ret != Z_OK) && (ret != Z_STREAM_END))
          {
            (void) inflateEnd(&stream);
            compact_pixels=(unsigned char *) RelinquishMagickMemory(
              compact_pixels);
            pixels=(unsigned char *) RelinquishMagickMemory(pixels);
            return(MagickFalse);
          }
        if (ret == Z_STREAM_END)
          break;
      }
      (void) inflateEnd(&stream);
    }
  if (compression == ZipWithPrediction)
    {
      if (packet_size == 1)
        Unpredict8Bit(image,pixels,count,row_size);
      else if (packet_size == 2)
        Unpredict16Bit(image,pixels,count,row_size);
      else if (packet_size == 4)
        {
          unsigned char
            *output_pixels;

          /*
            32-bit prediction deinterleaves into a second buffer.
          */
          output_pixels=(unsigned char *) AcquireQuantumMemory(count,
            sizeof(*output_pixels));
          /*
            Fix: test the buffer we just allocated (output_pixels), not
            `pixels'; the old check could pass a NULL output buffer to
            Unpredict32Bit() and crash on allocation failure.
          */
          if (output_pixels == (unsigned char *) NULL)
            {
              compact_pixels=(unsigned char *) RelinquishMagickMemory(
                compact_pixels);
              pixels=(unsigned char *) RelinquishMagickMemory(pixels);
              ThrowBinaryException(ResourceLimitError,
                "MemoryAllocationFailed",image->filename);
            }
          Unpredict32Bit(image,pixels,output_pixels,row_size);
          pixels=(unsigned char *) RelinquishMagickMemory(pixels);
          pixels=output_pixels;
        }
    }
  status=MagickTrue;
  p=pixels;
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    status=ReadPSDChannelPixels(image,y,channel,p,exception);
    if (status == MagickFalse)
      break;
    p+=row_size;
  }
  compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
  pixels=(unsigned char *) RelinquishMagickMemory(pixels);
  return(status);
}
#endif
static MagickBooleanType ReadPSDChannel(Image *image,
  const ImageInfo *image_info,const PSDInfo *psd_info,LayerInfo* layer_info,
  const size_t channel_index,const PSDCompressionType compression,
  ExceptionInfo *exception)
{
  Image
    *channel_image,
    *mask;

  MagickOffsetType
    end_offset,
    offset;

  MagickBooleanType
    status;

  PixelChannel
    channel;

  /*
    Read one channel of a layer.  Unsupported channels are skipped by
    seeking past their data.  A ReadMaskPixelChannel is decoded into a
    separate grayscale clone that is stored as layer_info->mask.image; all
    other channels decode straight into the layer image.  The channel's
    recorded size includes the 2-byte compression code already consumed by
    the caller, hence the -2 below.
  */
  end_offset=(MagickOffsetType) layer_info->channel_info[channel_index].size-2;
  if (layer_info->channel_info[channel_index].supported == MagickFalse)
    {
      (void) SeekBlob(image,end_offset,SEEK_CUR);
      return(MagickTrue);
    }
  channel_image=image;
  channel=layer_info->channel_info[channel_index].channel;
  mask=(Image *) NULL;
  if (channel == ReadMaskPixelChannel)
    {
      const char
        *option;

      /*
        Ignore mask that is not a user supplied layer mask, if the mask is
        disabled or if the flags have unsupported values.
      */
      option=GetImageOption(image_info,"psd:preserve-opacity-mask");
      if ((layer_info->mask.flags > 2) || ((layer_info->mask.flags & 0x02) &&
          (IsStringTrue(option) == MagickFalse)) ||
          (layer_info->mask.page.width < 1) ||
          (layer_info->mask.page.height < 1))
        {
          (void) SeekBlob(image,end_offset,SEEK_CUR);
          return(MagickTrue);
        }
      mask=CloneImage(image,layer_info->mask.page.width,
        layer_info->mask.page.height,MagickFalse,exception);
      if (mask != (Image *) NULL)
        {
          (void) ResetImagePixels(mask,exception);
          (void) SetImageType(mask,GrayscaleType,exception);
          /* Decode the mask data into the clone as a gray channel. */
          channel_image=mask;
          channel=GrayPixelChannel;
        }
    }
  offset=TellBlob(image);
  status=MagickFalse;
  switch(compression)
  {
    case Raw:
      status=ReadPSDChannelRaw(channel_image,channel,exception);
      break;
    case RLE:
    {
      MagickOffsetType
        *sizes;

      sizes=ReadPSDRLESizes(channel_image,psd_info,channel_image->rows);
      if (sizes == (MagickOffsetType *) NULL)
        /* NOTE(review): throwing here leaks `mask' when it is set --
           confirm whether this path should destroy it first. */
        ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
          image->filename);
      status=ReadPSDChannelRLE(channel_image,channel,sizes,exception);
      sizes=(MagickOffsetType *) RelinquishMagickMemory(sizes);
    }
    break;
    case ZipWithPrediction:
    case ZipWithoutPrediction:
#ifdef MAGICKCORE_ZLIB_DELEGATE
      status=ReadPSDChannelZip(channel_image,channel,compression,
        (const size_t) end_offset,exception);
#else
      (void) ThrowMagickException(exception,GetMagickModule(),
        MissingDelegateWarning,"DelegateLibrarySupportNotBuiltIn",
        "'%s' (ZLIB)",image->filename);
#endif
      break;
    default:
      (void) ThrowMagickException(exception,GetMagickModule(),TypeWarning,
        "CompressionNotSupported","'%.20g'",(double) compression);
      break;
  }
  /*
    Seek to the end of this channel's data regardless of decode outcome so
    the next channel is read from the right position.
  */
  (void) SeekBlob(image,offset+end_offset,SEEK_SET);
  if (status == MagickFalse)
    {
      if (mask != (Image *) NULL)
        (void) DestroyImage(mask);
      ThrowBinaryException(CoderError,"UnableToDecompressImage",
        image->filename);
    }
  if (mask != (Image *) NULL)
    {
      if (layer_info->mask.image != (Image *) NULL)
        layer_info->mask.image=DestroyImage(layer_info->mask.image);
      layer_info->mask.image=mask;
    }
  return(status);
}
static MagickBooleanType GetPixelChannelFromPsdIndex(const PSDInfo *psd_info,
  ssize_t index,PixelChannel *channel)
{
  ssize_t
    color_channels;

  /*
    Translate a PSD channel index into a PixelChannel.  An index equal to
    the colorspace's color-channel count denotes alpha, a larger index maps
    onto the meta channels, and -2 (as stored in the file) denotes the
    layer mask.  Returns MagickFalse when the index is out of range.
  */
  *channel=RedPixelChannel;
  color_channels=0;
  switch (psd_info->mode)
  {
    case BitmapMode:
    case IndexedMode:
    case GrayscaleMode:
    {
      color_channels=1;
      break;
    }
    case LabMode:
    case MultichannelMode:
    case RGBMode:
    {
      color_channels=3;
      break;
    }
    case CMYKMode:
    {
      color_channels=4;
      break;
    }
  }
  if (color_channels != 0)
    {
      if (index == color_channels)
        index=(-1);
      else
        if (index > color_channels)
          index=StartMetaPixelChannel+index-color_channels-1;
    }
  if ((index < -2) || (index >= MaxPixelChannels))
    return(MagickFalse);
  if (index == -1)
    *channel=AlphaPixelChannel;
  else
    if (index == -2)
      *channel=ReadMaskPixelChannel;
    else
      *channel=(PixelChannel) index;
  return(MagickTrue);
}
static void SetPsdMetaChannels(Image *image,const PSDInfo *psd_info,
  const unsigned short channels,ExceptionInfo *exception)
{
  ssize_t
    extra_channels;

  /*
    Register any channels beyond the colorspace minimum (excluding alpha,
    when present) as meta channels on the image.
  */
  extra_channels=(ssize_t) channels-psd_info->min_channels;
  if (image->alpha_trait == BlendPixelTrait)
    extra_channels--;
  if (extra_channels > 0)
    (void) SetPixelMetaChannels(image,(size_t) extra_channels,exception);
}
static MagickBooleanType ReadPSDLayer(Image *image,const ImageInfo *image_info,
  const PSDInfo *psd_info,LayerInfo* layer_info,ExceptionInfo *exception)
{
  char
    message[MagickPathExtent];

  MagickBooleanType
    status;

  PSDCompressionType
    compression;

  ssize_t
    j;

  /*
    Populate layer_info->image with the layer's pixel data: set the compose
    operator and psd:layer.* artifacts, read every channel (each preceded
    by its own 2-byte compression code), then apply layer opacity and --
    when one was decoded -- the layer mask.
  */
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " setting up new layer image");
  if (psd_info->mode != IndexedMode)
    (void) SetImageBackgroundColor(layer_info->image,exception);
  layer_info->image->compose=PSDBlendModeToCompositeOperator(
    layer_info->blendkey);
  if (layer_info->visible == MagickFalse)
    layer_info->image->compose=NoCompositeOp;
  /*
    Set up some hidden attributes for folks that need them.
  */
  (void) FormatLocaleString(message,MagickPathExtent,"%.20g",
    (double) layer_info->page.x);
  (void) SetImageArtifact(layer_info->image,"psd:layer.x",message);
  (void) FormatLocaleString(message,MagickPathExtent,"%.20g",
    (double) layer_info->page.y);
  (void) SetImageArtifact(layer_info->image,"psd:layer.y",message);
  (void) FormatLocaleString(message,MagickPathExtent,"%.20g",(double)
    layer_info->opacity);
  (void) SetImageArtifact(layer_info->image,"psd:layer.opacity",message);
  (void) SetImageProperty(layer_info->image,"label",(char *) layer_info->name,
    exception);
  SetPsdMetaChannels(layer_info->image,psd_info,layer_info->channels,exception);
  status=MagickTrue;
  for (j=0; j < (ssize_t) layer_info->channels; j++)
  {
    if (image->debug != MagickFalse)
      (void) LogMagickEvent(CoderEvent,GetMagickModule(),
        " reading data for channel %.20g",(double) j);
    /* Each channel carries its own compression code. */
    compression=(PSDCompressionType) ReadBlobShort(layer_info->image);
    layer_info->image->compression=ConvertPSDCompression(compression);
    status=ReadPSDChannel(layer_info->image,image_info,psd_info,layer_info,
      (size_t) j,compression,exception);
    if (status == MagickFalse)
      break;
  }
  if (status != MagickFalse)
    status=ApplyPSDLayerOpacity(layer_info->image,layer_info->opacity,
      MagickFalse,exception);
  if ((status != MagickFalse) &&
      (layer_info->image->colorspace == CMYKColorspace))
    status=NegateCMYK(layer_info->image,exception);
  if ((status != MagickFalse) && (layer_info->mask.image != (Image *) NULL))
    {
      const char
        *option;

      layer_info->mask.image->page.x=layer_info->mask.page.x;
      layer_info->mask.image->page.y=layer_info->mask.page.y;
      /* Do not composite the mask when it is disabled */
      if ((layer_info->mask.flags & 0x02) == 0x02)
        layer_info->mask.image->compose=NoCompositeOp;
      else
        status=ApplyPSDOpacityMask(layer_info->image,layer_info->mask.image,
          layer_info->mask.background == 0 ? 0 : QuantumRange,MagickFalse,
          exception);
      option=GetImageOption(image_info,"psd:preserve-opacity-mask");
      if (IsStringTrue(option) != MagickFalse)
        PreservePSDOpacityMask(image,layer_info,exception);
      layer_info->mask.image=DestroyImage(layer_info->mask.image);
    }
  return(status);
}
static MagickBooleanType CheckPSDChannels(const Image *image,
  const PSDInfo *psd_info,LayerInfo *layer_info)
{
  int
    missing_channels;

  size_t
    blob_size;

  ssize_t
    j;

  /*
    Validate a layer's channel list: every required color channel for the
    colorspace must be present, and no channel may claim more data than the
    blob holds.  Returns MagickTrue when the layer is acceptable.
  */
  if (layer_info->channels < psd_info->min_channels)
    return(MagickFalse);
  missing_channels=RedChannel;
  if (psd_info->min_channels >= 3)
    missing_channels|=(GreenChannel | BlueChannel);
  if (psd_info->min_channels >= 4)
    missing_channels|=BlackChannel;
  blob_size=(size_t) GetBlobSize(image);
  for (j=0; j < (ssize_t) layer_info->channels; j++)
  {
    PixelChannel
      channel;

    if (layer_info->channel_info[j].size >= blob_size)
      return(MagickFalse);
    if (layer_info->channel_info[j].supported == MagickFalse)
      continue;
    channel=layer_info->channel_info[j].channel;
    if ((j == 0) && (psd_info->mode == IndexedMode) &&
        (channel != RedPixelChannel))
      return(MagickFalse);
    switch (channel)
    {
      case AlphaPixelChannel:
      {
        missing_channels|=AlphaChannel;
        break;
      }
      case RedPixelChannel:
      {
        missing_channels&=(~RedChannel);
        break;
      }
      case GreenPixelChannel:
      {
        missing_channels&=(~GreenChannel);
        break;
      }
      case BluePixelChannel:
      {
        missing_channels&=(~BlueChannel);
        break;
      }
      case BlackPixelChannel:
      {
        missing_channels&=(~BlackChannel);
        break;
      }
      default:
        break;
    }
  }
  if (missing_channels == 0)
    return(MagickTrue);
  /* An alpha channel is fine as long as there is room for it. */
  if ((missing_channels == AlphaChannel) &&
      (layer_info->channels >= psd_info->min_channels + 1))
    return(MagickTrue);
  return(MagickFalse);
}
static void AttachPSDLayers(Image *image,LayerInfo *layer_info,
  ssize_t number_layers)
{
  ssize_t
    count,
    i;

  /*
    Drop entries whose image could not be created, then splice the
    surviving layer images into the list hanging off the base image and
    release the layer_info array.
  */
  count=0;
  for (i=0; i < number_layers; i++)
    if (layer_info[i].image != (Image *) NULL)
      layer_info[count++]=layer_info[i];
  if (count == 0)
    {
      layer_info=(LayerInfo *) RelinquishMagickMemory(layer_info);
      return;
    }
  for (i=0; i < count; i++)
  {
    layer_info[i].image->previous=(i == 0) ? image : layer_info[i-1].image;
    if (i != (count-1))
      layer_info[i].image->next=layer_info[i+1].image;
    layer_info[i].image->page=layer_info[i].page;
  }
  image->next=layer_info[0].image;
  layer_info=(LayerInfo *) RelinquishMagickMemory(layer_info);
}
static inline MagickBooleanType PSDSkipImage(const PSDInfo *psd_info,
  const ImageInfo *image_info,const size_t index)
{
  /*
    Decide whether layer `index' can be skipped: only when a merged image
    exists to fall back on and the caller restricted decoding to a scene
    range that excludes this index.
  */
  if ((psd_info->has_merged_image == MagickFalse) ||
      (image_info->number_scenes == 0))
    return(MagickFalse);
  if ((index >= image_info->scene) &&
      (index <= (image_info->scene+image_info->number_scenes-1)))
    return(MagickFalse);
  return(MagickTrue);
}
static void CheckMergedImageAlpha(const PSDInfo *psd_info,Image *image)
{
  size_t
    color_channels;

  /*
    The number of layers cannot be used to determine if the merged image
    contains an alpha channel, so enable it whenever the header reports
    more channels than the colorspace needs.
  */
  switch (psd_info->mode)
  {
    case GrayscaleMode:
    {
      color_channels=1;
      break;
    }
    case RGBMode:
    {
      color_channels=3;
      break;
    }
    case CMYKMode:
    {
      color_channels=4;
      break;
    }
    default:
      return;
  }
  if ((size_t) psd_info->channels > color_channels)
    image->alpha_trait=BlendPixelTrait;
}
static void ParseAdditionalInfo(LayerInfo *layer_info)
{
  char
    key[5];

  size_t
    remaining_length;

  unsigned char
    *p;

  unsigned int
    size;

  /*
    Scan the layer's additional-information blob for a 'luni' block (the
    Unicode layer name) and copy its characters into layer_info->name.
    Each block is laid out as: 4-byte signature, 4-byte key, 4-byte
    big-endian payload length, then the payload itself.
  */
  p=GetStringInfoDatum(layer_info->info);
  remaining_length=GetStringInfoLength(layer_info->info);
  while (remaining_length >= 12)
  {
    /* skip over signature */
    p+=4;
    key[0]=(char) (*p++);
    key[1]=(char) (*p++);
    key[2]=(char) (*p++);
    key[3]=(char) (*p++);
    key[4]='\0';
    size=(unsigned int) (*p++) << 24;
    size|=(unsigned int) (*p++) << 16;
    size|=(unsigned int) (*p++) << 8;
    size|=(unsigned int) (*p++);
    size=size & 0xffffffff;  /* no-op for 32-bit unsigned int; kept as-is */
    remaining_length-=12;
    if ((size_t) size > remaining_length)
      break;
    if (LocaleNCompare(key,"luni",sizeof(key)) == 0)
      {
        unsigned char
          *name;

        unsigned int
          length;

        /* Payload: 4-byte big-endian character count, then UTF-16BE text. */
        length=(unsigned int) (*p++) << 24;
        length|=(unsigned int) (*p++) << 16;
        length|=(unsigned int) (*p++) << 8;
        length|=(unsigned int) (*p++);
        if (length * 2 > size - 4)
          break;
        if (sizeof(layer_info->name) <= length)
          break;
        name=layer_info->name;
        while (length > 0)
        {
          /* Only ASCII strings are supported */
          if (*p++ != '\0')
            break;
          *name++=*p++;
          length--;
        }
        /* NOTE(review): if a non-ASCII character aborts the loop above, the
           partially copied name is not explicitly terminated here --
           presumably layer_info was zero-initialized by the caller, so the
           buffer is already NUL-filled; confirm. */
        if (length == 0)
          *name='\0';
        break;
      }
    else
      p+=size;
    remaining_length-=(size_t) size;
  }
}
static MagickSizeType GetLayerInfoSize(const PSDInfo *psd_info,Image *image)
{
  char
    type[4];

  MagickSizeType
    size;

  ssize_t
    count;

  /*
    Determine the length of the layer-info section.  When the header stores
    a non-zero size it is used directly.  Otherwise probe the additional
    information blocks: a Mt16/Mt32/Mtrn key marks merged transparency
    (enables alpha and is skipped over), and a Lr16/Lr32 key introduces the
    16/32-bit layer section whose size follows.  Returns 0 when no layer
    section is recognized.
  */
  size=GetPSDSize(psd_info,image);
  if (size != 0)
    return(size);
  (void) ReadBlobLong(image);
  count=ReadPSDString(image,type,4);
  if ((count != 4) || (LocaleNCompare(type,"8BIM",4) != 0))
    return(0);
  count=ReadPSDString(image,type,4);
  if ((count == 4) && ((LocaleNCompare(type,"Mt16",4) == 0) ||
      (LocaleNCompare(type,"Mt32",4) == 0) ||
      (LocaleNCompare(type,"Mtrn",4) == 0)))
    {
      /* A non-zero size here means raw transparency data, not layers. */
      size=GetPSDSize(psd_info,image);
      if (size != 0)
        return(0);
      image->alpha_trait=BlendPixelTrait;
      count=ReadPSDString(image,type,4);
      if ((count != 4) || (LocaleNCompare(type,"8BIM",4) != 0))
        return(0);
      count=ReadPSDString(image,type,4);
    }
  if ((count == 4) && ((LocaleNCompare(type,"Lr16",4) == 0) ||
      (LocaleNCompare(type,"Lr32",4) == 0)))
    size=GetPSDSize(psd_info,image);
  return(size);
}
static MagickBooleanType ReadPSDLayersInternal(Image *image,
  const ImageInfo *image_info,const PSDInfo *psd_info,
  const MagickBooleanType skip_layers,ExceptionInfo *exception)
{
  char
    type[4];

  LayerInfo
    *layer_info;

  MagickSizeType
    size;

  MagickBooleanType
    status;

  ssize_t
    count,
    index,
    i,
    j,
    number_layers;

  /*
    Decode the PSD layer section in three passes: (1) parse every layer
    record (geometry, channel table, blend key, mask, additional info);
    (2) allocate a layer image per non-empty record; (3) read or discard
    each layer's channel data.  When skip_layers is set, only the
    alpha-trait bookkeeping from the layer count is performed.
  */
  size=GetLayerInfoSize(psd_info,image);
  if (size == 0)
    {
      /* No layer section; the merged image may still carry alpha. */
      CheckMergedImageAlpha(psd_info,image);
      return(MagickTrue);
    }
  layer_info=(LayerInfo *) NULL;
  number_layers=(ssize_t) ReadBlobSignedShort(image);
  if (number_layers < 0)
    {
      /*
        The first alpha channel in the merged result contains the
        transparency data for the merged result.
      */
      number_layers=MagickAbsoluteValue(number_layers);
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CoderEvent,GetMagickModule(),
          " negative layer count corrected for");
      image->alpha_trait=BlendPixelTrait;
    }
  /*
    We only need to know if the image has an alpha channel
  */
  if (skip_layers != MagickFalse)
    return(MagickTrue);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " image contains %.20g layers",(double) number_layers);
  if (number_layers == 0)
    ThrowBinaryException(CorruptImageError,"InvalidNumberOfLayers",
      image->filename);
  layer_info=(LayerInfo *) AcquireQuantumMemory((size_t) number_layers,
    sizeof(*layer_info));
  if (layer_info == (LayerInfo *) NULL)
    {
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CoderEvent,GetMagickModule(),
          " allocation of LayerInfo failed");
      ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
    }
  (void) memset(layer_info,0,(size_t) number_layers*sizeof(*layer_info));
  /*
    Pass 1: parse each layer record.
  */
  for (i=0; i < number_layers; i++)
  {
    ssize_t
      top,
      left,
      bottom,
      right;

    if (image->debug != MagickFalse)
      (void) LogMagickEvent(CoderEvent,GetMagickModule(),
        " reading layer #%.20g",(double) i+1);
    /* Layer bounding box: top, left, bottom, right. */
    top=(ssize_t) ReadBlobSignedLong(image);
    left=(ssize_t) ReadBlobSignedLong(image);
    bottom=(ssize_t) ReadBlobSignedLong(image);
    right=(ssize_t) ReadBlobSignedLong(image);
    if ((right < left) || (bottom < top))
      {
        layer_info=DestroyLayerInfo(layer_info,number_layers);
        ThrowBinaryException(CorruptImageError,"ImproperImageHeader",
          image->filename);
      }
    layer_info[i].page.y=top;
    layer_info[i].page.x=left;
    layer_info[i].page.width=(size_t) (right-left);
    layer_info[i].page.height=(size_t) (bottom-top);
    layer_info[i].channels=ReadBlobShort(image);
    if (layer_info[i].channels > MaxPSDChannels)
      {
        layer_info=DestroyLayerInfo(layer_info,number_layers);
        ThrowBinaryException(CorruptImageError,"MaximumChannelsExceeded",
          image->filename);
      }
    if (image->debug != MagickFalse)
      (void) LogMagickEvent(CoderEvent,GetMagickModule(),
        " offset(%.20g,%.20g), size(%.20g,%.20g), channels=%.20g",
        (double) layer_info[i].page.x,(double) layer_info[i].page.y,
        (double) layer_info[i].page.height,(double)
        layer_info[i].page.width,(double) layer_info[i].channels);
    /* Per-channel id and data-size table. */
    for (j=0; j < (ssize_t) layer_info[i].channels; j++)
    {
      layer_info[i].channel_info[j].supported=GetPixelChannelFromPsdIndex(
        psd_info,(ssize_t) ReadBlobSignedShort(image),
        &layer_info[i].channel_info[j].channel);
      layer_info[i].channel_info[j].size=(size_t) GetPSDSize(psd_info,
        image);
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CoderEvent,GetMagickModule(),
          " channel[%.20g]: type=%.20g, size=%.20g",(double) j,
          (double) layer_info[i].channel_info[j].channel,
          (double) layer_info[i].channel_info[j].size);
    }
    if (CheckPSDChannels(image,psd_info,&layer_info[i]) == MagickFalse)
      {
        layer_info=DestroyLayerInfo(layer_info,number_layers);
        ThrowBinaryException(CorruptImageError,"ImproperImageHeader",
          image->filename);
      }
    count=ReadPSDString(image,type,4);
    if ((count != 4) || (LocaleNCompare(type,"8BIM",4) != 0))
      {
        if (image->debug != MagickFalse)
          (void) LogMagickEvent(CoderEvent,GetMagickModule(),
            " layer type was %.4s instead of 8BIM", type);
        layer_info=DestroyLayerInfo(layer_info,number_layers);
        ThrowBinaryException(CorruptImageError,"ImproperImageHeader",
          image->filename);
      }
    count=ReadPSDString(image,layer_info[i].blendkey,4);
    if (count != 4)
      {
        layer_info=DestroyLayerInfo(layer_info,number_layers);
        ThrowBinaryException(CorruptImageError,"ImproperImageHeader",
          image->filename);
      }
    layer_info[i].opacity=(Quantum) ScaleCharToQuantum((unsigned char)
      ReadBlobByte(image));
    layer_info[i].clipping=(unsigned char) ReadBlobByte(image);
    layer_info[i].flags=(unsigned char) ReadBlobByte(image);
    /* Flag bit 0x02 marks the layer hidden. */
    layer_info[i].visible=!(layer_info[i].flags & 0x02);
    if (image->debug != MagickFalse)
      (void) LogMagickEvent(CoderEvent,GetMagickModule(),
        " blend=%.4s, opacity=%.20g, clipping=%s, flags=%d, visible=%s",
        layer_info[i].blendkey,(double) layer_info[i].opacity,
        layer_info[i].clipping ? "true" : "false",layer_info[i].flags,
        layer_info[i].visible ? "true" : "false");
    (void) ReadBlobByte(image); /* filler */
    size=ReadBlobLong(image);
    if (size != 0)
      {
        MagickSizeType
          combined_length,
          length;

        if (image->debug != MagickFalse)
          (void) LogMagickEvent(CoderEvent,GetMagickModule(),
            " layer contains additional info");
        /* combined_length tracks how much of `size' has been consumed. */
        length=ReadBlobLong(image);
        combined_length=length+4;
        if (length != 0)
          {
            /*
              Layer mask info.
            */
            layer_info[i].mask.page.y=(ssize_t) ReadBlobSignedLong(image);
            layer_info[i].mask.page.x=(ssize_t) ReadBlobSignedLong(image);
            layer_info[i].mask.page.height=(size_t)
              (ReadBlobSignedLong(image)-layer_info[i].mask.page.y);
            layer_info[i].mask.page.width=(size_t) (
              ReadBlobSignedLong(image)-layer_info[i].mask.page.x);
            layer_info[i].mask.background=(unsigned char) ReadBlobByte(
              image);
            layer_info[i].mask.flags=(unsigned char) ReadBlobByte(image);
            /* Unless the position is absolute (flag 0x01), make the mask
               offset relative to the layer. */
            if (!(layer_info[i].mask.flags & 0x01))
              {
                layer_info[i].mask.page.y=layer_info[i].mask.page.y-
                  layer_info[i].page.y;
                layer_info[i].mask.page.x=layer_info[i].mask.page.x-
                  layer_info[i].page.x;
              }
            if (image->debug != MagickFalse)
              (void) LogMagickEvent(CoderEvent,GetMagickModule(),
                " layer mask: offset(%.20g,%.20g), size(%.20g,%.20g), length=%.20g",
                (double) layer_info[i].mask.page.x,(double)
                layer_info[i].mask.page.y,(double)
                layer_info[i].mask.page.width,(double)
                layer_info[i].mask.page.height,(double) ((MagickOffsetType)
                length)-18);
            /*
              Skip over the rest of the layer mask information.
              NOTE(review): length < 18 wraps to a huge unsigned value here
              and fails the discard below, rejecting the file -- confirm
              that is the intended handling.
            */
            if (DiscardBlobBytes(image,(MagickSizeType) (length-18)) == MagickFalse)
              {
                layer_info=DestroyLayerInfo(layer_info,number_layers);
                ThrowBinaryException(CorruptImageError,
                  "UnexpectedEndOfFile",image->filename);
              }
          }
        length=ReadBlobLong(image);
        combined_length+=length+4;
        if (length != 0)
          {
            /*
              Layer blending ranges info.
            */
            if (image->debug != MagickFalse)
              (void) LogMagickEvent(CoderEvent,GetMagickModule(),
                " layer blending ranges: length=%.20g",(double)
                ((MagickOffsetType) length));
            if (DiscardBlobBytes(image,length) == MagickFalse)
              {
                layer_info=DestroyLayerInfo(layer_info,number_layers);
                ThrowBinaryException(CorruptImageError,
                  "UnexpectedEndOfFile",image->filename);
              }
          }
        /*
          Layer name.
        */
        length=(MagickSizeType) (unsigned char) ReadBlobByte(image);
        combined_length+=length+1;
        if (length > 0)
          (void) ReadBlob(image,(size_t) length++,layer_info[i].name);
        layer_info[i].name[length]='\0';
        if (image->debug != MagickFalse)
          (void) LogMagickEvent(CoderEvent,GetMagickModule(),
            " layer name: %s",layer_info[i].name);
        /* The Pascal-style name (count byte + text) is padded to a
           multiple of 4 bytes. */
        if ((length % 4) != 0)
          {
            length=4-(length % 4);
            combined_length+=length;
            /* Skip over the padding of the layer name */
            if (DiscardBlobBytes(image,length) == MagickFalse)
              {
                layer_info=DestroyLayerInfo(layer_info,number_layers);
                ThrowBinaryException(CorruptImageError,
                  "UnexpectedEndOfFile",image->filename);
              }
          }
        /* Whatever remains of the extra-data field is the additional-info
           blob (layer effects, Unicode name, ...). */
        length=(MagickSizeType) size-combined_length;
        if (length > 0)
          {
            unsigned char
              *info;

            if (length > GetBlobSize(image))
              {
                layer_info=DestroyLayerInfo(layer_info,number_layers);
                ThrowBinaryException(CorruptImageError,
                  "InsufficientImageDataInFile",image->filename);
              }
            layer_info[i].info=AcquireStringInfo((const size_t) length);
            info=GetStringInfoDatum(layer_info[i].info);
            (void) ReadBlob(image,(const size_t) length,info);
            ParseAdditionalInfo(&layer_info[i]);
          }
      }
  }
  /*
    Pass 2: allocate an image per non-empty layer.
  */
  for (i=0; i < number_layers; i++)
  {
    if ((layer_info[i].page.width == 0) || (layer_info[i].page.height == 0))
      {
        if (image->debug != MagickFalse)
          (void) LogMagickEvent(CoderEvent,GetMagickModule(),
            " layer data is empty");
        if (layer_info[i].info != (StringInfo *) NULL)
          layer_info[i].info=DestroyStringInfo(layer_info[i].info);
        continue;
      }
    /*
      Allocate layered image.
    */
    layer_info[i].image=CloneImage(image,layer_info[i].page.width,
      layer_info[i].page.height,MagickFalse,exception);
    if (layer_info[i].image == (Image *) NULL)
      {
        layer_info=DestroyLayerInfo(layer_info,number_layers);
        if (image->debug != MagickFalse)
          (void) LogMagickEvent(CoderEvent,GetMagickModule(),
            " allocation of image for layer %.20g failed",(double) i);
        ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
          image->filename);
      }
    for (j=0; j < (ssize_t) layer_info[i].channels; j++)
    {
      if (layer_info[i].channel_info[j].channel == AlphaPixelChannel)
        {
          layer_info[i].image->alpha_trait=BlendPixelTrait;
          break;
        }
    }
    if (layer_info[i].info != (StringInfo *) NULL)
      {
        (void) SetImageProfile(layer_info[i].image,"psd:additional-info",
          layer_info[i].info,exception);
        layer_info[i].info=DestroyStringInfo(layer_info[i].info);
      }
  }
  if (image_info->ping != MagickFalse)
    {
      /* Ping mode: attach the layer images without reading pixel data. */
      AttachPSDLayers(image,layer_info,number_layers);
      return(MagickTrue);
    }
  /*
    Pass 3: read the channel data of every kept layer; skipped layers have
    their data discarded so the blob position stays in sync.
  */
  status=MagickTrue;
  index=0;
  for (i=0; i < number_layers; i++)
  {
    if ((layer_info[i].image == (Image *) NULL) ||
        (PSDSkipImage(psd_info, image_info,++index) != MagickFalse))
      {
        for (j=0; j < (ssize_t) layer_info[i].channels; j++)
        {
          if (DiscardBlobBytes(image,(MagickSizeType)
              layer_info[i].channel_info[j].size) == MagickFalse)
            {
              layer_info=DestroyLayerInfo(layer_info,number_layers);
              ThrowBinaryException(CorruptImageError,
                "UnexpectedEndOfFile",image->filename);
            }
        }
        continue;
      }
    if (image->debug != MagickFalse)
      (void) LogMagickEvent(CoderEvent,GetMagickModule(),
        " reading data for layer %.20g",(double) i);
    status=ReadPSDLayer(image,image_info,psd_info,&layer_info[i],
      exception);
    if (status == MagickFalse)
      break;
    status=SetImageProgress(image,LoadImagesTag,(MagickOffsetType) i,
      (MagickSizeType) number_layers);
    if (status == MagickFalse)
      break;
  }
  if (status != MagickFalse)
    AttachPSDLayers(image,layer_info,number_layers);
  else
    layer_info=DestroyLayerInfo(layer_info,number_layers);
  return(status);
}
ModuleExport MagickBooleanType ReadPSDLayers(Image *image,
  const ImageInfo *image_info,const PSDInfo *psd_info,ExceptionInfo *exception)
{
  /*
    Public entry point for decoding the layer section.  When coder policy
    denies PSD read rights, report success without reading any layers.
  */
  if (IsRightsAuthorized(CoderPolicyDomain,ReadPolicyRights,"PSD") == MagickFalse)
    return(MagickTrue);
  return(ReadPSDLayersInternal(image,image_info,psd_info,MagickFalse,
    exception));
}
static MagickBooleanType ReadPSDMergedImage(const ImageInfo *image_info,
  Image *image,const PSDInfo *psd_info,ExceptionInfo *exception)
{
  MagickOffsetType
    *sizes;

  MagickBooleanType
    status;

  PSDCompressionType
    compression;

  ssize_t
    i;

  /*
    Read the flattened (merged) composite that follows the layer section.
    Channels are stored planar, one after another; only Raw and RLE
    compression are supported here.
  */
  if ((image_info->number_scenes != 0) && (image_info->scene != 0))
    return(MagickTrue);
  compression=(PSDCompressionType) ReadBlobMSBShort(image);
  image->compression=ConvertPSDCompression(compression);
  if (compression != Raw && compression != RLE)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        TypeWarning,"CompressionNotSupported","'%.20g'",(double) compression);
      return(MagickFalse);
    }
  sizes=(MagickOffsetType *) NULL;
  if (compression == RLE)
    {
      /* One scanline count per row per channel precedes the RLE data. */
      sizes=ReadPSDRLESizes(image,psd_info,image->rows*psd_info->channels);
      if (sizes == (MagickOffsetType *) NULL)
        ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
          image->filename);
    }
  SetPsdMetaChannels(image,psd_info,psd_info->channels,exception);
  status=MagickTrue;
  for (i=0; i < (ssize_t) psd_info->channels; i++)
  {
    PixelChannel
      channel;

    status=GetPixelChannelFromPsdIndex(psd_info,i,&channel);
    if (status == MagickFalse)
      {
        (void) ThrowMagickException(exception,GetMagickModule(),
          CorruptImageError,"MaximumChannelsExceeded","'%.20g'",(double) i);
        break;
      }
    if (compression == RLE)
      status=ReadPSDChannelRLE(image,channel,sizes+(i*image->rows),exception);
    else
      status=ReadPSDChannelRaw(image,channel,exception);
    if (status != MagickFalse)
      status=SetImageProgress(image,LoadImagesTag,(MagickOffsetType) i,
        psd_info->channels);
    if (status == MagickFalse)
      break;
  }
  if ((status != MagickFalse) && (image->colorspace == CMYKColorspace))
    status=NegateCMYK(image,exception);
  if (status != MagickFalse)
    status=CorrectPSDAlphaBlend(image_info,image,exception);
  /* RelinquishMagickMemory() accepts NULL, so the Raw path is safe here. */
  sizes=(MagickOffsetType *) RelinquishMagickMemory(sizes);
  return(status);
}
static Image *ReadPSDImage(const ImageInfo *image_info,ExceptionInfo *exception)
{
  Image
    *image;

  MagickBooleanType
    skip_layers;

  MagickOffsetType
    offset;

  MagickSizeType
    length;

  MagickBooleanType
    status;

  PSDInfo
    psd_info;

  ssize_t
    i;

  size_t
    image_list_length;

  ssize_t
    count;

  StringInfo
    *profile;

  /*
    Open image file.
  */
  assert(image_info != (const ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  image=AcquireImage(image_info,exception);
  status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
  if (status == MagickFalse)
    {
      image=DestroyImageList(image);
      return((Image *) NULL);
    }
  /*
    Read image header.  The file starts with the "8BPS" signature; version 1
    is classic PSD, version 2 is PSB (large document format).
  */
  image->endian=MSBEndian;
  count=ReadBlob(image,4,(unsigned char *) psd_info.signature);
  psd_info.version=ReadBlobMSBShort(image);
  if ((count != 4) || (LocaleNCompare(psd_info.signature,"8BPS",4) != 0) ||
      ((psd_info.version != 1) && (psd_info.version != 2)))
    ThrowReaderException(CorruptImageError,"ImproperImageHeader");
  (void) ReadBlob(image,6,psd_info.reserved);
  psd_info.channels=ReadBlobMSBShort(image);
  if (psd_info.channels < 1)
    ThrowReaderException(CorruptImageError,"MissingImageChannel");
  if (psd_info.channels > MaxPSDChannels)
    ThrowReaderException(CorruptImageError,"MaximumChannelsExceeded");
  psd_info.rows=ReadBlobMSBLong(image);
  psd_info.columns=ReadBlobMSBLong(image);
  /* Version 1 files are limited to 30000x30000 pixels by the format spec. */
  if ((psd_info.version == 1) && ((psd_info.rows > 30000) ||
      (psd_info.columns > 30000)))
    ThrowReaderException(CorruptImageError,"ImproperImageHeader");
  psd_info.depth=ReadBlobMSBShort(image);
  if ((psd_info.depth != 1) && (psd_info.depth != 8) &&
      (psd_info.depth != 16) && (psd_info.depth != 32))
    ThrowReaderException(CorruptImageError,"ImproperImageHeader");
  psd_info.mode=ReadBlobMSBShort(image);
  if ((psd_info.mode == IndexedMode) && (psd_info.channels > 3))
    ThrowReaderException(CorruptImageError,"ImproperImageHeader");
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      "  Image is %.20g x %.20g with channels=%.20g, depth=%.20g, mode=%s",
      (double) psd_info.columns,(double) psd_info.rows,(double)
      psd_info.channels,(double) psd_info.depth,ModeToString((PSDImageType)
      psd_info.mode));
  if (EOFBlob(image) != MagickFalse)
    ThrowReaderException(CorruptImageError,"ImproperImageHeader");
  /*
    Initialize image.
  */
  image->depth=psd_info.depth;
  image->columns=psd_info.columns;
  image->rows=psd_info.rows;
  status=SetImageExtent(image,image->columns,image->rows,exception);
  if (status == MagickFalse)
    return(DestroyImageList(image));
  status=ResetImagePixels(image,exception);
  if (status == MagickFalse)
    return(DestroyImageList(image));
  /* min_channels is the sanity floor checked against the header's channel
     count; it depends on the color mode selected below. */
  psd_info.min_channels=3;
  switch (psd_info.mode)
  {
    case LabMode:
    {
      (void) SetImageColorspace(image,LabColorspace,exception);
      break;
    }
    case CMYKMode:
    {
      psd_info.min_channels=4;
      (void) SetImageColorspace(image,CMYKColorspace,exception);
      break;
    }
    case BitmapMode:
    case GrayscaleMode:
    case DuotoneMode:
    {
      if (psd_info.depth != 32)
        {
          /* Grayscale/bitmap/duotone use a colormap except at 32-bit depth. */
          status=AcquireImageColormap(image,MagickMin((size_t)
            (psd_info.depth < 16 ? 256 : 65536), MaxColormapSize),exception);
          if (status == MagickFalse)
            ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
          if (image->debug != MagickFalse)
            (void) LogMagickEvent(CoderEvent,GetMagickModule(),
              "  Image colormap allocated");
        }
      psd_info.min_channels=1;
      (void) SetImageColorspace(image,GRAYColorspace,exception);
      break;
    }
    case IndexedMode:
    {
      psd_info.min_channels=1;
      break;
    }
    case MultichannelMode:
    {
      if ((psd_info.channels > 0) && (psd_info.channels < 3))
        {
          psd_info.min_channels=psd_info.channels;
          (void) SetImageColorspace(image,GRAYColorspace,exception);
        }
      break;
    }
  }
  if (psd_info.channels < psd_info.min_channels)
    ThrowReaderException(CorruptImageError,"ImproperImageHeader");
  /*
    Read PSD raster colormap only present for indexed and duotone images.
  */
  length=ReadBlobMSBLong(image);
  if ((psd_info.mode == IndexedMode) && (length < 3))
    ThrowReaderException(CorruptImageError,"ImproperImageHeader");
  if (length != 0)
    {
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CoderEvent,GetMagickModule(),
          "  reading colormap");
      if ((psd_info.mode == DuotoneMode) || (psd_info.depth == 32))
        {
          /*
            Duotone image data;  the format of this data is undocumented.
            32 bits per pixel;  the colormap is ignored.
          */
          (void) SeekBlob(image,(const MagickOffsetType) length,SEEK_CUR);
        }
      else
        {
          size_t
            number_colors;

          /*
            Read PSD raster colormap: all red bytes, then all green bytes,
            then all blue bytes (planar order).
          */
          number_colors=(size_t) length/3;
          if (number_colors > 65536)
            ThrowReaderException(CorruptImageError,"ImproperImageHeader");
          if (AcquireImageColormap(image,number_colors,exception) == MagickFalse)
            ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
          for (i=0; i < (ssize_t) image->colors; i++)
            image->colormap[i].red=(MagickRealType) ScaleCharToQuantum(
              (unsigned char) ReadBlobByte(image));
          for (i=0; i < (ssize_t) image->colors; i++)
            image->colormap[i].green=(MagickRealType) ScaleCharToQuantum(
              (unsigned char) ReadBlobByte(image));
          for (i=0; i < (ssize_t) image->colors; i++)
            image->colormap[i].blue=(MagickRealType) ScaleCharToQuantum(
              (unsigned char) ReadBlobByte(image));
          image->alpha_trait=UndefinedPixelTrait;
        }
    }
  if ((image->depth == 1) && (image->storage_class != PseudoClass))
    ThrowReaderException(CorruptImageError, "ImproperImageHeader");
  psd_info.has_merged_image=MagickTrue;
  profile=(StringInfo *) NULL;
  length=ReadBlobMSBLong(image);
  if (length != 0)
    {
      unsigned char
        *blocks;

      /*
        Image resources block:  a sequence of "8BIM" tagged resources
        (profiles, resolution, thumbnails, ...).
      */
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CoderEvent,GetMagickModule(),
          "  reading image resource blocks - %.20g bytes",(double)
          ((MagickOffsetType) length));
      if (length > GetBlobSize(image))
        ThrowReaderException(CorruptImageError,"InsufficientImageDataInFile");
      blocks=(unsigned char *) AcquireQuantumMemory((size_t) length,
        sizeof(*blocks));
      if (blocks == (unsigned char *) NULL)
        ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
      count=ReadBlob(image,(size_t) length,blocks);
      if ((count != (ssize_t) length) || (length < 4) ||
          (LocaleNCompare((char *) blocks,"8BIM",4) != 0))
        {
          blocks=(unsigned char *) RelinquishMagickMemory(blocks);
          ThrowReaderException(CorruptImageError,"ImproperImageHeader");
        }
      profile=ParseImageResourceBlocks(&psd_info,image,blocks,(size_t) length);
      blocks=(unsigned char *) RelinquishMagickMemory(blocks);
    }
  /*
    Layer and mask block.
  */
  length=GetPSDSize(&psd_info,image);
  if (length == 8)
    {
      /* An 8-byte section holds two nested 32-bit lengths; the second one is
         the effective layer-info length. */
      length=ReadBlobMSBLong(image);
      length=ReadBlobMSBLong(image);
    }
  offset=TellBlob(image);
  skip_layers=MagickFalse;
  if ((image_info->number_scenes == 1) && (image_info->scene == 0) &&
      (psd_info.has_merged_image != MagickFalse))
    {
      /* Only the composite was requested; skip decoding individual layers. */
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CoderEvent,GetMagickModule(),
          "  read composite only");
      skip_layers=MagickTrue;
    }
  if (length == 0)
    {
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CoderEvent,GetMagickModule(),
          "  image has no layers");
    }
  else
    {
      if (ReadPSDLayersInternal(image,image_info,&psd_info,skip_layers,
          exception) != MagickTrue)
        {
          if (profile != (StringInfo *) NULL)
            profile=DestroyStringInfo(profile);
          (void) CloseBlob(image);
          image=DestroyImageList(image);
          return((Image *) NULL);
        }
      /*
        Skip the rest of the layer and mask information.
      */
      (void) SeekBlob(image,offset+length,SEEK_SET);
    }
  /*
    If we are only "pinging" the image, then we're done - so return.
  */
  if (EOFBlob(image) != MagickFalse)
    {
      if (profile != (StringInfo *) NULL)
        profile=DestroyStringInfo(profile);
      ThrowReaderException(CorruptImageError,"UnexpectedEndOfFile");
    }
  if (image_info->ping != MagickFalse)
    {
      if (profile != (StringInfo *) NULL)
        profile=DestroyStringInfo(profile);
      (void) CloseBlob(image);
      return(GetFirstImageInList(image));
    }
  /*
    Read the precombined layer, present for PSD < 4 compatibility.
  */
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      "  reading the precombined layer");
  image_list_length=GetImageListLength(image);
  if ((psd_info.has_merged_image != MagickFalse) || (image_list_length == 1))
    psd_info.has_merged_image=(MagickBooleanType) ReadPSDMergedImage(
      image_info,image,&psd_info,exception);
  if ((psd_info.has_merged_image == MagickFalse) && (image_list_length == 1) &&
      (length != 0))
    {
      /* No usable composite and no decoded layers: rewind and force-read the
         layer section as a fallback. */
      (void) SeekBlob(image,offset,SEEK_SET);
      status=ReadPSDLayersInternal(image,image_info,&psd_info,MagickFalse,
        exception);
      if (status != MagickTrue)
        {
          if (profile != (StringInfo *) NULL)
            profile=DestroyStringInfo(profile);
          (void) CloseBlob(image);
          image=DestroyImageList(image);
          return((Image *) NULL);
        }
      image_list_length=GetImageListLength(image);
    }
  if (psd_info.has_merged_image == MagickFalse)
    {
      Image
        *merged;

      if (image_list_length == 1)
        {
          if (profile != (StringInfo *) NULL)
            profile=DestroyStringInfo(profile);
          ThrowReaderException(CorruptImageError,"InsufficientImageDataInFile");
        }
      /* Synthesize the composite by flattening the decoded layers over a
         transparent background. */
      image->background_color.alpha=(MagickRealType) TransparentAlpha;
      image->background_color.alpha_trait=BlendPixelTrait;
      (void) SetImageBackgroundColor(image,exception);
      merged=MergeImageLayers(image,FlattenLayer,exception);
      if (merged == (Image *) NULL)
        {
          (void) CloseBlob(image);
          image=DestroyImageList(image);
          return((Image *) NULL);
        }
      ReplaceImageInList(&image,merged);
    }
  if (profile != (StringInfo *) NULL)
    {
      const char
        *option;

      Image
        *next;

      MagickBooleanType
        replicate_profile;

      /* Attach the profile parsed from the resource blocks to the first
         non-skipped frame, or to every frame when
         psd:replicate-profile is set. */
      option=GetImageOption(image_info,"psd:replicate-profile");
      replicate_profile=IsStringTrue(option);
      i=0;
      next=image;
      while (next != (Image *) NULL)
      {
        if (PSDSkipImage(&psd_info,image_info,i++) == MagickFalse)
          {
            (void) SetImageProfile(next,GetStringInfoName(profile),profile,
              exception);
            if (replicate_profile == MagickFalse)
              break;
          }
        next=next->next;
      }
      profile=DestroyStringInfo(profile);
    }
  (void) CloseBlob(image);
  return(GetFirstImageInList(image));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e g i s t e r P S D I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RegisterPSDImage() adds properties for the PSD image format to
% the list of supported formats. The properties include the image format
% tag, a method to read and/or write the format, whether the format
% supports the saving of more than one frame to the same file or blob,
% whether the format supports native in-memory I/O, and a brief
% description of the format.
%
% The format of the RegisterPSDImage method is:
%
% size_t RegisterPSDImage(void)
%
*/
ModuleExport size_t RegisterPSDImage(void)
{
MagickInfo
*entry;
entry=AcquireMagickInfo("PSD","PSB","Adobe Large Document Format");
entry->decoder=(DecodeImageHandler *) ReadPSDImage;
entry->encoder=(EncodeImageHandler *) WritePSDImage;
entry->magick=(IsImageFormatHandler *) IsPSD;
entry->flags|=CoderDecoderSeekableStreamFlag;
entry->flags|=CoderEncoderSeekableStreamFlag;
(void) RegisterMagickInfo(entry);
entry=AcquireMagickInfo("PSD","PSD","Adobe Photoshop bitmap");
entry->decoder=(DecodeImageHandler *) ReadPSDImage;
entry->encoder=(EncodeImageHandler *) WritePSDImage;
entry->magick=(IsImageFormatHandler *) IsPSD;
entry->flags|=CoderDecoderSeekableStreamFlag;
entry->flags|=CoderEncoderSeekableStreamFlag;
(void) RegisterMagickInfo(entry);
return(MagickImageCoderSignature);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% U n r e g i s t e r P S D I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% UnregisterPSDImage() removes format registrations made by the
% PSD module from the list of supported formats.
%
% The format of the UnregisterPSDImage method is:
%
% UnregisterPSDImage(void)
%
*/
ModuleExport void UnregisterPSDImage(void)
{
(void) UnregisterMagickInfo("PSB");
(void) UnregisterMagickInfo("PSD");
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% W r i t e P S D I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% WritePSDImage() writes an image in the Adobe Photoshop encoded image format.
%
% The format of the WritePSDImage method is:
%
% MagickBooleanType WritePSDImage(const ImageInfo *image_info,Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image_info: the image info.
%
% o image: The image.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Write a scanline-size table entry: 16 bits wide for version 1 (PSD)
  files, 32 bits wide for version 2 (PSB) files.
*/
static inline ssize_t SetPSDOffset(const PSDInfo *psd_info,Image *image,
  const size_t offset)
{
  if (psd_info->version != 1)
    return(WriteBlobMSBLong(image,(unsigned int) offset));
  return(WriteBlobMSBShort(image,(unsigned short) offset));
}
/*
  Patch a previously reserved scanline-size entry at `offset`, then restore
  the stream position so sequential writing can continue.
*/
static inline ssize_t WritePSDOffset(const PSDInfo *psd_info,Image *image,
  const MagickSizeType size,const MagickOffsetType offset)
{
  MagickOffsetType
    restore_offset;

  ssize_t
    count;

  restore_offset=TellBlob(image);
  (void) SeekBlob(image,offset,SEEK_SET);
  count=(psd_info->version == 1) ?
    WriteBlobMSBShort(image,(unsigned short) size) :
    WriteBlobMSBLong(image,(unsigned int) size);
  (void) SeekBlob(image,restore_offset,SEEK_SET);
  return(count);
}
/*
  Write a section-length field: 32 bits for version 1 (PSD) files,
  64 bits for version 2 (PSB) files.
*/
static inline ssize_t SetPSDSize(const PSDInfo *psd_info,Image *image,
  const MagickSizeType size)
{
  if (psd_info->version != 1)
    return(WriteBlobLongLong(image,size));
  return(WriteBlobLong(image,(unsigned int) size));
}
/*
  Patch a previously reserved section-length field at `offset`, then return
  to the current write position.
*/
static inline ssize_t WritePSDSize(const PSDInfo *psd_info,Image *image,
  const MagickSizeType size,const MagickOffsetType offset)
{
  MagickOffsetType
    restore_offset;

  ssize_t
    count;

  restore_offset=TellBlob(image);
  (void) SeekBlob(image,offset,SEEK_SET);
  count=SetPSDSize(psd_info,image,size);
  (void) SeekBlob(image,restore_offset,SEEK_SET);
  return(count);
}
static size_t PSDPackbitsEncodeImage(Image *image,const size_t length,
  const unsigned char *pixels,unsigned char *compact_pixels,
  ExceptionInfo *exception)
{
  int
    count;

  ssize_t
    i,
    j;

  unsigned char
    *q;

  unsigned char
    *packbits;

  /*
    Compress pixels with Packbits encoding.  `length` bytes from `pixels`
    are encoded into `compact_pixels`; the number of bytes produced is
    returned.  Note: `exception` is currently unused.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(pixels != (unsigned char *) NULL);
  assert(compact_pixels != (unsigned char *) NULL);
  /* Scratch buffer for one literal run: 1 header byte plus up to 127
     literal bytes (indices 0..127). */
  packbits=(unsigned char *) AcquireQuantumMemory(128UL,sizeof(*packbits));
  if (packbits == (unsigned char *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  q=compact_pixels;
  for (i=(ssize_t) length; i != 0; )
  {
    switch (i)
    {
      case 1:
      {
        /* Single trailing byte: literal run of one (header 0). */
        i--;
        *q++=(unsigned char) 0;
        *q++=(*pixels);
        break;
      }
      case 2:
      {
        /* Two trailing bytes: literal run of two (header 1). */
        i-=2;
        *q++=(unsigned char) 1;
        *q++=(*pixels);
        *q++=pixels[1];
        break;
      }
      case 3:
      {
        i-=3;
        if ((*pixels == *(pixels+1)) && (*(pixels+1) == *(pixels+2)))
          {
            /* Three identical bytes: replicate run of three; the header is
               the two's-complement encoding of -(count-1), i.e. 257-count. */
            *q++=(unsigned char) ((256-3)+1);
            *q++=(*pixels);
            break;
          }
        /* Otherwise: literal run of three (header 2). */
        *q++=(unsigned char) 2;
        *q++=(*pixels);
        *q++=pixels[1];
        *q++=pixels[2];
        break;
      }
      default:
      {
        if ((*pixels == *(pixels+1)) && (*(pixels+1) == *(pixels+2)))
          {
            /*
              Packed run.
            */
            count=3;
            while (((ssize_t) count < i) && (*pixels == *(pixels+count)))
            {
              count++;
              if (count >= 127)
                break;
            }
            i-=count;
            /* Header 257-count encodes a run of `count` repeats. */
            *q++=(unsigned char) ((256-count)+1);
            *q++=(*pixels);
            pixels+=count;
            break;
          }
        /*
          Literal run.
        */
        count=0;
        /* Extend the literal run until three identical bytes start a
           replicate run; the (i-3) bound keeps the +2 lookahead in range. */
        while ((*(pixels+count) != *(pixels+count+1)) ||
               (*(pixels+count+1) != *(pixels+count+2)))
        {
          packbits[count+1]=pixels[count];
          count++;
          if (((ssize_t) count >= (i-3)) || (count >= 127))
            break;
        }
        i-=count;
        /* Header count-1 announces `count` literal bytes that follow. */
        *packbits=(unsigned char) (count-1);
        for (j=0; j <= (ssize_t) count; j++)
          *q++=packbits[j];
        pixels+=count;
        break;
      }
    }
  }
  *q++=(unsigned char) 128;  /* EOD marker */
  packbits=(unsigned char *) RelinquishMagickMemory(packbits);
  return((size_t) (q-compact_pixels));
}
/*
  Write the two-byte compression marker that opens a channel-data section.
  For RLE the marker is followed by a placeholder scanline-size table (one
  entry per row per channel) that is patched later via WritePSDOffset().
  Returns the number of bytes written.
*/
static size_t WriteCompressionStart(const PSDInfo *psd_info,Image *image,
  const Image *next_image,const CompressionType compression,
  const ssize_t channels)
{
  size_t
    count;

  ssize_t
    channel,
    row;

  if (compression == RLECompression)
    {
      count=(size_t) WriteBlobShort(image,RLE);
      for (channel=0; channel < channels; channel++)
        for (row=0; row < (ssize_t) next_image->rows; row++)
          count+=SetPSDOffset(psd_info,image,0);
      return(count);
    }
#ifdef MAGICKCORE_ZLIB_DELEGATE
  if (compression == ZipCompression)
    return((size_t) WriteBlobShort(image,ZipWithoutPrediction));
#endif
  /* Anything else (including Zip without zlib support) falls back to Raw. */
  return((size_t) WriteBlobShort(image,Raw));
}
static size_t WritePSDChannel(const PSDInfo *psd_info,
  const ImageInfo *image_info,Image *image,Image *next_image,
  const QuantumType quantum_type, unsigned char *compact_pixels,
  MagickOffsetType size_offset,const MagickBooleanType separate,
  const CompressionType compression,ExceptionInfo *exception)
{
  MagickBooleanType
    monochrome;

  QuantumInfo
    *quantum_info;

  const Quantum
    *p;

  ssize_t
    i;

  size_t
    count,
    length;

  ssize_t
    y;

  unsigned char
    *pixels;

#ifdef MAGICKCORE_ZLIB_DELEGATE
  int
    flush,
    level;

  unsigned char
    *compressed_pixels;

  z_stream
    stream;

  compressed_pixels=(unsigned char *) NULL;
  flush=Z_NO_FLUSH;
#endif
  /*
    Write one channel of `next_image` into the output blob, row by row.
    When `separate` is set (layer channels) this function writes its own
    compression marker first and patches per-row RLE sizes at `size_offset`;
    otherwise the caller has already written a shared marker.  Returns the
    number of bytes written (0 on allocation failure).
  */
  count=0;
  if (separate != MagickFalse)
    {
      /* +2 skips the compression marker written by WriteCompressionStart. */
      size_offset=TellBlob(image)+2;
      count+=WriteCompressionStart(psd_info,image,next_image,compression,1);
    }
  if (next_image->depth > 8)
    next_image->depth=16;
  monochrome=IsImageMonochrome(image) && (image->depth == 1) ?
    MagickTrue : MagickFalse;
  quantum_info=AcquireQuantumInfo(image_info,next_image);
  if (quantum_info == (QuantumInfo *) NULL)
    return(0);
  pixels=(unsigned char *) GetQuantumPixels(quantum_info);
#ifdef MAGICKCORE_ZLIB_DELEGATE
  if (compression == ZipCompression)
    {
      compressed_pixels=(unsigned char *) AcquireQuantumMemory(
        MagickMinBufferExtent,sizeof(*compressed_pixels));
      if (compressed_pixels == (unsigned char *) NULL)
        {
          quantum_info=DestroyQuantumInfo(quantum_info);
          return(0);
        }
      memset(&stream,0,sizeof(stream));
      stream.data_type=Z_BINARY;
      level=Z_DEFAULT_COMPRESSION;
      /* quality 1-9 maps directly to the zlib compression level. */
      if ((image_info->quality > 0 && image_info->quality < 10))
        level=(int) image_info->quality;
      if (deflateInit(&stream,level) != Z_OK)
        {
          quantum_info=DestroyQuantumInfo(quantum_info);
          compressed_pixels=(unsigned char *) RelinquishMagickMemory(
            compressed_pixels);
          return(0);
        }
    }
#endif
  for (y=0; y < (ssize_t) next_image->rows; y++)
  {
    p=GetVirtualPixels(next_image,0,y,next_image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      break;
    length=ExportQuantumPixels(next_image,(CacheView *) NULL,quantum_info,
      quantum_type,pixels,exception);
    /* 1-bit PSD data is stored inverted (0 = white). */
    if (monochrome != MagickFalse)
      for (i=0; i < (ssize_t) length; i++)
        pixels[i]=(~pixels[i]);
    if (compression == RLECompression)
      {
        length=PSDPackbitsEncodeImage(image,length,pixels,compact_pixels,
          exception);
        count+=WriteBlob(image,length,compact_pixels);
        /* Back-patch this row's compressed size into the RLE size table. */
        size_offset+=WritePSDOffset(psd_info,image,length,size_offset);
      }
#ifdef MAGICKCORE_ZLIB_DELEGATE
    else if (compression == ZipCompression)
      {
        stream.avail_in=(uInt) length;
        stream.next_in=(Bytef *) pixels;
        if (y == (ssize_t) next_image->rows-1)
          flush=Z_FINISH;  /* last row: flush the deflate stream */
        do {
          stream.avail_out=(uInt) MagickMinBufferExtent;
          stream.next_out=(Bytef *) compressed_pixels;
          if (deflate(&stream,flush) == Z_STREAM_ERROR)
            break;
          length=(size_t) MagickMinBufferExtent-stream.avail_out;
          if (length > 0)
            count+=WriteBlob(image,length,compressed_pixels);
        } while (stream.avail_out == 0);
      }
#endif
    else
      count+=WriteBlob(image,length,pixels);
  }
#ifdef MAGICKCORE_ZLIB_DELEGATE
  if (compression == ZipCompression)
    {
      (void) deflateEnd(&stream);
      compressed_pixels=(unsigned char *) RelinquishMagickMemory(
        compressed_pixels);
    }
#endif
  quantum_info=DestroyQuantumInfo(quantum_info);
  return(count);
}
/*
  Allocate a scratch buffer large enough to hold one Packbits-encoded
  scanline in the worst case.  Returns NULL (with an exception raised)
  on allocation failure.
*/
static unsigned char *AcquireCompactPixels(const Image *image,
  ExceptionInfo *exception)
{
  size_t
    bytes_per_packet;

  unsigned char
    *buffer;

  /* Two bytes per sample above 8-bit depth, otherwise one. */
  bytes_per_packet=(image->depth > 8UL) ? 2UL : 1UL;
  buffer=(unsigned char *) AcquireQuantumMemory((9*image->columns)+1,
    bytes_per_packet*sizeof(*buffer));
  if (buffer == (unsigned char *) NULL)
    (void) ThrowMagickException(exception,GetMagickModule(),
      ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
  return(buffer);
}
static size_t WritePSDChannels(const PSDInfo *psd_info,
  const ImageInfo *image_info,Image *image,Image *next_image,
  MagickOffsetType size_offset,const MagickBooleanType separate,
  ExceptionInfo *exception)
{
  CompressionType
    compression;

  Image
    *mask;

  MagickOffsetType
    rows_offset;

  size_t
    channels,
    count,
    length,
    offset_length;

  unsigned char
    *compact_pixels;

  /*
    Write every channel of `next_image` (index/gray or R/G/B[/K], plus an
    optional alpha and an optional opacity mask).  When `separate` is set
    (layer data) each channel carries its own compression marker and its
    byte count is patched back at `size_offset`; otherwise (merged image)
    a single shared compression header is written up front.  Returns the
    number of bytes written (0 on allocation failure).
  */
  count=0;
  offset_length=0;
  rows_offset=0;
  compact_pixels=(unsigned char *) NULL;
  compression=next_image->compression;
  if (image_info->compression != UndefinedCompression)
    compression=image_info->compression;
  if (compression == RLECompression)
    {
      compact_pixels=AcquireCompactPixels(next_image,exception);
      if (compact_pixels == (unsigned char *) NULL)
        return(0);
    }
  channels=1;
  if (separate == MagickFalse)
    {
      if ((next_image->storage_class != PseudoClass) ||
          (IsImageGray(next_image) != MagickFalse))
        {
          if (IsImageGray(next_image) == MagickFalse)
            channels=(size_t) (next_image->colorspace == CMYKColorspace ? 4 :
              3);
          if (next_image->alpha_trait != UndefinedPixelTrait)
            channels++;
        }
      /* +2 skips the shared compression marker about to be written. */
      rows_offset=TellBlob(image)+2;
      count+=WriteCompressionStart(psd_info,image,next_image,compression,
        (ssize_t) channels);
      /* Bytes occupied by one channel's slice of the RLE size table. */
      offset_length=(next_image->rows*(psd_info->version == 1 ? 2 : 4));
    }
  size_offset+=2;
  if ((next_image->storage_class == PseudoClass) &&
      (IsImageGray(next_image) == MagickFalse))
    {
      /* Palette image: write the single index channel. */
      length=WritePSDChannel(psd_info,image_info,image,next_image,
        IndexQuantum,compact_pixels,rows_offset,separate,compression,
        exception);
      if (separate != MagickFalse)
        size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
      else
        rows_offset+=offset_length;
      count+=length;
    }
  else
    {
      if (IsImageGray(next_image) != MagickFalse)
        {
          length=WritePSDChannel(psd_info,image_info,image,next_image,
            GrayQuantum,compact_pixels,rows_offset,separate,compression,
            exception);
          if (separate != MagickFalse)
            size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
          else
            rows_offset+=offset_length;
          count+=length;
        }
      else
        {
          /* PSD stores CMYK samples inverted; negate before writing and
             restore (second NegateCMYK below) afterwards. */
          if (next_image->colorspace == CMYKColorspace)
            (void) NegateCMYK(next_image,exception);
          length=WritePSDChannel(psd_info,image_info,image,next_image,
            RedQuantum,compact_pixels,rows_offset,separate,compression,
            exception);
          if (separate != MagickFalse)
            size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
          else
            rows_offset+=offset_length;
          count+=length;
          length=WritePSDChannel(psd_info,image_info,image,next_image,
            GreenQuantum,compact_pixels,rows_offset,separate,compression,
            exception);
          if (separate != MagickFalse)
            size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
          else
            rows_offset+=offset_length;
          count+=length;
          length=WritePSDChannel(psd_info,image_info,image,next_image,
            BlueQuantum,compact_pixels,rows_offset,separate,compression,
            exception);
          if (separate != MagickFalse)
            size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
          else
            rows_offset+=offset_length;
          count+=length;
          if (next_image->colorspace == CMYKColorspace)
            {
              length=WritePSDChannel(psd_info,image_info,image,next_image,
                BlackQuantum,compact_pixels,rows_offset,separate,compression,
                exception);
              if (separate != MagickFalse)
                size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
              else
                rows_offset+=offset_length;
              count+=length;
            }
        }
      if (next_image->alpha_trait != UndefinedPixelTrait)
        {
          length=WritePSDChannel(psd_info,image_info,image,next_image,
            AlphaQuantum,compact_pixels,rows_offset,separate,compression,
            exception);
          if (separate != MagickFalse)
            size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
          else
            rows_offset+=offset_length;
          count+=length;
        }
    }
  compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
  /* Undo the CMYK negation applied above. */
  if (next_image->colorspace == CMYKColorspace)
    (void) NegateCMYK(next_image,exception);
  if (separate != MagickFalse)
    {
      const char
        *property;

      /* An opacity mask stashed in the image registry is written as one
         extra channel after the regular layer channels. */
      property=GetImageArtifact(next_image,"psd:opacity-mask");
      if (property != (const char *) NULL)
        {
          mask=(Image *) GetImageRegistry(ImageRegistryType,property,
            exception);
          if (mask != (Image *) NULL)
            {
              if (compression == RLECompression)
                {
                  compact_pixels=AcquireCompactPixels(mask,exception);
                  if (compact_pixels == (unsigned char *) NULL)
                    return(0);
                }
              length=WritePSDChannel(psd_info,image_info,image,mask,
                RedQuantum,compact_pixels,rows_offset,MagickTrue,compression,
                exception);
              (void) WritePSDSize(psd_info,image,length,size_offset);
              count+=length;
              compact_pixels=(unsigned char *) RelinquishMagickMemory(
                compact_pixels);
            }
        }
    }
  return(count);
}
/*
  Write `value` as a Pascal string (one length byte followed by at most
  255 characters), then emit NUL bytes until the total size - including
  the length byte - is a multiple of `padding`.  Returns the number of
  bytes written.
*/
static size_t WritePascalString(Image *image,const char *value,size_t padding)
{
  size_t
    bytes_written,
    text_length,
    total;

  text_length=strlen(value);
  if (text_length > 255UL)
    text_length=255UL;  /* Pascal strings cap at 255 characters */
  bytes_written=0;
  if (text_length == 0)
    bytes_written+=WriteBlobByte(image,0);
  else
    {
      bytes_written+=WriteBlobByte(image,(unsigned char) text_length);
      bytes_written+=WriteBlob(image,text_length,
        (const unsigned char *) value);
    }
  total=text_length+1;  /* include the length byte itself */
  while ((total % padding) != 0)
  {
    bytes_written+=WriteBlobByte(image,0);
    total++;
  }
  return(bytes_written);
}
/*
  Emit the 8BIM resolution resource (id 0x03ED): a 16-byte payload holding
  the horizontal and vertical resolutions as 16.16 fixed-point pixels/inch
  plus their display units (1 = pixels/inch, 2 = pixels/cm).
*/
static void WriteResolutionResourceBlock(Image *image)
{
  double
    x_resolution,
    y_resolution;

  unsigned short
    units;

  if (image->units == PixelsPerCentimeterResolution)
    {
      /* PSD stores resolution in pixels/inch; convert from pixels/cm. */
      x_resolution=2.54*65536.0*image->resolution.x;
      y_resolution=2.54*65536.0*image->resolution.y;
      units=2;
    }
  else
    {
      x_resolution=65536.0*image->resolution.x;
      y_resolution=65536.0*image->resolution.y;
      units=1;
    }
  (void) WriteBlob(image,4,(const unsigned char *) "8BIM");
  (void) WriteBlobMSBShort(image,0x03ED);
  (void) WriteBlobMSBShort(image,0);
  (void) WriteBlobMSBLong(image,16); /* resource size */
  /* Round once at the fixed-point conversion.  The previous code added 0.5
     both when computing x/y_resolution and again here, biasing exact values
     up by one (e.g. 72 DPI wrote 4718593 instead of 4718592). */
  (void) WriteBlobMSBLong(image,(unsigned int) (x_resolution+0.5));
  (void) WriteBlobMSBShort(image,units); /* horizontal resolution unit */
  (void) WriteBlobMSBShort(image,units); /* width unit */
  (void) WriteBlobMSBLong(image,(unsigned int) (y_resolution+0.5));
  (void) WriteBlobMSBShort(image,units); /* vertical resolution unit */
  (void) WriteBlobMSBShort(image,units); /* height unit */
}
/*
  Write a layer channel record: the channel id followed by a zero
  length placeholder that is patched later via WritePSDSize().
  The two writes must stay as separate statements so their blob
  order is well defined.
*/
static inline size_t WriteChannelSize(const PSDInfo *psd_info,Image *image,
  const signed short channel)
{
  size_t
    written;

  written=(size_t) WriteBlobShort(image,(const unsigned short) channel);
  written+=SetPSDSize(psd_info,image,0);
  return(written);
}
/*
  Scan the 8BIM resource blocks in `bim_profile` and excise the ICC profile
  resource (id 0x040F) in place, shrinking the StringInfo accordingly.
  Each block is: "8BIM" signature, 16-bit id, Pascal name (read here as two
  sixteen-bit sans words), 32-bit data length, then the (word-padded) data.
*/
static void RemoveICCProfileFromResourceBlock(StringInfo *bim_profile)
{
  const unsigned char
    *p;

  size_t
    length;

  unsigned char
    *datum;

  unsigned int
    count,
    long_sans;

  unsigned short
    id,
    short_sans;

  length=GetStringInfoLength(bim_profile);
  if (length < 16)
    return;  /* too small to hold even one complete block header */
  datum=GetStringInfoDatum(bim_profile);
  for (p=datum; (p >= datum) && (p < (datum+length-16)); )
  {
    unsigned char
      *q;

    q=(unsigned char *) p;  /* remember the start of this block */
    if (LocaleNCompare((const char *) p,"8BIM",4) != 0)
      break;
    p=PushLongPixel(MSBEndian,p,&long_sans);
    p=PushShortPixel(MSBEndian,p,&id);
    p=PushShortPixel(MSBEndian,p,&short_sans);
    p=PushLongPixel(MSBEndian,p,&count);
    if (id == 0x0000040f)
      {
        ssize_t
          quantum;

        /* Total block size: word-padded data plus the 12-byte header. */
        quantum=PSDQuantum(count)+12;
        if ((quantum >= 12) && (quantum < (ssize_t) length))
          {
            /* Shift the remaining blocks over the ICC block and shrink. */
            if ((q+quantum < (datum+length-16)))
              (void) memmove(q,q+quantum,length-quantum-(q-datum));
            SetStringInfoLength(bim_profile,length-quantum);
          }
        break;
      }
    p+=count;
    if ((count & 0x01) != 0)
      p++;  /* data is padded to an even byte count */
  }
}
/*
  Scan the 8BIM resource blocks in `bim_profile` and excise the resolution
  resource (id 0x03ED) in place, shrinking the StringInfo accordingly.
  The coder rewrites this resource itself (WriteResolutionResourceBlock),
  so a stale copy from the source profile must not be carried over.
*/
static void RemoveResolutionFromResourceBlock(StringInfo *bim_profile)
{
  const unsigned char
    *p;

  size_t
    length;

  unsigned char
    *datum;

  unsigned int
    count,
    long_sans;

  unsigned short
    id,
    short_sans;

  length=GetStringInfoLength(bim_profile);
  if (length < 16)
    return;  /* too small to hold even one complete block header */
  datum=GetStringInfoDatum(bim_profile);
  for (p=datum; (p >= datum) && (p < (datum+length-16)); )
  {
    unsigned char
      *q;

    ssize_t
      cnt;

    q=(unsigned char *) p;  /* remember the start of this block */
    if (LocaleNCompare((const char *) p,"8BIM",4) != 0)
      return;
    p=PushLongPixel(MSBEndian,p,&long_sans);
    p=PushShortPixel(MSBEndian,p,&id);
    p=PushShortPixel(MSBEndian,p,&short_sans);
    p=PushLongPixel(MSBEndian,p,&count);
    cnt=PSDQuantum(count);  /* word-padded data length */
    if (cnt < 0)
      return;
    if ((id == 0x000003ed) && (cnt < (ssize_t) (length-12)) &&
        ((ssize_t) length-(cnt+12)-(q-datum)) > 0)
      {
        /* Shift the remaining blocks over this one and shrink the profile;
           cnt+12 covers the padded data plus the block header. */
        (void) memmove(q,q+cnt+12,length-(cnt+12)-(q-datum));
        SetStringInfoLength(bim_profile,length-(cnt+12));
        break;
      }
    p+=count;
    if ((count & 0x01) != 0)
      p++;  /* data is padded to an even byte count */
  }
}
/*
  Filter the "psd:additional-info" profile before writing.  Depending on the
  psd:additional-info option: "all" passes the profile through untouched,
  anything other than "selective" drops it entirely, and "selective" keeps
  only the whitelisted additional-information keys below, compacting the
  profile data in place.  Returns the (possibly re-attached) profile, or
  NULL when nothing should be written.
*/
static const StringInfo *GetAdditionalInformation(const ImageInfo *image_info,
  Image *image,ExceptionInfo *exception)
{
#define PSDKeySize 5
#define PSDAllowedLength 36

  char
    key[PSDKeySize];

  /* Whitelist of keys from: https://www.adobe.com/devnet-apps/photoshop/fileformatashtml/ */
  const char
    allowed[PSDAllowedLength][PSDKeySize] = {
      "blnc", "blwh", "brit", "brst", "clbl", "clrL", "curv", "expA", "FMsk",
      "GdFl", "grdm", "hue ", "hue2", "infx", "knko", "lclr", "levl", "lnsr",
      "lfx2", "luni", "lrFX", "lspf", "lyid", "lyvr", "mixr", "nvrt", "phfl",
      "post", "PtFl", "selc", "shpa", "sn2P", "SoCo", "thrs", "tsly", "vibA"
    },
    *option;

  const StringInfo
    *info;

  MagickBooleanType
    found;

  size_t
    i;

  size_t
    remaining_length,
    length;

  StringInfo
    *profile;

  unsigned char
    *p;

  unsigned int
    size;

  info=GetImageProfile(image,"psd:additional-info");
  if (info == (const StringInfo *) NULL)
    return((const StringInfo *) NULL);
  option=GetImageOption(image_info,"psd:additional-info");
  if (LocaleCompare(option,"all") == 0)
    return(info);  /* keep everything verbatim */
  if (LocaleCompare(option,"selective") != 0)
    {
      /* Option absent or unrecognized: drop the profile entirely. */
      profile=RemoveImageProfile(image,"psd:additional-info");
      return(DestroyStringInfo(profile));
    }
  length=GetStringInfoLength(info);
  p=GetStringInfoDatum(info);
  remaining_length=length;
  length=0;  /* repurposed: total bytes of kept entries */
  while (remaining_length >= 12)
  {
    /* Each entry: 4-byte signature, 4-byte key, 4-byte size, then data. */
    /* skip over signature */
    p+=4;
    key[0]=(char) (*p++);
    key[1]=(char) (*p++);
    key[2]=(char) (*p++);
    key[3]=(char) (*p++);
    key[4]='\0';
    size=(unsigned int) (*p++) << 24;
    size|=(unsigned int) (*p++) << 16;
    size|=(unsigned int) (*p++) << 8;
    size|=(unsigned int) (*p++);
    size=size & 0xffffffff;
    remaining_length-=12;
    if ((size_t) size > remaining_length)
      return((const StringInfo *) NULL);  /* truncated/corrupt entry */
    found=MagickFalse;
    for (i=0; i < PSDAllowedLength; i++)
    {
      if (LocaleNCompare(key,allowed[i],PSDKeySize) != 0)
        continue;
      found=MagickTrue;
      break;
    }
    remaining_length-=(size_t) size;
    if (found == MagickFalse)
      {
        /* Disallowed key: compact the buffer by shifting the remaining
           entries over this entry (header at p-12, data at p..p+size). */
        if (remaining_length > 0)
          p=(unsigned char *) memmove(p-12,p+size,remaining_length);
        continue;
      }
    length+=(size_t) size+12;
    p+=size;
  }
  /* Detach the profile, truncate it to the kept entries, and re-attach a
     clone under the same name (info and profile reference the same data). */
  profile=RemoveImageProfile(image,"psd:additional-info");
  if (length == 0)
    return(DestroyStringInfo(profile));
  SetStringInfoLength(profile,(const size_t) length);
  (void) SetImageProfile(image,"psd:additional-info",info,exception);
  return(profile);
}
static MagickBooleanType WritePSDLayersInternal(Image *image,
const ImageInfo *image_info,const PSDInfo *psd_info,size_t *layers_size,
ExceptionInfo *exception)
{
/*
  Write the PSD "layer records" section followed by the per-layer channel
  data.  Every WriteBlob* helper returns the number of bytes written; those
  are accumulated in `size`, reported back through `layers_size` (when
  non-NULL) and patched into the section-length placeholder at
  `size_offset` once everything has been written.
*/
char
layer_name[MagickPathExtent];
const char
*property;
const StringInfo
*info;
Image
*base_image,
*next_image;
MagickBooleanType
status;
MagickOffsetType
*layer_size_offsets,
size_offset;
ssize_t
i;
size_t
layer_count,
layer_index,
length,
name_length,
rounded_size,
size;
status=MagickTrue;
/*
  The head of the list is the composite image; layers start at the next
  image (fall back to the image itself for a single-image list).
*/
base_image=GetNextImageInList(image);
if (base_image == (Image *) NULL)
base_image=image;
size=0;
/*
  Remember where the section length goes; a zero placeholder is written
  now and patched at the end.
*/
size_offset=TellBlob(image);
(void) SetPSDSize(psd_info,image,0);
layer_count=0;
for (next_image=base_image; next_image != NULL; )
{
layer_count++;
next_image=GetNextImageInList(next_image);
}
/*
  A negative layer count tells readers that the first alpha channel
  holds the merged image's transparency (per the Adobe PSD spec).
*/
if (image->alpha_trait != UndefinedPixelTrait)
size+=WriteBlobShort(image,-(unsigned short) layer_count);
else
size+=WriteBlobShort(image,(unsigned short) layer_count);
layer_size_offsets=(MagickOffsetType *) AcquireQuantumMemory(
(size_t) layer_count,sizeof(MagickOffsetType));
if (layer_size_offsets == (MagickOffsetType *) NULL)
ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed");
/*
  First pass: one layer record per image (bounding box, channel info,
  blend mode, opacity, mask, name).  Channel sizes are placeholders whose
  blob offsets are kept in layer_size_offsets for later patching.
*/
layer_index=0;
for (next_image=base_image; next_image != NULL; )
{
Image
*mask;
unsigned char
default_color;
unsigned short
channels,
total_channels;
mask=(Image *) NULL;
property=GetImageArtifact(next_image,"psd:opacity-mask");
default_color=0;
if (property != (const char *) NULL)
{
mask=(Image *) GetImageRegistry(ImageRegistryType,property,exception);
default_color=(unsigned char) (strlen(property) == 9 ? 255 : 0);
}
/* Layer bounding box: top, left, bottom, right. */
size+=WriteBlobSignedLong(image,(signed int) next_image->page.y);
size+=WriteBlobSignedLong(image,(signed int) next_image->page.x);
size+=WriteBlobSignedLong(image,(signed int) (next_image->page.y+
next_image->rows));
size+=WriteBlobSignedLong(image,(signed int) (next_image->page.x+
next_image->columns));
channels=1;
if ((next_image->storage_class != PseudoClass) &&
(IsImageGray(next_image) == MagickFalse))
channels=(unsigned short) (next_image->colorspace == CMYKColorspace ? 4 :
3);
total_channels=channels;
if (next_image->alpha_trait != UndefinedPixelTrait)
total_channels++;
if (mask != (Image *) NULL)
total_channels++;
size+=WriteBlobShort(image,total_channels);
layer_size_offsets[layer_index++]=TellBlob(image);
/* Channel ids: 0..n-1 for color, -1 for alpha, -2 for the user mask. */
for (i=0; i < (ssize_t) channels; i++)
size+=WriteChannelSize(psd_info,image,(signed short) i);
if (next_image->alpha_trait != UndefinedPixelTrait)
size+=WriteChannelSize(psd_info,image,-1);
if (mask != (Image *) NULL)
size+=WriteChannelSize(psd_info,image,-2);
/* Blend-mode signature; byte-swapped when the blob is little-endian. */
size+=WriteBlobString(image,image->endian == LSBEndian ? "MIB8" :"8BIM");
size+=WriteBlobString(image,CompositeOperatorToPSDBlendMode(next_image));
property=GetImageArtifact(next_image,"psd:layer.opacity");
if (property != (const char *) NULL)
{
Quantum
opacity;
opacity=(Quantum) StringToInteger(property);
size+=WriteBlobByte(image,ScaleQuantumToChar(opacity));
(void) ApplyPSDLayerOpacity(next_image,opacity,MagickTrue,exception);
}
else
size+=WriteBlobByte(image,255);
size+=WriteBlobByte(image,0);
size+=WriteBlobByte(image,(const unsigned char)
(next_image->compose == NoCompositeOp ? 1 << 0x02 : 1)); /* layer properties - visible, etc. */
size+=WriteBlobByte(image,0);
info=GetAdditionalInformation(image_info,next_image,exception);
property=(const char *) GetImageProperty(next_image,"label",exception);
if (property == (const char *) NULL)
{
/* Unnamed layers get a synthetic "L<index>" name. */
(void) FormatLocaleString(layer_name,MagickPathExtent,"L%.20g",
(double) layer_index);
property=layer_name;
}
/*
  Extra-data length: 8 fixed bytes + Pascal name padded to a multiple of
  4 + optional 20-byte mask block + optional additional info.
*/
name_length=strlen(property)+1;
if ((name_length % 4) != 0)
name_length+=(4-(name_length % 4));
if (info != (const StringInfo *) NULL)
name_length+=GetStringInfoLength(info);
name_length+=8;
if (mask != (Image *) NULL)
name_length+=20;
size+=WriteBlobLong(image,(unsigned int) name_length);
if (mask == (Image *) NULL)
size+=WriteBlobLong(image,0);
else
{
if (mask->compose != NoCompositeOp)
(void) ApplyPSDOpacityMask(next_image,mask,ScaleCharToQuantum(
default_color),MagickTrue,exception);
/* Mask geometry is stored relative to the canvas. */
mask->page.y+=image->page.y;
mask->page.x+=image->page.x;
size+=WriteBlobLong(image,20);
size+=WriteBlobSignedLong(image,(const signed int) mask->page.y);
size+=WriteBlobSignedLong(image,(const signed int) mask->page.x);
size+=WriteBlobSignedLong(image,(const signed int) (mask->rows+
mask->page.y));
size+=WriteBlobSignedLong(image,(const signed int) (mask->columns+
mask->page.x));
size+=WriteBlobByte(image,default_color);
size+=WriteBlobByte(image,(const unsigned char)
(mask->compose == NoCompositeOp ? 2 : 0));
size+=WriteBlobMSBShort(image,0);
}
size+=WriteBlobLong(image,0);
size+=WritePascalString(image,property,4);
if (info != (const StringInfo *) NULL)
size+=WriteBlob(image,GetStringInfoLength(info),
GetStringInfoDatum(info));
next_image=GetNextImageInList(next_image);
}
/*
  Now the image data!
*/
next_image=base_image;
layer_index=0;
while (next_image != NULL)
{
/*
  Second pass: write each layer's pixel data; WritePSDChannels also
  patches the channel-size placeholders recorded in the first pass.
*/
length=WritePSDChannels(psd_info,image_info,image,next_image,
layer_size_offsets[layer_index++],MagickTrue,exception);
if (length == 0)
{
status=MagickFalse;
break;
}
size+=length;
next_image=GetNextImageInList(next_image);
}
/*
  Write the total size
*/
if (layers_size != (size_t*) NULL)
*layers_size=size;
/* The section length must be even; round up before patching it in. */
if ((size/2) != ((size+1)/2))
rounded_size=size+1;
else
rounded_size=size;
(void) WritePSDSize(psd_info,image,rounded_size,size_offset);
layer_size_offsets=(MagickOffsetType *) RelinquishMagickMemory(
layer_size_offsets);
/*
  Remove the opacity mask from the registry
*/
next_image=base_image;
while (next_image != (Image *) NULL)
{
property=GetImageArtifact(next_image,"psd:opacity-mask");
if (property != (const char *) NULL)
(void) DeleteImageRegistry(property);
next_image=GetNextImageInList(next_image);
}
return(status);
}
ModuleExport MagickBooleanType WritePSDLayers(Image * image,
const ImageInfo *image_info,const PSDInfo *psd_info,ExceptionInfo *exception)
{
/*
  Public entry point for writing the PSD layer section.  The security
  policy is consulted first; note that when writing is not authorized this
  deliberately returns MagickTrue (silently skipping the layer data)
  rather than signalling an error.
*/
MagickBooleanType
status;
status=IsRightsAuthorized(CoderPolicyDomain,WritePolicyRights,"PSD");
if (status == MagickFalse)
return(MagickTrue);
/* Delegate to the internal writer; no aggregate size is requested. */
return WritePSDLayersInternal(image,image_info,psd_info,(size_t*) NULL,
exception);
}
static MagickBooleanType WritePSDImage(const ImageInfo *image_info,
Image *image,ExceptionInfo *exception)
{
/*
  Top-level PSD/PSB writer: emits the file header, color mode data,
  image resources (8BIM block), the layer-and-mask section and finally
  the composite image data.
*/
const StringInfo
*icc_profile;
MagickBooleanType
status;
PSDInfo
psd_info;
ssize_t
i;
size_t
length,
num_channels;
StringInfo
*bim_profile;
/*
  Open image file.
*/
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
status=OpenBlob(image_info,image,WriteBinaryBlobMode,exception);
if (status == MagickFalse)
return(status);
/*
  Version 2 selects the large-document PSB variant, used when explicitly
  requested or when either dimension exceeds 30000 pixels.
*/
psd_info.version=1;
if ((LocaleCompare(image_info->magick,"PSB") == 0) ||
(image->columns > 30000) || (image->rows > 30000))
psd_info.version=2;
(void) WriteBlob(image,4,(const unsigned char *) "8BPS");
(void) WriteBlobMSBShort(image,psd_info.version); /* version */
for (i=1; i <= 6; i++)
(void) WriteBlobByte(image, 0); /* 6 bytes of reserved */
/*
  Channel count: 1-2 for grayscale/palette output, 3-4 for RGB/CMYK,
  plus one when an alpha channel is present.
*/
if ((GetImageProfile(image,"icc") == (StringInfo *) NULL) &&
(SetImageGray(image,exception) != MagickFalse))
num_channels=(image->alpha_trait != UndefinedPixelTrait ? 2UL : 1UL);
else
if ((image_info->type != TrueColorType) &&
(image_info->type != TrueColorAlphaType) &&
(image->storage_class == PseudoClass))
num_channels=(image->alpha_trait != UndefinedPixelTrait ? 2UL : 1UL);
else
{
if (image->storage_class == PseudoClass)
(void) SetImageStorageClass(image,DirectClass,exception);
if (image->colorspace != CMYKColorspace)
num_channels=(image->alpha_trait != UndefinedPixelTrait ? 4UL : 3UL);
else
num_channels=(image->alpha_trait != UndefinedPixelTrait ? 5UL : 4UL);
}
(void) WriteBlobMSBShort(image,(unsigned short) num_channels);
(void) WriteBlobMSBLong(image,(unsigned int) image->rows);
(void) WriteBlobMSBLong(image,(unsigned int) image->columns);
if (IsImageGray(image) != MagickFalse)
{
MagickBooleanType
monochrome;
/*
  Write depth & mode.
*/
monochrome=IsImageMonochrome(image) && (image->depth == 1) ?
MagickTrue : MagickFalse;
(void) WriteBlobMSBShort(image,(unsigned short)
(monochrome != MagickFalse ? 1 : image->depth > 8 ? 16 : 8));
(void) WriteBlobMSBShort(image,(unsigned short)
(monochrome != MagickFalse ? BitmapMode : GrayscaleMode));
}
else
{
(void) WriteBlobMSBShort(image,(unsigned short) (image->storage_class ==
PseudoClass ? 8 : image->depth > 8 ? 16 : 8));
if (((image_info->colorspace != UndefinedColorspace) ||
(image->colorspace != CMYKColorspace)) &&
(image_info->colorspace != CMYKColorspace))
{
(void) TransformImageColorspace(image,sRGBColorspace,exception);
(void) WriteBlobMSBShort(image,(unsigned short)
(image->storage_class == PseudoClass ? IndexedMode : RGBMode));
}
else
{
if (image->colorspace != CMYKColorspace)
(void) TransformImageColorspace(image,CMYKColorspace,exception);
(void) WriteBlobMSBShort(image,CMYKMode);
}
}
/*
  Color mode data: empty except for indexed images, which store a fixed
  768-byte (256 entries x RGB) palette, zero padded past image->colors.
*/
if ((IsImageGray(image) != MagickFalse) ||
(image->storage_class == DirectClass) || (image->colors > 256))
(void) WriteBlobMSBLong(image,0);
else
{
/*
  Write PSD raster colormap.
*/
(void) WriteBlobMSBLong(image,768);
for (i=0; i < (ssize_t) image->colors; i++)
(void) WriteBlobByte(image,ScaleQuantumToChar(ClampToQuantum(
image->colormap[i].red)));
for ( ; i < 256; i++)
(void) WriteBlobByte(image,0);
for (i=0; i < (ssize_t) image->colors; i++)
(void) WriteBlobByte(image,ScaleQuantumToChar(ClampToQuantum(
image->colormap[i].green)));
for ( ; i < 256; i++)
(void) WriteBlobByte(image,0);
for (i=0; i < (ssize_t) image->colors; i++)
(void) WriteBlobByte(image,ScaleQuantumToChar(ClampToQuantum(
image->colormap[i].blue)));
for ( ; i < 256; i++)
(void) WriteBlobByte(image,0);
}
/*
  Image resource block.
*/
length=28; /* 0x03EB */
bim_profile=(StringInfo *) GetImageProfile(image,"8bim");
icc_profile=GetImageProfile(image,"icc");
if (bim_profile != (StringInfo *) NULL)
{
/*
  Work on a clone: the resolution resource (and the ICC resource, when
  the profile is written separately below) is stripped so it is not
  emitted twice.
*/
bim_profile=CloneStringInfo(bim_profile);
if (icc_profile != (StringInfo *) NULL)
RemoveICCProfileFromResourceBlock(bim_profile);
RemoveResolutionFromResourceBlock(bim_profile);
length+=PSDQuantum(GetStringInfoLength(bim_profile));
}
if (icc_profile != (const StringInfo *) NULL)
length+=PSDQuantum(GetStringInfoLength(icc_profile))+12;
(void) WriteBlobMSBLong(image,(unsigned int) length);
WriteResolutionResourceBlock(image);
if (bim_profile != (StringInfo *) NULL)
{
(void) WriteBlob(image,GetStringInfoLength(bim_profile),
GetStringInfoDatum(bim_profile));
bim_profile=DestroyStringInfo(bim_profile);
}
if (icc_profile != (StringInfo *) NULL)
{
/* Resource id 0x040F carries the ICC profile; pad to an even length. */
(void) WriteBlob(image,4,(const unsigned char *) "8BIM");
(void) WriteBlobMSBShort(image,0x0000040F);
(void) WriteBlobMSBShort(image,0);
(void) WriteBlobMSBLong(image,(unsigned int) GetStringInfoLength(
icc_profile));
(void) WriteBlob(image,GetStringInfoLength(icc_profile),
GetStringInfoDatum(icc_profile));
if ((ssize_t) GetStringInfoLength(icc_profile) != PSDQuantum(GetStringInfoLength(icc_profile)))
(void) WriteBlobByte(image,0);
}
if (status != MagickFalse)
{
const char
*option;
CompressionType
compression;
MagickOffsetType
size_offset;
size_t
size;
/*
  Layer-and-mask section: a zero length placeholder is patched after the
  layers are written (skipped when psd:write-layers=false).
*/
size_offset=TellBlob(image);
(void) SetPSDSize(&psd_info,image,0);
option=GetImageOption(image_info,"psd:write-layers");
if (IsStringFalse(option) != MagickTrue)
{
status=WritePSDLayersInternal(image,image_info,&psd_info,&size,
exception);
(void) WritePSDSize(&psd_info,image,size+
(psd_info.version == 1 ? 8 : 12),size_offset);
(void) WriteBlobMSBLong(image,0); /* user mask data */
}
/*
  Write composite image.
*/
compression=image->compression;
if (image_info->compression != UndefinedCompression)
image->compression=image_info->compression;
/* Zip is not written for the composite; fall back to RLE. */
if (image->compression == ZipCompression)
image->compression=RLECompression;
if (WritePSDChannels(&psd_info,image_info,image,image,0,MagickFalse,
exception) == 0)
status=MagickFalse;
image->compression=compression;
}
(void) CloseBlob(image);
return(status);
}
|
a.35.6.c | /* { dg-do compile } */
void
wrong6 (int n)
{
/*
  Deliberately invalid OpenMP nesting (appears to be example a.35.6 from
  the OpenMP examples suite -- the dg-do directive above only requires it
  to compile): a barrier region may not be closely nested inside a single
  region.  Do not "fix" the nesting; the wrongness is the test.
*/
#pragma omp parallel
{
#pragma omp single
{
work (n, 0);
/* incorrect nesting of barrier region in a single region */
#pragma omp barrier
work (n, 1);
}
}
}
|
cpu_rnnt.h | #pragma once
#include <tuple>
#include <cmath>
#include <cstring>
#include <limits>
#include <algorithm>
#include <numeric>
#include <chrono>
#if !defined(RNNT_DISABLE_OMP) && !defined(APPLE)
#include <omp.h>
#endif
#include "rnnt_helper.h"
// CPU implementation of the RNN-Transducer (RNNT) loss.  The caller
// provides a pre-allocated workspace large enough to hold, per minibatch
// element, the alpha/beta lattices plus a cached (blank, label)
// log-probability pair for every lattice cell.
template<typename ProbT>
class CpuRNNT {
public:
// Noncopyable
// minibatch:     number of utterances per call
// maxT, maxU:    padded time / (label count + 1) dimensions
// alphabet_size: vocabulary size including the blank symbol
// workspace:     caller-owned scratch memory
// blank:         index of the blank label
// num_threads:   OpenMP thread count; <= 0 keeps omp_get_max_threads()
// batch_first:   selects the activation layout (see CpuRNNT_index)
CpuRNNT(int minibatch, int maxT, int maxU, int alphabet_size, void* workspace,
int blank, int num_threads, bool batch_first) :
minibatch_(minibatch), maxT_(maxT), maxU_(maxU), alphabet_size_(alphabet_size),
workspace_(workspace), blank_(blank), num_threads_(num_threads), batch_first(batch_first) {
// OpenMP configuration is skipped entirely when OMP is disabled or on
// APPLE builds (matching the omp.h include guard at the top of the file).
#if defined(RNNT_DISABLE_OMP) || defined(APPLE)
#else
if (num_threads > 0) {
omp_set_num_threads(num_threads);
} else {
num_threads_ = omp_get_max_threads();
}
#endif
};
CpuRNNT(const CpuRNNT&) = delete;
CpuRNNT& operator=(const CpuRNNT&) = delete;
// Compute per-utterance negative log-likelihoods (costs) and gradients
// w.r.t. the activations (grads).
rnntStatus_t cost_and_grad(const ProbT* const log_probs,
ProbT* grads,
ProbT* costs,
const int* const flat_labels,
const int* const label_lengths,
const int* const input_lengths);
// Forward-only scoring: fills costs, computes no gradient.
rnntStatus_t score_forward(const ProbT* const log_probs,
ProbT* costs,
const int* const flat_labels,
const int* const label_lengths,
const int* const input_lengths);
private:
// Flattens (t, u) lattice coordinates and (t, u, v) activation
// coordinates for both supported memory layouts.
class CpuRNNT_index {
public:
CpuRNNT_index(int U, int maxU, int minibatch, int alphabet_size, bool batch_first);
int U;
int maxU;
int minibatch;
int alphabet_size;
bool batch_first;
int operator()(int t, int u);
int operator()(int t, int u, int v);
};
// Carves per-utterance buffers out of the shared workspace and caches
// the two transition log-probabilities needed per lattice cell.
class CpuRNNT_metadata {
public:
CpuRNNT_metadata(int T, int U, void* workspace, size_t bytes_used, int blank,
const int* const labels, const ProbT* const log_probs, CpuRNNT_index& idx);
ProbT* alphas;
ProbT* betas;
ProbT* log_probs2; // only store blank & label
private:
void setup_probs(int T, int U, const int* const labels, int blank,
const ProbT* const log_probs, CpuRNNT_index& idx);
};
int minibatch_;
int maxT_;
int maxU_;
int alphabet_size_; // Number of characters plus blank
void* workspace_;
int blank_;
int num_threads_;
bool batch_first;
ProbT cost_and_grad_kernel(const ProbT* const log_probs, ProbT* grad,
const int* const labels, int mb,
int T, int U, size_t bytes_used);
ProbT compute_alphas(const ProbT* const log_probs, int T, int U, ProbT* alphas);
ProbT compute_betas_and_grad(ProbT* grad, const ProbT* const log_probs,
int T, int U, ProbT* alphas, ProbT* betas,
const int* const labels, ProbT logll);
};
template<typename ProbT>
CpuRNNT<ProbT>::CpuRNNT_metadata::CpuRNNT_metadata(int T, int U, void* workspace, size_t bytes_used, int blank,
        const int* const labels, const ProbT* const log_probs, CpuRNNT_index& idx) {
    // Carve alphas, betas and the two-entry-per-cell log-probability cache
    // out of the caller-provided workspace, starting at offset bytes_used.
    char* cursor = static_cast<char*>(workspace) + bytes_used;
    const size_t lattice_bytes = sizeof(ProbT) * T * U;
    alphas = reinterpret_cast<ProbT*>(cursor);
    cursor += lattice_bytes;
    betas = reinterpret_cast<ProbT*>(cursor);
    cursor += lattice_bytes;
    log_probs2 = reinterpret_cast<ProbT*>(cursor);  // (blank, label) pairs
    setup_probs(T, U, labels, blank, log_probs, idx);
}
template<typename ProbT>
void
CpuRNNT<ProbT>::CpuRNNT_metadata::setup_probs(int T, int U, const int* const labels, int blank,
        const ProbT* const log_probs, CpuRNNT_index& idx) {
    // Cache, for every lattice cell (t, u), the only two log-probabilities
    // the DP ever reads: emitting blank and emitting the next label.
    for (int t = 0; t < T; ++t) {
        for (int u = 0; u < U; ++u) {
            ProbT* cell = log_probs2 + (t * U + u) * 2;
            cell[0] = log_probs[idx(t, u, blank)];
            // labels has no leading blank, so only rows u < U-1 have a label.
            if (u + 1 < U)
                cell[1] = log_probs[idx(t, u, labels[u])];
        }
    }
}
// Index helper: caches the lattice/activation dimensions used by operator().
template<typename ProbT>
CpuRNNT<ProbT>::CpuRNNT_index::CpuRNNT_index(int U, int maxU, int minibatch, int alphabet_size, bool batch_first) :
U(U), maxU(maxU), minibatch(minibatch), alphabet_size(alphabet_size), batch_first(batch_first) {}
template<typename ProbT>
inline int CpuRNNT<ProbT>::CpuRNNT_index::operator()(int t, int u) {
    // Row-major flattening of the (T x U) alpha/beta lattice.
    return u + t * U;
}
template<typename ProbT>
inline int CpuRNNT<ProbT>::CpuRNNT_index::operator()(int t, int u, int v) {
    // Batch-first activations use a contiguous (maxT, maxU, V) volume per
    // utterance; time-major layouts interleave the minibatch, giving a
    // stride of minibatch * alphabet_size per (t, u) cell.
    const int cell = t * maxU + u;
    return batch_first ? cell * alphabet_size + v
                       : cell * minibatch * alphabet_size + v;
}
template<typename ProbT>
ProbT
CpuRNNT<ProbT>::cost_and_grad_kernel(const ProbT* const log_probs, ProbT* grad,
const int* const labels,
int mb, int T, int U, size_t bytes_used) {
// Run the forward and backward RNNT recursions for one utterance, filling
// grad with d(-loglik)/d(activations); returns the utterance cost
// (negative forward log-likelihood).  bytes_used is this utterance's
// offset into the shared workspace.
CpuRNNT_index idx(U, maxU_, minibatch_, alphabet_size_, batch_first);
CpuRNNT_metadata rnntm(T, U, workspace_, bytes_used, blank_, labels, log_probs, idx);
if (batch_first) {
// zero grads
memset(grad, 0, sizeof(ProbT) * maxT_ * maxU_ * alphabet_size_);
}
ProbT llForward = compute_alphas(rnntm.log_probs2, T, U, rnntm.alphas);
ProbT llBackward = compute_betas_and_grad(grad, rnntm.log_probs2, T, U,
rnntm.alphas,
rnntm.betas,
labels,
llForward);
// The two passes compute the same log-likelihood; a large discrepancy
// signals numerical trouble, so warn (but still return the forward value).
ProbT diff = std::abs(llForward - llBackward);
if (diff > 1e-1) {
printf("WARNING: Forward backward likelihood mismatch %f\n", diff);
}
return -llForward;
}
template<typename ProbT>
ProbT
CpuRNNT<ProbT>::compute_alphas(const ProbT* const log_probs, int T, int U, ProbT* alphas) {
// Forward DP over the T x U lattice.  alphas[(t,u)] accumulates the
// log-probability of having emitted the first u labels within the first
// t time steps.  log_probs here is the cached pair array from
// CpuRNNT_metadata: [cell*2] = blank, [cell*2 + 1] = next label.
CpuRNNT_index idx(U, maxU_, minibatch_, alphabet_size_, batch_first);
alphas[0] = 0;
for (int t = 0; t < T; ++t) {
for (int u = 0; u < U; ++u) {
// First column: (t, 0) is reachable only via blank (time) steps.
if (u == 0 && t > 0)
alphas[idx(t, 0)] = alphas[idx(t-1, 0)] + log_probs[idx(t-1, 0) * 2];
// First row: (0, u) is reachable only via label emissions.
if (t == 0 && u > 0)
alphas[idx(0, u)] = alphas[idx(0, u-1)] + log_probs[idx(0, u-1) * 2 + 1];
// Interior: log-sum-exp over arriving via blank vs. via label.
if (t > 0 && u > 0) {
ProbT no_emit = alphas[idx(t-1, u)] + log_probs[idx(t-1, u) * 2];
ProbT emit = alphas[idx(t, u-1)] + log_probs[idx(t, u-1) * 2 + 1];
alphas[idx(t, u)] = rnnt_helper::log_sum_exp<ProbT>(emit, no_emit);
}
}
}
#ifdef DEBUG_KERNEL
printf("cpu alphas:\n");
printf("%d %d\n", T, U);
for (int t = 0; t < T; t++) {
for (int u = 0; u < U; u++) {
printf("%.2f ", alphas[idx(t, u)]);
}
printf("\n");
}
printf("\n");
#endif
// Total log-likelihood: reach (T-1, U-1), then emit the final blank.
ProbT loglike = alphas[idx(T-1, U-1)] + log_probs[idx(T-1, U-1) * 2];
return loglike;
}
template<typename ProbT>
ProbT
CpuRNNT<ProbT>::compute_betas_and_grad(ProbT* grad, const ProbT* const log_probs,
int T, int U, ProbT* alphas, ProbT* betas,
const int* const labels, ProbT logll) {
// Backward DP (betas) followed by the gradient accumulation.
// NOTE: the logll argument (the forward log-likelihood) is never read;
// the backward value betas[0] is used for normalisation instead.
CpuRNNT_index idx(U, maxU_, minibatch_, alphabet_size_, batch_first);
// Terminal cell: only the final blank emission remains.
betas[idx(T-1, U-1)] = log_probs[idx(T-1, U-1) * 2];
for (int t = T-1; t >= 0; --t) {
for (int u = U-1; u >= 0; --u) {
// Last column: only blank (time) transitions lead to the end.
if (u == U-1 && t < T-1)
betas[idx(t, U-1)] = betas[idx(t+1, U-1)] + log_probs[idx(t, U-1) * 2];
// Last row: only label emissions remain.
if (t == T-1 && u < U-1)
betas[idx(T-1, u)] = betas[idx(T-1, u+1)] + log_probs[idx(T-1, u) * 2 + 1];
// Interior: log-sum-exp over leaving via blank vs. via label.
if (t < T-1 && u < U-1) {
ProbT no_emit = betas[idx(t+1, u)] + log_probs[idx(t, u) * 2];
ProbT emit = betas[idx(t, u+1)] + log_probs[idx(t, u) * 2 + 1];
betas[idx(t, u)] = rnnt_helper::log_sum_exp<ProbT>(emit, no_emit);
}
}
}
#ifdef DEBUG_KERNEL
printf("cpu betas:\n");
printf("%d %d\n", T, U);
for (int t = 0; t < T; t++) {
for (int u = 0; u < U; u++) {
printf("%.2f ", betas[idx(t, u)]);
}
printf("\n");
}
printf("\n");
#endif
ProbT loglike = betas[0];
// Gradients w.r.t. log probabilities
// Each transition's gradient is -exp(alpha + transition + beta - loglike),
// i.e. minus its posterior occupancy probability.
for (int t = 0; t < T; ++t) {
for (int u = 0; u < U; ++u) {
if (t < T-1) {
ProbT g = alphas[idx(t, u)] + betas[idx(t+1, u)];
grad[idx(t, u, blank_)] = -std::exp(log_probs[idx(t, u) * 2] + g - loglike);
}
if (u < U-1) {
ProbT g = alphas[idx(t, u)] + betas[idx(t, u+1)];
grad[idx(t, u, labels[u])] = -std::exp(log_probs[idx(t, u) * 2 + 1] + g - loglike);
}
}
}
// gradient to the last blank transition
grad[idx(T-1, U-1, blank_)] = -std::exp(log_probs[idx(T-1, U-1) * 2] + alphas[idx(T-1, U-1)] - loglike);
return loglike;
}
template<typename ProbT>
rnntStatus_t
CpuRNNT<ProbT>::cost_and_grad(const ProbT* const log_probs,
ProbT* grads,
ProbT* costs,
const int* const flat_labels,
const int* const label_lengths,
const int* const input_lengths) {
// Batched loss + gradient.  Each minibatch element owns a disjoint slice
// of the shared workspace (offset mb * per_minibatch_bytes), so the omp
// loop below has no shared mutable state.
// per minibatch memory
size_t per_minibatch_bytes = 0;
// alphas & betas
per_minibatch_bytes += sizeof(ProbT) * maxT_ * maxU_ * 2;
// blank & label log probability cache
per_minibatch_bytes += sizeof(ProbT) * maxT_ * maxU_ * 2;
#pragma omp parallel for
for (int mb = 0; mb < minibatch_; ++mb) {
const int T = input_lengths[mb]; // Length of utterance (time)
const int U = label_lengths[mb] + 1; // Number of labels in transcription
// Stride between consecutive minibatch elements in the activations:
// the full (maxT, maxU, V) volume when batch-first, otherwise just V
// (the batch is interleaved in the leading dimensions; see
// CpuRNNT_index::operator()).
int batch_size = alphabet_size_;
if (batch_first) batch_size = maxT_ * maxU_ * alphabet_size_;
costs[mb] = cost_and_grad_kernel(log_probs + mb * batch_size,
grads + mb * batch_size,
flat_labels + mb * (maxU_ - 1),
mb, T, U, mb * per_minibatch_bytes);
}
return RNNT_STATUS_SUCCESS;
}
template<typename ProbT>
rnntStatus_t
CpuRNNT<ProbT>::score_forward(const ProbT* const log_probs,
ProbT* costs,
const int* const flat_labels,
const int* const label_lengths,
const int* const input_lengths) {
// Forward-only scoring: identical workspace layout and batch offsets as
// cost_and_grad, but only the alpha pass runs and no gradient is written.
// per minibatch memory
size_t per_minibatch_bytes = 0;
// alphas & betas
per_minibatch_bytes += sizeof(ProbT) * maxT_ * maxU_ * 2;
// blank & label log probability cache
per_minibatch_bytes += sizeof(ProbT) * maxT_ * maxU_ * 2;
#pragma omp parallel for
for (int mb = 0; mb < minibatch_; ++mb) {
const int T = input_lengths[mb]; // Length of utterance (time)
const int U = label_lengths[mb] + 1; // Number of labels in transcription
int batch_size = alphabet_size_;
if (batch_first) batch_size = maxT_ * maxU_ * alphabet_size_;
CpuRNNT_index idx(U, maxU_, minibatch_, alphabet_size_, batch_first);
CpuRNNT_metadata rnntm(T, U, workspace_, mb * per_minibatch_bytes, blank_,
flat_labels + mb * (maxU_ - 1), log_probs + mb * batch_size, idx);
// Cost is the negative forward log-likelihood.
costs[mb] = -compute_alphas(rnntm.log_probs2, T, U, rnntm.alphas);
}
return RNNT_STATUS_SUCCESS;
}
|
mxnet_op.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* \file mxnet_op.h
* \brief
* \author Junyuan Xie
*/
#ifndef MXNET_OPERATOR_MXNET_OP_H_
#define MXNET_OPERATOR_MXNET_OP_H_
#include <mxnet/base.h>
#include <algorithm>
namespace mxnet {
namespace op {
namespace mxnet_op {
using namespace mshadow;
#ifdef __CUDA_ARCH__
__constant__ const float PI = 3.14159265358979323846;
#else
const float PI = 3.14159265358979323846;
using std::isnan;
#endif
#ifdef __CUDACC__
#define CUDA_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; \
i < (n); \
i += blockDim.x * gridDim.x)
/*!
* \brief Get the number of blocks for cuda kernel given N
*/
inline int cuda_get_num_blocks(const int N) {
using namespace mshadow::cuda;
return std::min(kMaxGridNum, (N + kBaseThreadNum - 1) / kBaseThreadNum);
}
#endif // __CUDACC__
/*! \brief operator request type switch */
#define MXNET_ASSIGN_REQ_SWITCH(req, ReqType, ...) \
switch (req) { \
case kNullOp: \
break; \
case kWriteInplace: \
case kWriteTo: \
{ \
const int ReqType = kWriteTo; \
{__VA_ARGS__} \
} \
break; \
case kAddTo: \
{ \
const int ReqType = kAddTo; \
{__VA_ARGS__} \
} \
break; \
default: \
break; \
}
/*!
* \brief assign the val to out according
* to request in Kernel::Launch
* \param out the data to be assigned
* \param req the assignment request
* \param val the value to be assigned to out
* \tparam OType output type
* \tparam VType value type
*/
#define KERNEL_ASSIGN(out, req, val) \
{ \
switch (req) { \
case kNullOp: \
break; \
case kWriteTo: \
case kWriteInplace: \
(out) = (val); \
break; \
case kAddTo: \
(out) += (val); \
break; \
default: \
break; \
} \
}
/* \brief Compute flattened index given coordinates and shape. */
template<int ndim>
MSHADOW_XINLINE int ravel(const Shape<ndim>& coord, const Shape<ndim>& shape) {
  // Fold the coordinate into a flat row-major index.  The comparison
  // (shape[axis] > coord[axis]) zeroes any coordinate that is not strictly
  // inside the axis extent, so size-1 (broadcast) axes contribute nothing.
  int flat = 0;
  #pragma unroll
  for (int axis = 0; axis < ndim; ++axis) {
    flat = flat * shape[axis] + (shape[axis] > coord[axis]) * coord[axis];
  }
  return flat;
}
/* Compute coordinates from flattened index given shape */
template<int ndim>
MSHADOW_XINLINE Shape<ndim> unravel(const int idx, const Shape<ndim>& shape) {
  // Invert ravel(): peel coordinates off the flat index from the last
  // (fastest-varying) axis backwards using division and remainder.
  Shape<ndim> coord;
  #pragma unroll
  for (int axis = ndim - 1, rest = idx; axis >= 0; --axis) {
    const int quotient = rest / shape[axis];
    coord[axis] = rest - quotient * shape[axis];
    rest = quotient;
  }
  return coord;
}
/* Compute dot product of two vector */
template<int ndim>
MSHADOW_XINLINE int dot(const Shape<ndim>& coord, const Shape<ndim>& stride) {
  // Inner product of a coordinate with per-axis strides: the flat offset
  // of `coord` in a tensor laid out with `stride`.
  int offset = 0;
  #pragma unroll
  for (int axis = 0; axis < ndim; ++axis) {
    offset += coord[axis] * stride[axis];
  }
  return offset;
}
/* Combining unravel and dot */
template<int ndim>
MSHADOW_XINLINE int unravel_dot(const int idx, const Shape<ndim>& shape,
  const Shape<ndim>& stride) {
  // Fused unravel(idx, shape) followed by dot(., stride), without
  // materialising the intermediate coordinate vector.
  int offset = 0;
  #pragma unroll
  for (int axis = ndim - 1, rest = idx; axis >= 0; --axis) {
    const int quotient = rest / shape[axis];
    offset += (rest - quotient * shape[axis]) * stride[axis];
    rest = quotient;
  }
  return offset;
}
/* Calculate stride of each dim from shape */
template<int ndim>
MSHADOW_XINLINE Shape<ndim> calc_stride(const Shape<ndim>& shape) {
  // Row-major strides.  Axes of extent <= 1 get stride 0, so dot()-based
  // addressing re-reads the same element along broadcast axes.
  Shape<ndim> stride;
  index_t cumprod = 1;
  #pragma unroll
  for (int axis = ndim - 1; axis >= 0; --axis) {
    stride[axis] = (shape[axis] > 1) ? cumprod : 0;
    cumprod *= shape[axis];
  }
  return stride;
}
// Kernel functor: writes the constant `val` into element i of `out`.
struct fill {
template<typename DType>
MSHADOW_XINLINE static void Map(int i, DType* out, const DType val) {
out[i] = val;
}
};
// Kernel functor: zeroes element i of `out`.
struct set_zero {
template<typename DType>
MSHADOW_XINLINE static void Map(int i, DType* out) {
out[i] = static_cast<DType>(0);
}
};
// Primary template: specialised per device (cpu below, gpu under __CUDACC__).
template<typename OP, typename xpu>
struct Kernel;
// CPU launcher: applies OP::Map to every index in [0, N).
template<typename OP>
struct Kernel<OP, cpu> {
template<typename ...Args>
inline static void Launch(mshadow::Stream<cpu> *s, int N, Args... args) {
// The loop is OpenMP-parallelised only in CPU-only builds; presumably
// to avoid interacting with CUDA builds' threading -- TODO confirm.
#if (MXNET_USE_CUDA == 0)
#pragma omp parallel for
#endif
for (int i = 0; i < N; ++i) {
OP::Map(i, args...);
}
}
};
#ifdef __CUDACC__
// Generic CUDA kernel body using a grid-stride loop: each thread handles
// indices i, i + blockDim.x*gridDim.x, ... so any N is covered even when
// the grid is capped.
template<typename OP, typename ...Args>
__global__ void mxnet_generic_kernel(int N, Args... args) {
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += blockDim.x * gridDim.x) {
OP::Map(i, args...);
}
}
// GPU launcher: runs mxnet_generic_kernel on the given stream with
// kBaseThreadNum threads per block and a grid capped at kMaxGridNum.
template<typename OP>
struct Kernel<OP, gpu> {
template<typename ...Args>
inline static void Launch(mshadow::Stream<gpu> *s, int N, Args... args) {
using namespace mshadow::cuda;
int ngrid = std::min(kMaxGridNum, (N + kBaseThreadNum - 1) / kBaseThreadNum);
mxnet_generic_kernel<OP, Args...>
<<<ngrid, kBaseThreadNum, 0, mshadow::Stream<gpu>::GetStream(s)>>>(
N, args...);
}
};
#endif // __CUDACC__
} // namespace mxnet_op
} // namespace op
} // namespace mxnet
#endif // MXNET_OPERATOR_MXNET_OP_H_
|
mandel-omp-task-row.c | /*
* Sequential Mandelbrot program
*
* This program computes and displays all or part of the Mandelbrot
* set. By default, it examines all points in the complex plane
* that have both real and imaginary parts between -2 and 2.
* Command-line parameters allow zooming in on a specific part of
* this range.
*
* Usage:
* mandel [-i maxiter -c x0 y0 -s size -w windowsize]
* where
* maxiter denotes the maximum number of iterations at each point -- by default 1000
* x0, y0, and size specify the range to examine (a square
* centered at (x0 + iy0) of size 2*size by 2*size -- by default,
* a square of size 4 by 4 centered at the origin)
* windowsize denotes the size of the image (diplay window) to compute
*
* Input: none, except the optional command-line arguments
* Output: a graphical display as described in Wilkinson & Allen,
* displayed using the X Window system, plus text output to
* standard output showing the above parameters, plus execution
* time in seconds.
*
* Code based on the original code from Web site for Wilkinson and Allen's
* text on parallel programming:
* http://www.cs.uncc.edu/~abw/parallel/par_prog/
*
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <unistd.h>
#include <malloc.h>
#if _DISPLAY_
#include <X11/Xlib.h>
#include <X11/Xutil.h>
#include <X11/Xos.h>
#endif
#include <sys/time.h>
double getusec_() {
    /* Wall-clock time in microseconds since the Unix epoch, as a double. */
    struct timeval now;
    gettimeofday(&now, NULL);
    return 1e6 * (double) now.tv_sec + (double) now.tv_usec;
}
#define START_COUNT_TIME stamp = getusec_();
#define STOP_COUNT_TIME(_m) stamp = getusec_() - stamp;\
stamp = stamp/1e6;\
printf ("%s: %0.6fs\n",(_m), stamp);
/* Default values for things. */
#define N 2 /* size of problem space (x, y from -N to N) */
#define NPIXELS 800 /* size of display window in pixels */
int row, col; // variables used to traverse the problem space
/* Structure definition for complex numbers */
typedef struct {
double real, imag;
} complex;
#if _DISPLAY_
/* Functions for GUI */
#include "mandelbrot-gui.h" /* has setup(), interact() */
#endif
void mandelbrot(int height,
                int width,
                double real_min,
                double imag_min,
                double scale_real,
                double scale_imag,
                int maxiter,
#if _DISPLAY_
                int setup_return,
                Display *display,
                Window win,
                GC gc,
                double scale_color,
                double min_color)
#else
                int ** output)
#endif
{
    /*
     * Compute the escape-time iteration count for every pixel of a
     * height x width grid over the rectangle with lower-left corner
     * (real_min, imag_min) and per-pixel steps scale_real/scale_imag.
     * One OpenMP task is created per row: a single thread generates the
     * tasks while the rest of the team executes them.
     *
     * Fixes vs. the previous version:
     *  - The loop indices were shared file-scope globals (`row`, `col`);
     *    task correctness relied on firstprivate capturing a global the
     *    generating thread keeps mutating.  They are now function-local,
     *    removing that fragility.
     *  - The bailout test reused the display-range macro N (== 2) as the
     *    escape radius; the classic bound |z| >= 2 is now an explicit
     *    local constant (same value, no macro conflation).
     */
    const double escape_sq = 4.0;  /* |z|^2 >= 4 guarantees divergence */
    #pragma omp parallel
    #pragma omp single
    {
        for (int row = 0; row < height; ++row) {
            #pragma omp task firstprivate(row)
            {
                for (int col = 0; col < width; ++col) {
                    /* Scale display coordinates to the actual region;
                       height-1-row flips y so larger imaginary parts
                       appear at the top. */
                    const double cr = real_min + ((double) col * scale_real);
                    const double ci = imag_min + ((double) (height-1-row) * scale_imag);
                    double zr = 0.0, zi = 0.0;
                    double lengthsq;
                    int k = 0;
                    /* Iterate z <- z^2 + c until divergence or maxiter. */
                    do {
                        const double tmp = zr*zr - zi*zi + cr;
                        zi = 2*zr*zi + ci;
                        zr = tmp;
                        lengthsq = zr*zr + zi*zi;
                        ++k;
                    } while (lengthsq < escape_sq && k < maxiter);
#if _DISPLAY_
                    /* Scale color and display point; Xlib is not
                       thread-safe here, hence the critical section. */
                    long color = (long) ((k-1) * scale_color) + min_color;
                    if (setup_return == EXIT_SUCCESS) {
                        #pragma omp critical
                        {
                            XSetForeground (display, gc, color);
                            XDrawPoint (display, win, gc, col, row);
                        }
                    }
#else
                    output[row][col] = k;
#endif
                }
            }
        }
    }
}
int main(int argc, char *argv[]) {
    /*
     * Driver: parse options, compute the Mandelbrot set over the requested
     * square, then display it (X11 build) or optionally dump the raw
     * iteration counts to "mandel.out" (-o).
     *
     * Fixes vs. the previous version: the "-o" output file is fclose()d
     * (previously leaked), the result matrix is freed, malloc failures are
     * checked, and the non-display build ends with an explicit return.
     */
    int maxiter = 1000;
    double real_min;
    double real_max;
    double imag_min;
    double imag_max;
    int width = NPIXELS;  /* dimensions of display window / image */
    int height = NPIXELS;
    double size = N, x0 = 0, y0 = 0;
#if _DISPLAY_
    Display *display;
    Window win;
    GC gc;
    int setup_return;
    long min_color = 0, max_color = 0;
    double scale_color;
#else
    int ** output;
    FILE *fp = NULL;
#endif
    double scale_real, scale_imag;
    /* Process command-line arguments */
    for (int i = 1; i < argc; i++) {
        if (strcmp(argv[i], "-i") == 0) {
            maxiter = atoi(argv[++i]);
        }
        else if (strcmp(argv[i], "-w") == 0) {
            width = atoi(argv[++i]);
            height = width;
        }
        else if (strcmp(argv[i], "-s") == 0) {
            size = atof(argv[++i]);
        }
#if !_DISPLAY_
        else if (strcmp(argv[i], "-o") == 0) {
            if ((fp = fopen("mandel.out", "wb")) == NULL) {
                fprintf(stderr, "Unable to open file\n");
                return EXIT_FAILURE;
            }
        }
#endif
        else if (strcmp(argv[i], "-c") == 0) {
            x0 = atof(argv[++i]);
            y0 = atof(argv[++i]);
        }
        else {
#if _DISPLAY_
            fprintf(stderr, "Usage: %s [-i maxiter -w windowsize -c x0 y0 -s size]\n", argv[0]);
#else
            fprintf(stderr, "Usage: %s [-o -i maxiter -w windowsize -c x0 y0 -s size]\n", argv[0]);
            fprintf(stderr, " -o to write computed image to disk (default no file generated)\n");
#endif
            fprintf(stderr, " -i to specify maximum number of iterations at each point (default 1000)\n");
#if _DISPLAY_
            fprintf(stderr, " -w to specify the size of the display window (default 800x800 pixels)\n");
#else
            fprintf(stderr, " -w to specify the size of the image to compute (default 800x800 elements)\n");
#endif
            fprintf(stderr, " -c to specify the center x0+iy0 of the square to compute (default origin)\n");
            fprintf(stderr, " -s to specify the size of the square to compute (default 2, i.e. size 4 by 4)\n");
            return EXIT_FAILURE;
        }
    }
    real_min = x0 - size;
    real_max = x0 + size;
    imag_min = y0 - size;
    imag_max = y0 + size;
    /* Produce text output */
    fprintf(stdout, "\n");
    fprintf(stdout, "Mandelbrot program\n");
    fprintf(stdout, "center = (%g, %g), size = %g\n",
            (real_max + real_min)/2, (imag_max + imag_min)/2,
            (real_max - real_min)/2);
    fprintf(stdout, "maximum iterations = %d\n", maxiter);
    fprintf(stdout, "\n");
#if _DISPLAY_
    /* Initialize for graphical display */
    setup_return =
        setup(width, height, &display, &win, &gc, &min_color, &max_color);
    if (setup_return != EXIT_SUCCESS) {
        fprintf(stderr, "Unable to initialize display, continuing\n");
        return EXIT_FAILURE;
    }
#else
    /* Allocate the iteration-count matrix, one row at a time. */
    output = malloc(height*sizeof(int *));
    if (output == NULL) {
        fprintf(stderr, "Out of memory\n");
        return EXIT_FAILURE;
    }
    for (int row = 0; row < height; ++row) {
        output[row] = malloc(width*sizeof(int));
        if (output[row] == NULL) {
            fprintf(stderr, "Out of memory\n");
            return EXIT_FAILURE;
        }
    }
#endif
    /* Compute factors to scale computational region to window */
    scale_real = (double) (real_max - real_min) / (double) width;
    scale_imag = (double) (imag_max - imag_min) / (double) height;
#if _DISPLAY_
    /* Compute factor for color scaling */
    scale_color = (double) (max_color - min_color) / (double) (maxiter - 1);
#endif
    /* Start timing */
    double stamp;
    START_COUNT_TIME;
#if _DISPLAY_
    mandelbrot(height,width,real_min, imag_min, scale_real, scale_imag, maxiter,
               setup_return, display, win, gc, scale_color, min_color);
#else
    mandelbrot(height,width,real_min, imag_min, scale_real, scale_imag, maxiter,
               output);
#endif
    /* End timing */
    STOP_COUNT_TIME("Total execution time");
#if _DISPLAY_
    /* Be sure all output is written */
    if (setup_return == EXIT_SUCCESS) {
        XFlush (display);
    }
    /* Wait for user response, then exit program */
    if (setup_return == EXIT_SUCCESS) {
        interact(display, &win, width, height,
                 real_min, real_max, imag_min, imag_max);
    }
#else
    if (fp != NULL)
    {
        for (int row = 0; row < height; ++row)
            if (fwrite(output[row], sizeof(int), width, fp) != (size_t) width) {
                fprintf(stderr, "Output file not written correctly\n");
            }
        /* Flush and release the output file (previously never closed). */
        if (fclose(fp) != 0)
            fprintf(stderr, "Output file not written correctly\n");
    }
    /* Release the result matrix (previously leaked). */
    for (int row = 0; row < height; ++row)
        free(output[row]);
    free(output);
#endif
    return EXIT_SUCCESS;
}
|
clantr.c | /**
*
* @file
*
* PLASMA is a software package provided by:
* University of Tennessee, US,
* University of Manchester, UK.
*
* @generated from /home/luszczek/workspace/plasma/bitbucket/plasma/compute/zlantr.c, normal z -> c, Fri Sep 28 17:38:08 2018
*
**/
#include "plasma.h"
#include "plasma_async.h"
#include "plasma_context.h"
#include "plasma_descriptor.h"
#include "plasma_internal.h"
#include "plasma_tuning.h"
#include "plasma_types.h"
/***************************************************************************//**
*
* @ingroup plasma_lantr
*
* Returns the norm of a trapezoidal or triangular matrix as
*
* clantr = ( max(abs(A(i,j))), NORM = PlasmaMaxNorm
* (
* ( norm1(A), NORM = PlasmaOneNorm
* (
* ( normI(A), NORM = PlasmaInfNorm
* (
* ( normF(A), NORM = PlasmaFrobeniusNorm
*
* where norm1 denotes the one norm of a matrix (maximum column sum),
* normI denotes the infinity norm of a matrix (maximum row sum) and
* normF denotes the Frobenius norm of a matrix (square root of sum
* of squares). Note that max(abs(A(i,j))) is not a consistent matrix
* norm.
*
*******************************************************************************
*
* @param[in] norm
* - PlasmaMaxNorm: max norm
* - PlasmaOneNorm: one norm
* - PlasmaInfNorm: infinity norm
* - PlasmaFrobeniusNorm: Frobenius norm
*
* @param[in] uplo
* - PlasmaUpper: Upper triangle of A is stored;
* - PlasmaLower: Lower triangle of A is stored.
*
* @param[in] diag
* - PlasmaNonUnit: A has non-unit diagonal,
* - PlasmaUnit: A has unit diagonal.
*
* @param[in] m
* The number of rows of the matrix A. m >= 0. When m = 0,
* the returned value is set to zero.
*
* @param[in] n
* The number of columns of the matrix A. n >= 0. When n = 0,
* the returned value is set to zero.
*
* @param[in] pA
* The m-by-n trapezoidal matrix A.
*
* @param[in] lda
* The leading dimension of the array A. lda >= max(1,m).
*
*******************************************************************************
*
* @retval float
* The specified norm of the trapezoidal or triangular matrix A.
*
*******************************************************************************
*
* @sa plasma_omp_clantr
* @sa plasma_clantr
 * @sa plasma_slantr
*
******************************************************************************/
float plasma_clantr(plasma_enum_t norm, plasma_enum_t uplo, plasma_enum_t diag,
                    int m, int n,
                    plasma_complex32_t *pA, int lda)
{
    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_error("PLASMA not initialized");
        return PlasmaErrorNotInitialized;
    }

    // Check input arguments.
    if ((norm != PlasmaMaxNorm) && (norm != PlasmaOneNorm) &&
        (norm != PlasmaInfNorm) && (norm != PlasmaFrobeniusNorm) ) {
        plasma_error("illegal value of norm");
        return -1;
    }
    if ((uplo != PlasmaUpper) &&
        (uplo != PlasmaLower)) {
        plasma_error("illegal value of uplo");
        return -2;
    }
    if ((diag != PlasmaUnit) &&
        (diag != PlasmaNonUnit)) {
        plasma_error("illegal value of diag");
        return -3;
    }
    if (m < 0) {
        plasma_error("illegal value of m");
        return -4;
    }
    if (n < 0) {
        plasma_error("illegal value of n");
        return -5;
    }
    // -6 corresponds to pA, which is not validated here.
    if (lda < imax(1, m)) {
        // BUGFIX: removed a leftover debug printf of lda that wrote to stdout.
        plasma_error("illegal value of lda");
        return -7;
    }

    // quick return
    if (imin(n, m) == 0)
        return 0.0;

    // Tune parameters.
    if (plasma->tuning)
        plasma_tune_lantr(plasma, PlasmaComplexFloat, m, n);

    // Set tiling parameters.
    int nb = plasma->nb;

    // Create tile matrices.
    plasma_desc_t A;
    int retval;
    retval = plasma_desc_general_create(PlasmaComplexFloat, nb, nb,
                                        m, n, 0, 0, m, n, &A);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_desc_general_create() failed");
        return retval;
    }

    // Allocate norm-specific workspace (sizes match the contract
    // documented for the work parameter of the async interface).
    float *work = NULL;
    switch (norm) {
    case PlasmaMaxNorm:
        work = (float*)malloc((size_t)A.mt*A.nt*sizeof(float));
        break;
    case PlasmaOneNorm:
        work = (float*)calloc(((size_t)A.mt*A.n+A.n), sizeof(float));
        break;
    case PlasmaInfNorm:
        work = (float*)calloc(((size_t)A.nt*A.m+A.m), sizeof(float));
        break;
    case PlasmaFrobeniusNorm:
        work = (float*)calloc((size_t)2*A.mt*A.nt, sizeof(float));
        break;
    }
    if (work == NULL) {
        plasma_error("malloc() failed");
        // BUGFIX: the tile descriptor was leaked on this error path.
        plasma_desc_destroy(&A);
        return PlasmaErrorOutOfMemory;
    }

    // Initialize sequence.
    plasma_sequence_t sequence;
    retval = plasma_sequence_init(&sequence);

    // Initialize request.
    plasma_request_t request;
    retval = plasma_request_init(&request);

    float value;

    // asynchronous block
    #pragma omp parallel
    #pragma omp master
    {
        // Translate to tile layout.
        plasma_omp_cge2desc(pA, lda, A, &sequence, &request);

        // Call tile async function.
        plasma_omp_clantr(norm, uplo, diag, A, work, &value,
                          &sequence, &request);
    }
    // implicit synchronization

    free(work);

    // Free matrix in tile layout.
    plasma_desc_destroy(&A);

    // Return the norm.
    return value;
}
/***************************************************************************//**
*
* @ingroup plasma_lantr
*
* Calculates the max, one, infinity or Frobenius norm of a general matrix.
* Non-blocking equivalent of plasma_clantr(). May return before the
* computation is finished. Operates on matrices stored by tiles. All matrices
* are passed through descriptors. All dimensions are taken from the
* descriptors. Allows for pipelining of operations at runtime.
*
*******************************************************************************
*
* @param[in] norm
* - PlasmaMaxNorm: Max norm
* - PlasmaOneNorm: One norm
* - PlasmaInfNorm: Infinity norm
* - PlasmaFrobeniusNorm: Frobenius norm
*
* @param[in] uplo
* - PlasmaUpper: Upper triangle of A is stored;
* - PlasmaLower: Lower triangle of A is stored.
*
* @param[in] diag
* - PlasmaNonUnit: A has non-unit diagonal,
* - PlasmaUnit: A has unit diagonal.
*
* @param[in] A
* The descriptor of matrix A.
*
* @param[out] work
* Workspace of size:
* - PlasmaMaxNorm: A.mt*A.nt
* - PlasmaOneNorm: A.mt*A.n + A.n
* - PlasmaInfNorm: A.mt*A.n + A.n
* - PlasmaFrobeniusNorm: 2*A.mt*A.nt
*
* @param[out] value
* The calculated value of the norm requested.
*
* @param[in] sequence
* Identifies the sequence of function calls that this call belongs to
* (for completion checks and exception handling purposes).
*
* @param[out] request
* Identifies this function call (for exception handling purposes).
*
* @retval void
* Errors are returned by setting sequence->status and
* request->status to error values. The sequence->status and
* request->status should never be set to PlasmaSuccess (the
* initial values) since another async call may be setting a
* failure value at the same time.
*
*******************************************************************************
*
* @sa plasma_clantr
* @sa plasma_omp_clantr
 * @sa plasma_omp_slantr
*
******************************************************************************/
void plasma_omp_clantr(plasma_enum_t norm, plasma_enum_t uplo,
                       plasma_enum_t diag, plasma_desc_t A,
                       float *work, float *value,
                       plasma_sequence_t *sequence, plasma_request_t *request)
{
    // BUGFIX: validate sequence and request before anything else.  Every
    // other error path reports through plasma_request_fail(), which writes
    // to both pointers; the original code checked them last, so a NULL
    // sequence or request was dereferenced by the earlier checks.
    if (sequence == NULL) {
        plasma_error("NULL sequence");
        return;
    }
    if (request == NULL) {
        plasma_error("NULL request");
        return;
    }

    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_error("PLASMA not initialized");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }

    // Check input arguments.
    if ((norm != PlasmaMaxNorm) && (norm != PlasmaOneNorm) &&
        (norm != PlasmaInfNorm) && (norm != PlasmaFrobeniusNorm)) {
        plasma_error("illegal value of norm");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if ((uplo != PlasmaUpper) &&
        (uplo != PlasmaLower)) {
        plasma_error("illegal value of uplo");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if ((diag != PlasmaUnit) &&
        (diag != PlasmaNonUnit)) {
        plasma_error("illegal value of diag");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (plasma_desc_check(A) != PlasmaSuccess) {
        plasma_error("invalid descriptor A");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }

    // quick return
    if (imin(A.m, A.n) == 0) {
        *value = 0.0;
        return;
    }

    // Call the parallel function.
    plasma_pclantr(norm, uplo, diag, A, work, value, sequence, request);
}
|
tstfft.c | /* -*- mode: C; tab-width: 2; indent-tabs-mode: nil; fill-column: 79; coding: iso-latin-1-unix -*- */
/* tstfft.c
*/
/*
* ref: http://csweb.cs.wfu.edu/~torgerse/fftw_2.1.2/fftw_2.html
* ref: http://www.fftw.org/fftw3_doc/Complex-One_002dDimensional-DFTs.html#Complex-One_002dDimensional-DFTs
* author: Ringbo
* date: 20-7-11
*/
#include <hpcc.h>
#include "hpccfft.h"
/* Runs a single-node FFT round trip: forward transform of a random vector
   with FFTW, inverse transform with the HPCC reference FFT, then checks
   the data is recovered within tolerance.  The forward-transform rate is
   reported via UGflops, the (locally chosen) vector size via Un, and the
   pass/fail flag via Ufailure.  Always returns 0. */
static int
TestFFT1(HPCC_Params *params, int doIO, FILE *outFile, double *UGflops, int *Un, int *Ufailure) {
  fftw_complex *in, *out;
  fftw_plan p;
  hpcc_fftw_plan ip;
  double Gflops = -1.0;
  double maxErr, tmp1, tmp2, tmp3, t0, t1, t2, t3;
  int i, n, flags, failure = 1;
  double deps = HPL_dlamch( HPL_MACH_EPS );
#ifdef HPCC_FFT_235
  int f[3];

  /* Need 2 vectors for input and output and 1 vector of scratch spaces */
  n = HPCC_LocalVectorSize( params, 3, sizeof(fftw_complex), 0 );

  /* Adjust local size for factors */
  for ( ; HPCC_factor235( n, f ); n--)
    ; /* EMPTY */
#else
  /* Need 2 vectors and vectors' sizes as power of 2 */
  n = HPCC_LocalVectorSize( params, 2, sizeof(fftw_complex), 1 );
#endif

  /* need to use fftw_malloc() so that the returned pointers will be aligned properly for SSE
     instructions on Intel/AMD systems */
  in = (fftw_complex *)HPCC_fftw_malloc( (sizeof *in) * n );
  out = (fftw_complex *)HPCC_fftw_malloc( (sizeof *out) * n );

  if (! in || ! out) goto comp_end;

  /* Make sure that `inout' and `work' are initialized in parallel if using
     Open MP: this will ensure better placement of pages if first-touch policy
     is used by a distrubuted shared memory machine. */
#ifdef _OPENMP
#pragma omp parallel for
  for (i = 0; i < n; ++i) {
    c_re( in[i] ) = c_re( out[i] ) = 0.0;
    /* BUGFIX: the original wrote c_re(in[i]) twice and never cleared the
       imaginary part of `in'. */
    c_im( in[i] ) = c_im( out[i] ) = 0.0;
  }
#endif

  t0 = -MPI_Wtime();
  /* cast before doubling, matching the regeneration call below, so the
     element count cannot overflow int for large local sizes */
  HPCC_bcnrand( 2*(s64Int)n, 0, in );
  t0 += MPI_Wtime();

#ifdef HPCC_FFTW_ESTIMATE
  flags = FFTW_ESTIMATE;
#else
  flags = FFTW_MEASURE;
#endif

  t1 = -MPI_Wtime();
#ifndef USING_FFTW3
  p = fftw_create_plan( n, FFTW_FORWARD, flags );
#else
  p = fftwf_plan_dft_1d( n, in, out, FFTW_FORWARD, flags);
#endif
  t1 += MPI_Wtime();

  if (! p) goto comp_end;

  t2 = -MPI_Wtime();
#ifndef USING_FFTW3
  fftw_one( p, in, out );
#else
  fftwf_execute( p);
#endif
  t2 += MPI_Wtime();

  /* BUGFIX: destroy the plan with the routine matching the one that
     created it; fftwf_destroy_plan() was called unconditionally even for
     plans made by fftw_create_plan(). */
#ifndef USING_FFTW3
  fftw_destroy_plan( p );
#else
  fftwf_destroy_plan( p );
#endif

  /* BUGFIX: t3 is printed below even when the inverse plan cannot be
     created, so it must not be left uninitialized. */
  t3 = 0.0;

  ip = HPCC_fftw_create_plan( n, FFTW_BACKWARD, FFTW_ESTIMATE );

  if (ip) {
    t3 = -MPI_Wtime();
    HPCC_fftw_one( ip, out, in );
    t3 += MPI_Wtime();
    HPCC_fftw_destroy_plan( ip );
  }

  HPCC_bcnrand( 2*(s64Int)n, 0, out ); /* regenerate data */

  maxErr = 0.0;
  for (i = 0; i < n; i++) {
    tmp1 = c_re( in[i] ) - c_re( out[i] );
    tmp2 = c_im( in[i] ) - c_im( out[i] );
    tmp3 = sqrt( tmp1*tmp1 + tmp2*tmp2 );
    maxErr = maxErr >= tmp3 ? maxErr : tmp3;
  }
  if (maxErr / log(n) / deps < params->test.thrsh) failure = 0;

  if (doIO) {
    fprintf( outFile, "Vector size: %d\n", n );
    fprintf( outFile, "Generation time: %9.3f\n", t0 );
    fprintf( outFile, "Tuning: %9.3f\n", t1 );
    fprintf( outFile, "Computing: %9.3f\n", t2 );
    fprintf( outFile, "Inverse FFT: %9.3f\n", t3 );
    fprintf( outFile, "max(|x-x0|): %9.3e\n", maxErr );
  }

  /* 5 n log2(n) flops for a complex FFT of size n */
  if (t2 > 0.0) Gflops = 1e-9 * (5.0 * n * log(n) / log(2.0)) / t2;

  comp_end:

  if (out) HPCC_fftw_free( out );
  if (in) HPCC_fftw_free( in );

  *UGflops = Gflops;
  *Un = n;
  *Ufailure = failure;

  return 0;
}
/* Driver for the single-node FFT test: opens the report destination,
   runs TestFFT1, and hands the results back through the optional output
   pointers.  Returns TestFFT1's return value, or 1 if the results file
   cannot be opened. */
int
HPCC_TestFFT(HPCC_Params *params, int doIO, double *UGflops, int *Un, int *Ufailure) {
  FILE *reportFile = NULL;
  double gflops = -1.0;
  int vecSize = 0, fail = 1, status;

  /* Decide where diagnostic text goes: the results file when I/O was
     requested, otherwise a sink device. */
  if (doIO) {
    reportFile = fopen( params->outFname, "a" );
    if (! reportFile) {
      reportFile = stderr;
      fprintf( reportFile, "Cannot open output file.\n" );
      return 1;
    }
  } else {
    reportFile = fopen( "/dev/null", "w" ); /* special filename Unix file systems */
    if (! reportFile)
      reportFile = fopen( "nul", "w"); /* special filename on Windows, produces no output */
  }

  status = TestFFT1( params, doIO, reportFile, &gflops, &vecSize, &fail );

  if (doIO)
    fflush( reportFile );
  if (reportFile)
    fclose( reportFile );

  /* All result pointers are optional. */
  if (UGflops) *UGflops = gflops;
  if (Un) *Un = vecSize;
  if (Ufailure) *Ufailure = fail;

  return status;
}
|
omp_alloc_def_fb.c | // RUN: %libomp-compile-and-run
#include <stdio.h>
#include <omp.h>
// Exercises omp_alloc() with a 2 MB large-capacity pool whose fallback
// trait is omp_atv_default_mem_fb: two threads each allocate 1 MB, which
// must succeed either from the pool or through the fallback.
// Returns 0 when both allocations succeeded, 1 otherwise.
int main() {
  omp_alloctrait_t at[2];
  omp_allocator_handle_t a;
  // BUGFIX: initialize so the final check reads defined values even if
  // the runtime grants fewer than the 2 requested threads.
  void *p[2] = {NULL, NULL};
  at[0].key = omp_atk_pool_size;
  at[0].value = 2 * 1024 * 1024;
  at[1].key = omp_atk_fallback;
  at[1].value = omp_atv_default_mem_fb;
  a = omp_init_allocator(omp_large_cap_mem_space, 2, at);
  printf("allocator large created: %p\n", (void *)a);
  #pragma omp parallel num_threads(2)
  {
    int i = omp_get_thread_num();
    p[i] = omp_alloc(1024 * 1024, a);
    #pragma omp barrier
    printf("th %d, ptr %p\n", i, p[i]);
    omp_free(p[i], a);
  }
  // BUGFIX: release the allocator now that everything allocated through
  // it has been freed.
  omp_destroy_allocator(a);
  // Both pointers should be non-NULL
  if (p[0] != NULL && p[1] != NULL) {
    printf("passed\n");
    return 0;
  } else {
    printf("failed: pointers %p %p\n", p[0], p[1]);
    return 1;
  }
}
|
Sema.h | //===--- Sema.h - Semantic Analysis & AST Building --------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the Sema class, which performs semantic analysis and
// builds ASTs.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_SEMA_SEMA_H
#define LLVM_CLANG_SEMA_SEMA_H
#include "clang/AST/Attr.h"
#include "clang/AST/DeclarationName.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprObjC.h"
#include "clang/AST/ExternalASTSource.h"
#include "clang/AST/LambdaMangleContext.h"
#include "clang/AST/NSAPI.h"
#include "clang/AST/PrettyPrinter.h"
#include "clang/AST/TypeLoc.h"
#include "clang/Basic/ExpressionTraits.h"
#include "clang/Basic/LangOptions.h"
#include "clang/Basic/Specifiers.h"
#include "clang/Basic/TemplateKinds.h"
#include "clang/Basic/TypeTraits.h"
#include "clang/Lex/ModuleLoader.h"
#include "clang/Sema/AnalysisBasedWarnings.h"
#include "clang/Sema/DeclSpec.h"
#include "clang/Sema/ExternalSemaSource.h"
#include "clang/Sema/IdentifierResolver.h"
#include "clang/Sema/LocInfoType.h"
#include "clang/Sema/ObjCMethodList.h"
#include "clang/Sema/Ownership.h"
#include "clang/Sema/ScopeInfo.h"
#include "clang/Sema/TypoCorrection.h"
#include "clang/Sema/Weak.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/OwningPtr.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/MC/MCParser/MCAsmParser.h"
#include <deque>
#include <string>
#include <vector>
namespace llvm {
class APSInt;
template <typename ValueT> struct DenseMapInfo;
template <typename ValueT, typename ValueInfoT> class DenseSet;
class SmallBitVector;
}
namespace clang {
class ADLResult;
class ASTConsumer;
class ASTContext;
class ASTMutationListener;
class ASTReader;
class ASTWriter;
class ArrayType;
class AttributeList;
class BlockDecl;
class CapturedDecl;
class CXXBasePath;
class CXXBasePaths;
class CXXBindTemporaryExpr;
typedef SmallVector<CXXBaseSpecifier*, 4> CXXCastPath;
class CXXConstructorDecl;
class CXXConversionDecl;
class CXXDestructorDecl;
class CXXFieldCollector;
class CXXMemberCallExpr;
class CXXMethodDecl;
class CXXScopeSpec;
class CXXTemporary;
class CXXTryStmt;
class CallExpr;
class ClassTemplateDecl;
class ClassTemplatePartialSpecializationDecl;
class ClassTemplateSpecializationDecl;
class CodeCompleteConsumer;
class CodeCompletionAllocator;
class CodeCompletionTUInfo;
class CodeCompletionResult;
class Decl;
class DeclAccessPair;
class DeclContext;
class DeclRefExpr;
class DeclaratorDecl;
class DeducedTemplateArgument;
class DependentDiagnostic;
class DesignatedInitExpr;
class Designation;
class EnumConstantDecl;
class Expr;
class ExtVectorType;
class ExternalSemaSource;
class FormatAttr;
class FriendDecl;
class FunctionDecl;
class FunctionProtoType;
class FunctionTemplateDecl;
class ImplicitConversionSequence;
class InitListExpr;
class InitializationKind;
class InitializationSequence;
class InitializedEntity;
class IntegerLiteral;
class LabelStmt;
class LambdaExpr;
class LangOptions;
class LocalInstantiationScope;
class LookupResult;
class MacroInfo;
class MultiLevelTemplateArgumentList;
class NamedDecl;
class NonNullAttr;
class ObjCCategoryDecl;
class ObjCCategoryImplDecl;
class ObjCCompatibleAliasDecl;
class ObjCContainerDecl;
class ObjCImplDecl;
class ObjCImplementationDecl;
class ObjCInterfaceDecl;
class ObjCIvarDecl;
template <class T> class ObjCList;
class ObjCMessageExpr;
class ObjCMethodDecl;
class ObjCPropertyDecl;
class ObjCProtocolDecl;
class OMPThreadPrivateDecl;
class OverloadCandidateSet;
class OverloadExpr;
class ParenListExpr;
class ParmVarDecl;
class Preprocessor;
class PseudoDestructorTypeStorage;
class PseudoObjectExpr;
class QualType;
class StandardConversionSequence;
class Stmt;
class StringLiteral;
class SwitchStmt;
class TargetAttributesSema;
class TemplateArgument;
class TemplateArgumentList;
class TemplateArgumentLoc;
class TemplateDecl;
class TemplateParameterList;
class TemplatePartialOrderingContext;
class TemplateTemplateParmDecl;
class Token;
class TypeAliasDecl;
class TypedefDecl;
class TypedefNameDecl;
class TypeLoc;
class UnqualifiedId;
class UnresolvedLookupExpr;
class UnresolvedMemberExpr;
class UnresolvedSetImpl;
class UnresolvedSetIterator;
class UsingDecl;
class UsingShadowDecl;
class ValueDecl;
class VarDecl;
class VisibilityAttr;
class VisibleDeclConsumer;
class IndirectFieldDecl;
namespace sema {
class AccessedEntity;
class BlockScopeInfo;
class CapturedRegionScopeInfo;
class CapturingScopeInfo;
class CompoundScopeInfo;
class DelayedDiagnostic;
class DelayedDiagnosticPool;
class FunctionScopeInfo;
class LambdaScopeInfo;
class PossiblyUnreachableDiag;
class TemplateDeductionInfo;
}
// FIXME: No way to easily map from TemplateTypeParmTypes to
// TemplateTypeParmDecls, so we have this horrible PointerUnion.
typedef std::pair<llvm::PointerUnion<const TemplateTypeParmType*, NamedDecl*>,
SourceLocation> UnexpandedParameterPack;
/// Sema - This implements semantic analysis and AST building for C.
class Sema {
Sema(const Sema &) LLVM_DELETED_FUNCTION;
void operator=(const Sema &) LLVM_DELETED_FUNCTION;
mutable const TargetAttributesSema* TheTargetAttributesSema;
///\brief Source of additional semantic information.
ExternalSemaSource *ExternalSource;
///\brief Whether Sema has generated a multiplexer and has to delete it.
bool isMultiplexExternalSource;
static bool mightHaveNonExternalLinkage(const DeclaratorDecl *FD);
static bool
shouldLinkPossiblyHiddenDecl(const NamedDecl *Old, const NamedDecl *New) {
// We are about to link these. It is now safe to compute the linkage of
// the new decl. If the new decl has external linkage, we will
// link it with the hidden decl (which also has external linkage) and
// it will keep having external linkage. If it has internal linkage, we
// will not link it. Since it has no previous decls, it will remain
// with internal linkage.
return !Old->isHidden() || New->isExternallyVisible();
}
public:
typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy;
typedef OpaquePtr<TemplateName> TemplateTy;
typedef OpaquePtr<QualType> TypeTy;
OpenCLOptions OpenCLFeatures;
FPOptions FPFeatures;
const LangOptions &LangOpts;
Preprocessor &PP;
ASTContext &Context;
ASTConsumer &Consumer;
DiagnosticsEngine &Diags;
SourceManager &SourceMgr;
/// \brief Flag indicating whether or not to collect detailed statistics.
bool CollectStats;
/// \brief Code-completion consumer.
CodeCompleteConsumer *CodeCompleter;
/// CurContext - This is the current declaration context of parsing.
DeclContext *CurContext;
/// \brief Generally null except when we temporarily switch decl contexts,
/// like in \see ActOnObjCTemporaryExitContainerContext.
DeclContext *OriginalLexicalContext;
/// VAListTagName - The declaration name corresponding to __va_list_tag.
/// This is used as part of a hack to omit that class from ADL results.
DeclarationName VAListTagName;
/// PackContext - Manages the stack for \#pragma pack. An alignment
/// of 0 indicates default alignment.
void *PackContext; // Really a "PragmaPackStack*"
bool MSStructPragmaOn; // True when \#pragma ms_struct on
/// VisContext - Manages the stack for \#pragma GCC visibility.
void *VisContext; // Really a "PragmaVisStack*"
/// \brief Flag indicating if Sema is building a recovery call expression.
///
/// This flag is used to avoid building recovery call expressions
/// if Sema is already doing so, which would cause infinite recursions.
bool IsBuildingRecoveryCallExpr;
/// ExprNeedsCleanups - True if the current evaluation context
/// requires cleanups to be run at its conclusion.
bool ExprNeedsCleanups;
/// ExprCleanupObjects - This is the stack of objects requiring
/// cleanup that are created by the current full expression. The
/// element type here is ExprWithCleanups::Object.
SmallVector<BlockDecl*, 8> ExprCleanupObjects;
llvm::SmallPtrSet<Expr*, 2> MaybeODRUseExprs;
/// \brief Stack containing information about each of the nested
/// function, block, and method scopes that are currently active.
///
/// This array is never empty. Clients should ignore the first
/// element, which is used to cache a single FunctionScopeInfo
/// that's used to parse every top-level function.
SmallVector<sema::FunctionScopeInfo *, 4> FunctionScopes;
typedef LazyVector<TypedefNameDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadExtVectorDecls, 2, 2>
ExtVectorDeclsType;
/// ExtVectorDecls - This is a list all the extended vector types. This allows
/// us to associate a raw vector type with one of the ext_vector type names.
/// This is only necessary for issuing pretty diagnostics.
ExtVectorDeclsType ExtVectorDecls;
/// FieldCollector - Collects CXXFieldDecls during parsing of C++ classes.
OwningPtr<CXXFieldCollector> FieldCollector;
typedef llvm::SmallSetVector<const NamedDecl*, 16> NamedDeclSetType;
/// \brief Set containing all declared private fields that are not used.
NamedDeclSetType UnusedPrivateFields;
typedef llvm::SmallPtrSet<const CXXRecordDecl*, 8> RecordDeclSetTy;
/// PureVirtualClassDiagSet - a set of class declarations which we have
/// emitted a list of pure virtual functions. Used to prevent emitting the
/// same list more than once.
OwningPtr<RecordDeclSetTy> PureVirtualClassDiagSet;
/// ParsingInitForAutoVars - a set of declarations with auto types for which
/// we are currently parsing the initializer.
llvm::SmallPtrSet<const Decl*, 4> ParsingInitForAutoVars;
/// \brief A mapping from external names to the most recent
/// locally-scoped extern "C" declaration with that name.
///
/// This map contains external declarations introduced in local
/// scopes, e.g.,
///
/// \code
/// extern "C" void f() {
/// void foo(int, int);
/// }
/// \endcode
///
/// Here, the name "foo" will be associated with the declaration of
/// "foo" within f. This name is not visible outside of
/// "f". However, we still find it in two cases:
///
/// - If we are declaring another global or extern "C" entity with
/// the name "foo", we can find "foo" as a previous declaration,
/// so that the types of this external declaration can be checked
/// for compatibility.
///
/// - If we would implicitly declare "foo" (e.g., due to a call to
/// "foo" in C when no prototype or definition is visible), then
/// we find this declaration of "foo" and complain that it is
/// not visible.
llvm::DenseMap<DeclarationName, NamedDecl *> LocallyScopedExternCDecls;
/// \brief Look for a locally scoped extern "C" declaration by the given name.
NamedDecl *findLocallyScopedExternCDecl(DeclarationName Name);
typedef LazyVector<VarDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadTentativeDefinitions, 2, 2>
TentativeDefinitionsType;
/// \brief All the tentative definitions encountered in the TU.
TentativeDefinitionsType TentativeDefinitions;
typedef LazyVector<const DeclaratorDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadUnusedFileScopedDecls, 2, 2>
UnusedFileScopedDeclsType;
/// \brief The set of file scoped decls seen so far that have not been used
/// and must warn if not used. Only contains the first declaration.
UnusedFileScopedDeclsType UnusedFileScopedDecls;
typedef LazyVector<CXXConstructorDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadDelegatingConstructors, 2, 2>
DelegatingCtorDeclsType;
/// \brief All the delegating constructors seen so far in the file, used for
/// cycle detection at the end of the TU.
DelegatingCtorDeclsType DelegatingCtorDecls;
/// \brief All the destructors seen during a class definition that had their
/// exception spec computation delayed because it depended on an unparsed
/// exception spec.
SmallVector<CXXDestructorDecl*, 2> DelayedDestructorExceptionSpecs;
/// \brief All the overriding destructors seen during a class definition
/// (there could be multiple due to nested classes) that had their exception
/// spec checks delayed, plus the overridden destructor.
SmallVector<std::pair<const CXXDestructorDecl*,
const CXXDestructorDecl*>, 2>
DelayedDestructorExceptionSpecChecks;
/// \brief All the members seen during a class definition which were both
/// explicitly defaulted and had explicitly-specified exception
/// specifications, along with the function type containing their
/// user-specified exception specification. Those exception specifications
/// were overridden with the default specifications, but we still need to
/// check whether they are compatible with the default specification, and
/// we can't do that until the nesting set of class definitions is complete.
SmallVector<std::pair<CXXMethodDecl*, const FunctionProtoType*>, 2>
DelayedDefaultedMemberExceptionSpecs;
/// \brief Callback to the parser to parse templated functions when needed.
typedef void LateTemplateParserCB(void *P, const FunctionDecl *FD);
LateTemplateParserCB *LateTemplateParser;
void *OpaqueParser;
void SetLateTemplateParser(LateTemplateParserCB *LTP, void *P) {
LateTemplateParser = LTP;
OpaqueParser = P;
}
class DelayedDiagnostics;
class DelayedDiagnosticsState {
sema::DelayedDiagnosticPool *SavedPool;
friend class Sema::DelayedDiagnostics;
};
typedef DelayedDiagnosticsState ParsingDeclState;
typedef DelayedDiagnosticsState ProcessingContextState;
/// A class which encapsulates the logic for delaying diagnostics
/// during parsing and other processing.
class DelayedDiagnostics {
/// \brief The current pool of diagnostics into which delayed
/// diagnostics should go.
sema::DelayedDiagnosticPool *CurPool;
public:
DelayedDiagnostics() : CurPool(0) {}
/// Adds a delayed diagnostic.
void add(const sema::DelayedDiagnostic &diag); // in DelayedDiagnostic.h
/// Determines whether diagnostics should be delayed.
bool shouldDelayDiagnostics() { return CurPool != 0; }
/// Returns the current delayed-diagnostics pool.
sema::DelayedDiagnosticPool *getCurrentPool() const {
return CurPool;
}
/// Enter a new scope. Access and deprecation diagnostics will be
/// collected in this pool.
DelayedDiagnosticsState push(sema::DelayedDiagnosticPool &pool) {
DelayedDiagnosticsState state;
state.SavedPool = CurPool;
CurPool = &pool;
return state;
}
/// Leave a delayed-diagnostic state that was previously pushed.
/// Do not emit any of the diagnostics. This is performed as part
/// of the bookkeeping of popping a pool "properly".
void popWithoutEmitting(DelayedDiagnosticsState state) {
CurPool = state.SavedPool;
}
/// Enter a new scope where access and deprecation diagnostics are
/// not delayed.
DelayedDiagnosticsState pushUndelayed() {
DelayedDiagnosticsState state;
state.SavedPool = CurPool;
CurPool = 0;
return state;
}
/// Undo a previous pushUndelayed().
void popUndelayed(DelayedDiagnosticsState state) {
assert(CurPool == NULL);
CurPool = state.SavedPool;
}
} DelayedDiagnostics;
  /// \brief A RAII object to temporarily push a declaration context.
  ///
  /// On construction, makes \p ContextToPush the current Sema::CurContext,
  /// suppresses delayed diagnostics (DelayedDiagnostics::pushUndelayed) and
  /// saves the current CXXThisTypeOverride; pop() (called automatically by
  /// the destructor) restores all three.
  class ContextRAII {
  private:
    Sema &S;
    DeclContext *SavedContext;                 // context restored by pop(); null after pop()
    ProcessingContextState SavedContextState;  // delayed-diagnostics state to restore
    QualType SavedCXXThisTypeOverride;         // 'this' type override to restore

  public:
    ContextRAII(Sema &S, DeclContext *ContextToPush)
      : S(S), SavedContext(S.CurContext),
        SavedContextState(S.DelayedDiagnostics.pushUndelayed()),
        SavedCXXThisTypeOverride(S.CXXThisTypeOverride)
    {
      assert(ContextToPush && "pushing null context");
      S.CurContext = ContextToPush;
    }

    /// Restore the saved state early.  Idempotent: SavedContext is nulled
    /// out, so later calls (including the destructor's) are no-ops.
    void pop() {
      if (!SavedContext) return;
      S.CurContext = SavedContext;
      S.DelayedDiagnostics.popUndelayed(SavedContextState);
      S.CXXThisTypeOverride = SavedCXXThisTypeOverride;
      SavedContext = 0;
    }

    ~ContextRAII() {
      pop();
    }
  };
  /// \brief RAII object to handle the state changes required to synthesize
  /// a function body.
  ///
  /// Pushes the given DeclContext (via the ContextRAII member), a fresh
  /// function scope, and a PotentiallyEvaluated expression-evaluation
  /// context; the destructor pops the evaluation context and the function
  /// scope (the saved DeclContext is restored by ~ContextRAII).
  class SynthesizedFunctionScope {
    Sema &S;
    Sema::ContextRAII SavedContext;  // restores CurContext on destruction

  public:
    SynthesizedFunctionScope(Sema &S, DeclContext *DC)
      : S(S), SavedContext(S, DC)
    {
      S.PushFunctionScope();
      S.PushExpressionEvaluationContext(Sema::PotentiallyEvaluated);
    }

    ~SynthesizedFunctionScope() {
      S.PopExpressionEvaluationContext();
      S.PopFunctionScopeInfo();
    }
  };
/// WeakUndeclaredIdentifiers - Identifiers contained in
/// \#pragma weak before declared. rare. may alias another
/// identifier, declared or undeclared
llvm::DenseMap<IdentifierInfo*,WeakInfo> WeakUndeclaredIdentifiers;
/// ExtnameUndeclaredIdentifiers - Identifiers contained in
/// \#pragma redefine_extname before declared. Used in Solaris system headers
/// to define functions that occur in multiple standards to call the version
/// in the currently selected standard.
llvm::DenseMap<IdentifierInfo*,AsmLabelAttr*> ExtnameUndeclaredIdentifiers;
/// \brief Load weak undeclared identifiers from the external source.
void LoadExternalWeakUndeclaredIdentifiers();
/// WeakTopLevelDecl - Translation-unit scoped declarations generated by
/// \#pragma weak during processing of other Decls.
/// I couldn't figure out a clean way to generate these in-line, so
/// we store them here and handle separately -- which is a hack.
/// It would be best to refactor this.
SmallVector<Decl*,2> WeakTopLevelDecl;
IdentifierResolver IdResolver;
/// Translation Unit Scope - useful to Objective-C actions that need
/// to lookup file scope declarations in the "ordinary" C decl namespace.
/// For example, user-defined classes, built-in "id" type, etc.
Scope *TUScope;
/// \brief The C++ "std" namespace, where the standard library resides.
LazyDeclPtr StdNamespace;
/// \brief The C++ "std::bad_alloc" class, which is defined by the C++
/// standard library.
LazyDeclPtr StdBadAlloc;
/// \brief The C++ "std::initializer_list" template, which is defined in
/// \<initializer_list>.
ClassTemplateDecl *StdInitializerList;
/// \brief The C++ "type_info" declaration, which is defined in \<typeinfo>.
RecordDecl *CXXTypeInfoDecl;
/// \brief The MSVC "_GUID" struct, which is defined in MSVC header files.
RecordDecl *MSVCGuidDecl;
/// \brief Caches identifiers/selectors for NSFoundation APIs.
OwningPtr<NSAPI> NSAPIObj;
/// \brief The declaration of the Objective-C NSNumber class.
ObjCInterfaceDecl *NSNumberDecl;
/// \brief Pointer to NSNumber type (NSNumber *).
QualType NSNumberPointer;
/// \brief The Objective-C NSNumber methods used to create NSNumber literals.
ObjCMethodDecl *NSNumberLiteralMethods[NSAPI::NumNSNumberLiteralMethods];
/// \brief The declaration of the Objective-C NSString class.
ObjCInterfaceDecl *NSStringDecl;
/// \brief Pointer to NSString type (NSString *).
QualType NSStringPointer;
/// \brief The declaration of the stringWithUTF8String: method.
ObjCMethodDecl *StringWithUTF8StringMethod;
/// \brief The declaration of the Objective-C NSArray class.
ObjCInterfaceDecl *NSArrayDecl;
/// \brief The declaration of the arrayWithObjects:count: method.
ObjCMethodDecl *ArrayWithObjectsMethod;
/// \brief The declaration of the Objective-C NSDictionary class.
ObjCInterfaceDecl *NSDictionaryDecl;
/// \brief The declaration of the dictionaryWithObjects:forKeys:count: method.
ObjCMethodDecl *DictionaryWithObjectsMethod;
/// \brief id<NSCopying> type.
QualType QIDNSCopying;
/// \brief will hold 'respondsToSelector:'
Selector RespondsToSelectorSel;
/// A flag to remember whether the implicit forms of operator new and delete
/// have been declared.
bool GlobalNewDeleteDeclared;
/// A flag to indicate that we're in a context that permits abstract
/// references to fields (such as the operand of a SIZE operator).
bool AllowAbstractFieldReference;
/// \brief Describes how the expressions currently being parsed are
/// evaluated at run-time, if at all.
enum ExpressionEvaluationContext {
/// \brief The current expression and its subexpressions occur within an
/// unevaluated operand (C++11 [expr]p7), such as the subexpression of
/// \c sizeof, where the type of the expression may be significant but
/// no code will be generated to evaluate the value of the expression at
/// run time.
Unevaluated,
/// \brief The current expression occurs within an unevaluated
/// operand that unconditionally permits abstract references to
/// fields, such as a SIZE operator in MS-style inline assembly.
UnevaluatedAbstract,
/// \brief The current context is "potentially evaluated" in C++11 terms,
/// but the expression is evaluated at compile-time (like the values of
/// cases in a switch statement).
ConstantEvaluated,
/// \brief The current expression is potentially evaluated at run time,
/// which means that code may be generated to evaluate the value of the
/// expression at run time.
PotentiallyEvaluated,
/// \brief The current expression is potentially evaluated, but any
/// declarations referenced inside that expression are only used if
/// in fact the current expression is used.
///
/// This value is used when parsing default function arguments, for which
/// we would like to provide diagnostics (e.g., passing non-POD arguments
/// through varargs) but do not want to mark declarations as "referenced"
/// until the default argument is used.
PotentiallyEvaluatedIfUsed
};
/// \brief Data structure used to record current or nested
/// expression evaluation contexts.
struct ExpressionEvaluationContextRecord {
/// \brief The expression evaluation context.
ExpressionEvaluationContext Context;
/// \brief Whether the enclosing context needed a cleanup.
bool ParentNeedsCleanups;
/// \brief Whether we are in a decltype expression.
bool IsDecltype;
/// \brief The number of active cleanup objects when we entered
/// this expression evaluation context.
unsigned NumCleanupObjects;
// Expressions whose ODR-use status was deferred when this context was
// entered; restored/processed when the context is popped.
llvm::SmallPtrSet<Expr*, 2> SavedMaybeODRUseExprs;
/// \brief The lambdas that are present within this context, if it
/// is indeed an unevaluated context.
SmallVector<LambdaExpr *, 2> Lambdas;
/// \brief The declaration that provides context for the lambda expression
/// if the normal declaration context does not suffice, e.g., in a
/// default function argument.
Decl *LambdaContextDecl;
/// \brief The context information used to mangle lambda expressions
/// within this context.
///
/// This mangling information is allocated lazily, since most contexts
/// do not have lambda expressions.
IntrusiveRefCntPtr<LambdaMangleContext> LambdaMangle;
/// \brief If we are processing a decltype type, a set of call expressions
/// for which we have deferred checking the completeness of the return type.
SmallVector<CallExpr *, 8> DelayedDecltypeCalls;
/// \brief If we are processing a decltype type, a set of temporary binding
/// expressions for which we have deferred checking the destructor.
SmallVector<CXXBindTemporaryExpr *, 8> DelayedDecltypeBinds;
// Note: LambdaMangle is deliberately initialized empty (null) here; it is
// created on demand by getLambdaMangleContext() below.
ExpressionEvaluationContextRecord(ExpressionEvaluationContext Context,
unsigned NumCleanupObjects,
bool ParentNeedsCleanups,
Decl *LambdaContextDecl,
bool IsDecltype)
: Context(Context), ParentNeedsCleanups(ParentNeedsCleanups),
IsDecltype(IsDecltype), NumCleanupObjects(NumCleanupObjects),
LambdaContextDecl(LambdaContextDecl), LambdaMangle() { }
/// \brief Retrieve the mangling context for lambdas, creating it lazily
/// on first use. Requires LambdaContextDecl to be set.
LambdaMangleContext &getLambdaMangleContext() {
assert(LambdaContextDecl && "Need to have a lambda context declaration");
if (!LambdaMangle)
LambdaMangle = new LambdaMangleContext;
return *LambdaMangle;
}
/// \brief True for both flavors of unevaluated operand contexts.
bool isUnevaluated() const {
return Context == Unevaluated || Context == UnevaluatedAbstract;
}
};
/// A stack of expression evaluation contexts.
SmallVector<ExpressionEvaluationContextRecord, 8> ExprEvalContexts;
/// SpecialMemberOverloadResult - The overloading result for a special member
/// function.
///
/// A thin wrapper over llvm::PointerIntPair: the pointer half holds the
/// selected method (if any) and the low integer bits record whether
/// overload resolution succeeded.
class SpecialMemberOverloadResult : public llvm::FastFoldingSetNode {
public:
/// Outcome of overload resolution for the special member.
enum Kind {
NoMemberOrDeleted,
Ambiguous,
Success
};

private:
/// Resolved method plus the result Kind packed into the pointer's low bits.
llvm::PointerIntPair<CXXMethodDecl*, 2> MethodAndKind;

public:
SpecialMemberOverloadResult(const llvm::FoldingSetNodeID &ID)
: FastFoldingSetNode(ID) {}

CXXMethodDecl *getMethod() const { return MethodAndKind.getPointer(); }
void setMethod(CXXMethodDecl *MD) { MethodAndKind.setPointer(MD); }

Kind getKind() const { return static_cast<Kind>(MethodAndKind.getInt()); }
void setKind(Kind K) { MethodAndKind.setInt(K); }
};
/// \brief A cache of special member function overload resolution results
/// for C++ records.
llvm::FoldingSet<SpecialMemberOverloadResult> SpecialMemberCache;
/// \brief The kind of translation unit we are processing.
///
/// When we're processing a complete translation unit, Sema will perform
/// end-of-translation-unit semantic tasks (such as creating
/// initializers for tentative definitions in C) once parsing has
/// completed. Modules and precompiled headers perform different kinds of
/// checks.
TranslationUnitKind TUKind;
llvm::BumpPtrAllocator BumpAlloc;
/// \brief The number of SFINAE diagnostics that have been trapped.
unsigned NumSFINAEErrors;
typedef llvm::DenseMap<ParmVarDecl *, SmallVector<ParmVarDecl *, 1> >
UnparsedDefaultArgInstantiationsMap;
/// \brief A mapping from parameters with unparsed default arguments to the
/// set of instantiations of each parameter.
///
/// This mapping is a temporary data structure used when parsing
/// nested class templates or nested classes of class templates,
/// where we might end up instantiating an inner class before the
/// default arguments of its methods have been parsed.
UnparsedDefaultArgInstantiationsMap UnparsedDefaultArgInstantiations;
// Contains the locations of the beginning of unparsed default
// argument locations.
llvm::DenseMap<ParmVarDecl *, SourceLocation> UnparsedDefaultArgLocs;
/// UndefinedInternals - all the used, undefined objects which require a
/// definition in this translation unit.
llvm::DenseMap<NamedDecl *, SourceLocation> UndefinedButUsed;
/// Obtain a sorted list of functions that are undefined but ODR-used.
void getUndefinedButUsed(
llvm::SmallVectorImpl<std::pair<NamedDecl *, SourceLocation> > &Undefined);
typedef std::pair<ObjCMethodList, ObjCMethodList> GlobalMethods;
typedef llvm::DenseMap<Selector, GlobalMethods> GlobalMethodPool;
/// Method Pool - allows efficient lookup when typechecking messages to "id".
/// We need to maintain a list, since selectors can have differing signatures
/// across classes. In Cocoa, this happens to be extremely uncommon (only 1%
/// of selectors are "overloaded").
/// At the head of the list it is recorded whether there were 0, 1, or >= 2
/// methods inside categories with a particular selector.
GlobalMethodPool MethodPool;
/// Method selectors used in a \@selector expression. Used for implementation
/// of -Wselector.
llvm::DenseMap<Selector, SourceLocation> ReferencedSelectors;
/// Kinds of C++ special members.
enum CXXSpecialMember {
CXXDefaultConstructor,
CXXCopyConstructor,
CXXMoveConstructor,
CXXCopyAssignment,
CXXMoveAssignment,
CXXDestructor,
/// Sentinel value: not one of the special members above.
CXXInvalid
};
typedef std::pair<CXXRecordDecl*, CXXSpecialMember> SpecialMemberDecl;
/// The C++ special members which we are currently in the process of
/// declaring. If this process recursively triggers the declaration of the
/// same special member, we should act as if it is not yet declared.
llvm::SmallSet<SpecialMemberDecl, 4> SpecialMembersBeingDeclared;
void ReadMethodPool(Selector Sel);
/// Private Helper predicate to check for 'self'.
bool isSelfExpr(Expr *RExpr);
/// \brief Cause the active diagnostic on the DiagosticsEngine to be
/// emitted. This is closely coupled to the SemaDiagnosticBuilder class and
/// should not be used elsewhere.
void EmitCurrentDiagnostic(unsigned DiagID);
/// Records and restores the FP_CONTRACT state on entry/exit of compound
/// statements.
class FPContractStateRAII {
public:
FPContractStateRAII(Sema& S)
: S(S), OldFPContractState(S.FPFeatures.fp_contract) {}
~FPContractStateRAII() {
S.FPFeatures.fp_contract = OldFPContractState;
}
private:
Sema& S;
bool OldFPContractState : 1;
};
typedef llvm::MCAsmParserSemaCallback::InlineAsmIdentifierInfo
InlineAsmIdentifierInfo;
public:
Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer,
TranslationUnitKind TUKind = TU_Complete,
CodeCompleteConsumer *CompletionConsumer = 0);
~Sema();
/// \brief Perform initialization that occurs after the parser has been
/// initialized but before it parses anything.
void Initialize();
const LangOptions &getLangOpts() const { return LangOpts; }
OpenCLOptions &getOpenCLOptions() { return OpenCLFeatures; }
FPOptions &getFPOptions() { return FPFeatures; }
DiagnosticsEngine &getDiagnostics() const { return Diags; }
SourceManager &getSourceManager() const { return SourceMgr; }
const TargetAttributesSema &getTargetAttributesSema() const;
Preprocessor &getPreprocessor() const { return PP; }
ASTContext &getASTContext() const { return Context; }
ASTConsumer &getASTConsumer() const { return Consumer; }
ASTMutationListener *getASTMutationListener() const;
ExternalSemaSource* getExternalSource() const { return ExternalSource; }
///\brief Registers an external source. If an external source already exists,
/// creates a multiplex external source and appends to it.
///
///\param[in] E - A non-null external sema source.
///
void addExternalSource(ExternalSemaSource *E);
void PrintStats() const;
/// \brief Helper class that creates diagnostics with optional
/// template instantiation stacks.
///
/// This class provides a wrapper around the basic DiagnosticBuilder
/// class that emits diagnostics. SemaDiagnosticBuilder is
/// responsible for emitting the diagnostic (as DiagnosticBuilder
/// does) and, if the diagnostic comes from inside a template
/// instantiation, printing the template instantiation stack as
/// well.
class SemaDiagnosticBuilder : public DiagnosticBuilder {
Sema &SemaRef;
unsigned DiagID;
public:
SemaDiagnosticBuilder(DiagnosticBuilder &DB, Sema &SemaRef, unsigned DiagID)
: DiagnosticBuilder(DB), SemaRef(SemaRef), DiagID(DiagID) { }
// The destructor is where the diagnostic is actually issued: it disarms
// the base DiagnosticBuilder and routes emission through Sema instead.
~SemaDiagnosticBuilder() {
// If we aren't active, there is nothing to do.
if (!isActive()) return;
// Otherwise, we need to emit the diagnostic. First flush the underlying
// DiagnosticBuilder data, and clear the diagnostic builder itself so it
// won't emit the diagnostic in its own destructor.
//
// This seems wasteful, in that as written the DiagnosticBuilder dtor will
// do its own needless checks to see if the diagnostic needs to be
// emitted. However, because we take care to ensure that the builder
// objects never escape, a sufficiently smart compiler will be able to
// eliminate that code.
FlushCounts();
Clear();
// Dispatch to Sema to emit the diagnostic.
SemaRef.EmitCurrentDiagnostic(DiagID);
}
/// Teach operator<< to produce an object of the correct type.
/// Streaming is forwarded to the base DiagnosticBuilder, but the
/// SemaDiagnosticBuilder is returned so chaining keeps this type.
template<typename T>
friend const SemaDiagnosticBuilder &operator<<(
const SemaDiagnosticBuilder &Diag, const T &Value) {
const DiagnosticBuilder &BaseDiag = Diag;
BaseDiag << Value;
return Diag;
}
};
/// \brief Emit a diagnostic at \p Loc with the given diagnostic ID.
///
/// Wraps the engine's builder in a SemaDiagnosticBuilder so the diagnostic
/// is ultimately flushed through Sema::EmitCurrentDiagnostic.
SemaDiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID) {
// The builder must be a named lvalue: SemaDiagnosticBuilder's constructor
// takes it by non-const reference.
DiagnosticBuilder Builder = Diags.Report(Loc, DiagID);
return SemaDiagnosticBuilder(Builder, *this, DiagID);
}
/// \brief Emit a partial diagnostic.
SemaDiagnosticBuilder Diag(SourceLocation Loc, const PartialDiagnostic& PD);
/// \brief Build a partial diagnostic.
PartialDiagnostic PDiag(unsigned DiagID = 0); // in SemaInternal.h
bool findMacroSpelling(SourceLocation &loc, StringRef name);
/// \brief Get a string to suggest for zero-initialization of a type.
std::string getFixItZeroInitializerForType(QualType T) const;
std::string getFixItZeroLiteralForType(QualType T) const;
// Ownership-transfer helpers: each simply converts its argument to the
// corresponding ActionResult type (identity conversions in this codebase).
ExprResult Owned(Expr* E) { return E; }
ExprResult Owned(ExprResult R) { return R; }
StmtResult Owned(Stmt* S) { return S; }
void ActOnEndOfTranslationUnit();
void CheckDelegatingCtorCycles();
Scope *getScopeForContext(DeclContext *Ctx);
void PushFunctionScope();
void PushBlockScope(Scope *BlockScope, BlockDecl *Block);
void PushLambdaScope(CXXRecordDecl *Lambda, CXXMethodDecl *CallOperator);
void PushCapturedRegionScope(Scope *RegionScope, CapturedDecl *CD,
RecordDecl *RD,
CapturedRegionKind K);
void PopFunctionScopeInfo(const sema::AnalysisBasedWarnings::Policy *WP =0,
const Decl *D = 0, const BlockExpr *blkExpr = 0);
/// \brief Retrieve the innermost function scope's analysis info.
/// NOTE(review): calls back() directly — assumes FunctionScopes is
/// non-empty; verify callers only use this inside a function scope.
sema::FunctionScopeInfo *getCurFunction() const {
return FunctionScopes.back();
}
/// \brief Record a read/write of a weak object in the current function,
/// unless we are in an unevaluated context (where no code is generated).
template <typename ExprT>
void recordUseOfEvaluatedWeak(const ExprT *E, bool IsRead=true) {
if (isUnevaluatedContext())
return;
getCurFunction()->recordUseOfWeak(E, IsRead);
}
void PushCompoundScope();
void PopCompoundScope();
sema::CompoundScopeInfo &getCurCompoundScope() const;
bool hasAnyUnrecoverableErrorsInThisFunction() const;
/// \brief Retrieve the current block, if any.
sema::BlockScopeInfo *getCurBlock();
/// \brief Retrieve the current lambda expression, if any.
sema::LambdaScopeInfo *getCurLambda();
/// \brief Retrieve the current captured region, if any.
sema::CapturedRegionScopeInfo *getCurCapturedRegion();
/// WeakTopLevelDeclDecls - access to \#pragma weak-generated Decls.
/// Returns a mutable reference to the WeakTopLevelDecl member vector.
SmallVector<Decl*,2> &WeakTopLevelDecls() { return WeakTopLevelDecl; }
void ActOnComment(SourceRange Comment);
//===--------------------------------------------------------------------===//
// Type Analysis / Processing: SemaType.cpp.
//
QualType BuildQualifiedType(QualType T, SourceLocation Loc, Qualifiers Qs,
const DeclSpec *DS = 0);
QualType BuildQualifiedType(QualType T, SourceLocation Loc, unsigned CVRA,
const DeclSpec *DS = 0);
QualType BuildPointerType(QualType T,
SourceLocation Loc, DeclarationName Entity);
QualType BuildReferenceType(QualType T, bool LValueRef,
SourceLocation Loc, DeclarationName Entity);
QualType BuildArrayType(QualType T, ArrayType::ArraySizeModifier ASM,
Expr *ArraySize, unsigned Quals,
SourceRange Brackets, DeclarationName Entity);
QualType BuildExtVectorType(QualType T, Expr *ArraySize,
SourceLocation AttrLoc);
bool CheckFunctionReturnType(QualType T, SourceLocation Loc);
/// \brief Build a function type.
///
/// This routine checks the function type according to C++ rules and
/// under the assumption that the result type and parameter types have
/// just been instantiated from a template. It therefore duplicates
/// some of the behavior of GetTypeForDeclarator, but in a much
/// simpler form that is only suitable for this narrow use case.
///
/// \param T The return type of the function.
///
/// \param ParamTypes The parameter types of the function. This array
/// will be modified to account for adjustments to the types of the
/// function parameters.
///
/// \param Loc The location of the entity whose type involves this
/// function type or, if there is no such entity, the location of the
/// type that will have function type.
///
/// \param Entity The name of the entity that involves the function
/// type, if known.
///
/// \param EPI Extra information about the function type. Usually this will
/// be taken from an existing function with the same prototype.
///
/// \returns A suitable function type, if there are no errors. The
/// unqualified type will always be a FunctionProtoType.
/// Otherwise, returns a NULL type.
QualType BuildFunctionType(QualType T,
llvm::MutableArrayRef<QualType> ParamTypes,
SourceLocation Loc, DeclarationName Entity,
const FunctionProtoType::ExtProtoInfo &EPI);
QualType BuildMemberPointerType(QualType T, QualType Class,
SourceLocation Loc,
DeclarationName Entity);
QualType BuildBlockPointerType(QualType T,
SourceLocation Loc, DeclarationName Entity);
QualType BuildParenType(QualType T);
QualType BuildAtomicType(QualType T, SourceLocation Loc);
TypeSourceInfo *GetTypeForDeclarator(Declarator &D, Scope *S);
TypeSourceInfo *GetTypeForDeclaratorCast(Declarator &D, QualType FromTy);
TypeSourceInfo *GetTypeSourceInfoForDeclarator(Declarator &D, QualType T,
TypeSourceInfo *ReturnTypeInfo);
/// \brief Package the given type and TSI into a ParsedType.
ParsedType CreateParsedType(QualType T, TypeSourceInfo *TInfo);
DeclarationNameInfo GetNameForDeclarator(Declarator &D);
DeclarationNameInfo GetNameFromUnqualifiedId(const UnqualifiedId &Name);
static QualType GetTypeFromParser(ParsedType Ty, TypeSourceInfo **TInfo = 0);
CanThrowResult canThrow(const Expr *E);
const FunctionProtoType *ResolveExceptionSpec(SourceLocation Loc,
const FunctionProtoType *FPT);
bool CheckSpecifiedExceptionType(QualType &T, const SourceRange &Range);
bool CheckDistantExceptionSpec(QualType T);
bool CheckEquivalentExceptionSpec(FunctionDecl *Old, FunctionDecl *New);
bool CheckEquivalentExceptionSpec(
const FunctionProtoType *Old, SourceLocation OldLoc,
const FunctionProtoType *New, SourceLocation NewLoc);
bool CheckEquivalentExceptionSpec(
const PartialDiagnostic &DiagID, const PartialDiagnostic & NoteID,
const FunctionProtoType *Old, SourceLocation OldLoc,
const FunctionProtoType *New, SourceLocation NewLoc,
bool *MissingExceptionSpecification = 0,
bool *MissingEmptyExceptionSpecification = 0,
bool AllowNoexceptAllMatchWithNoSpec = false,
bool IsOperatorNew = false);
bool CheckExceptionSpecSubset(
const PartialDiagnostic &DiagID, const PartialDiagnostic & NoteID,
const FunctionProtoType *Superset, SourceLocation SuperLoc,
const FunctionProtoType *Subset, SourceLocation SubLoc);
bool CheckParamExceptionSpec(const PartialDiagnostic & NoteID,
const FunctionProtoType *Target, SourceLocation TargetLoc,
const FunctionProtoType *Source, SourceLocation SourceLoc);
TypeResult ActOnTypeName(Scope *S, Declarator &D);
/// \brief The parser has parsed the context-sensitive type 'instancetype'
/// in an Objective-C message declaration. Return the appropriate type.
ParsedType ActOnObjCInstanceType(SourceLocation Loc);
/// \brief Abstract class used to diagnose incomplete types.
struct TypeDiagnoser {
// When true, diagnose() implementations are expected to do nothing
// (see the BoundTypeDiagnoser subclasses, which check this flag).
bool Suppressed;
TypeDiagnoser(bool Suppressed = false) : Suppressed(Suppressed) { }
virtual void diagnose(Sema &S, SourceLocation Loc, QualType T) = 0;
virtual ~TypeDiagnoser() {}
};
// The getPrintable overloads normalize a value for streaming into a
// diagnostic. Most are identity conversions; Expr* and TypeLoc are reduced
// to their source ranges, and SourceLocation converts to a SourceRange.
static int getPrintable(int I) { return I; }
static unsigned getPrintable(unsigned I) { return I; }
static bool getPrintable(bool B) { return B; }
static const char * getPrintable(const char *S) { return S; }
static StringRef getPrintable(StringRef S) { return S; }
static const std::string &getPrintable(const std::string &S) { return S; }
static const IdentifierInfo *getPrintable(const IdentifierInfo *II) {
return II;
}
static DeclarationName getPrintable(DeclarationName N) { return N; }
static QualType getPrintable(QualType T) { return T; }
static SourceRange getPrintable(SourceRange R) { return R; }
static SourceRange getPrintable(SourceLocation L) { return L; }
static SourceRange getPrintable(Expr *E) { return E->getSourceRange(); }
static SourceRange getPrintable(TypeLoc TL) { return TL.getSourceRange();}
/// \brief TypeDiagnoser that emits a fixed diagnostic with one bound
/// extra argument. Suppressed when DiagID is 0 (no diagnostic requested).
template<typename T1>
class BoundTypeDiagnoser1 : public TypeDiagnoser {
unsigned DiagID;
const T1 &Arg1;
public:
BoundTypeDiagnoser1(unsigned DiagID, const T1 &Arg1)
: TypeDiagnoser(DiagID == 0), DiagID(DiagID), Arg1(Arg1) { }
virtual void diagnose(Sema &S, SourceLocation Loc, QualType T) {
if (Suppressed) return;
S.Diag(Loc, DiagID) << getPrintable(Arg1) << T;
}
virtual ~BoundTypeDiagnoser1() { }
};
/// \brief TypeDiagnoser that emits a fixed diagnostic with two bound
/// extra arguments. Suppressed when DiagID is 0.
template<typename T1, typename T2>
class BoundTypeDiagnoser2 : public TypeDiagnoser {
unsigned DiagID;
const T1 &Arg1;
const T2 &Arg2;
public:
BoundTypeDiagnoser2(unsigned DiagID, const T1 &Arg1,
const T2 &Arg2)
: TypeDiagnoser(DiagID == 0), DiagID(DiagID), Arg1(Arg1),
Arg2(Arg2) { }
virtual void diagnose(Sema &S, SourceLocation Loc, QualType T) {
if (Suppressed) return;
S.Diag(Loc, DiagID) << getPrintable(Arg1) << getPrintable(Arg2) << T;
}
virtual ~BoundTypeDiagnoser2() { }
};
/// \brief TypeDiagnoser that emits a fixed diagnostic with three bound
/// extra arguments. Suppressed when DiagID is 0.
template<typename T1, typename T2, typename T3>
class BoundTypeDiagnoser3 : public TypeDiagnoser {
unsigned DiagID;
const T1 &Arg1;
const T2 &Arg2;
const T3 &Arg3;
public:
BoundTypeDiagnoser3(unsigned DiagID, const T1 &Arg1,
const T2 &Arg2, const T3 &Arg3)
: TypeDiagnoser(DiagID == 0), DiagID(DiagID), Arg1(Arg1),
Arg2(Arg2), Arg3(Arg3) { }
virtual void diagnose(Sema &S, SourceLocation Loc, QualType T) {
if (Suppressed) return;
S.Diag(Loc, DiagID)
<< getPrintable(Arg1) << getPrintable(Arg2) << getPrintable(Arg3) << T;
}
virtual ~BoundTypeDiagnoser3() { }
};
// Ensure that T is a complete type at Loc, diagnosing via the given
// TypeDiagnoser if it is not. The template overloads below bundle the
// diagnostic's extra arguments into a BoundTypeDiagnoserN and forward
// to the TypeDiagnoser-based form.
bool RequireCompleteType(SourceLocation Loc, QualType T,
TypeDiagnoser &Diagnoser);
bool RequireCompleteType(SourceLocation Loc, QualType T,
unsigned DiagID);
template<typename T1>
bool RequireCompleteType(SourceLocation Loc, QualType T,
unsigned DiagID, const T1 &Arg1) {
BoundTypeDiagnoser1<T1> Diagnoser(DiagID, Arg1);
return RequireCompleteType(Loc, T, Diagnoser);
}
template<typename T1, typename T2>
bool RequireCompleteType(SourceLocation Loc, QualType T,
unsigned DiagID, const T1 &Arg1, const T2 &Arg2) {
BoundTypeDiagnoser2<T1, T2> Diagnoser(DiagID, Arg1, Arg2);
return RequireCompleteType(Loc, T, Diagnoser);
}
template<typename T1, typename T2, typename T3>
bool RequireCompleteType(SourceLocation Loc, QualType T,
unsigned DiagID, const T1 &Arg1, const T2 &Arg2,
const T3 &Arg3) {
BoundTypeDiagnoser3<T1, T2, T3> Diagnoser(DiagID, Arg1, Arg2,
Arg3);
return RequireCompleteType(Loc, T, Diagnoser);
}
// Like RequireCompleteType, but for the type of an expression. The template
// overloads bundle the diagnostic arguments into a BoundTypeDiagnoserN and
// forward to the TypeDiagnoser-based form.
bool RequireCompleteExprType(Expr *E, TypeDiagnoser &Diagnoser);
bool RequireCompleteExprType(Expr *E, unsigned DiagID);
template<typename T1>
bool RequireCompleteExprType(Expr *E, unsigned DiagID, const T1 &Arg1) {
BoundTypeDiagnoser1<T1> Diagnoser(DiagID, Arg1);
return RequireCompleteExprType(E, Diagnoser);
}
template<typename T1, typename T2>
bool RequireCompleteExprType(Expr *E, unsigned DiagID, const T1 &Arg1,
const T2 &Arg2) {
BoundTypeDiagnoser2<T1, T2> Diagnoser(DiagID, Arg1, Arg2);
return RequireCompleteExprType(E, Diagnoser);
}
template<typename T1, typename T2, typename T3>
bool RequireCompleteExprType(Expr *E, unsigned DiagID, const T1 &Arg1,
const T2 &Arg2, const T3 &Arg3) {
BoundTypeDiagnoser3<T1, T2, T3> Diagnoser(DiagID, Arg1, Arg2,
Arg3);
return RequireCompleteExprType(E, Diagnoser);
}
// Ensure that T is a literal type at Loc, diagnosing otherwise. The template
// overloads bundle the diagnostic arguments into a BoundTypeDiagnoserN and
// forward to the TypeDiagnoser-based form.
bool RequireLiteralType(SourceLocation Loc, QualType T,
TypeDiagnoser &Diagnoser);
bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID);
template<typename T1>
bool RequireLiteralType(SourceLocation Loc, QualType T,
unsigned DiagID, const T1 &Arg1) {
BoundTypeDiagnoser1<T1> Diagnoser(DiagID, Arg1);
return RequireLiteralType(Loc, T, Diagnoser);
}
template<typename T1, typename T2>
bool RequireLiteralType(SourceLocation Loc, QualType T,
unsigned DiagID, const T1 &Arg1, const T2 &Arg2) {
BoundTypeDiagnoser2<T1, T2> Diagnoser(DiagID, Arg1, Arg2);
return RequireLiteralType(Loc, T, Diagnoser);
}
template<typename T1, typename T2, typename T3>
bool RequireLiteralType(SourceLocation Loc, QualType T,
unsigned DiagID, const T1 &Arg1, const T2 &Arg2,
const T3 &Arg3) {
BoundTypeDiagnoser3<T1, T2, T3> Diagnoser(DiagID, Arg1, Arg2,
Arg3);
return RequireLiteralType(Loc, T, Diagnoser);
}
QualType getElaboratedType(ElaboratedTypeKeyword Keyword,
const CXXScopeSpec &SS, QualType T);
QualType BuildTypeofExprType(Expr *E, SourceLocation Loc);
QualType BuildDecltypeType(Expr *E, SourceLocation Loc);
QualType BuildUnaryTransformType(QualType BaseType,
UnaryTransformType::UTTKind UKind,
SourceLocation Loc);
//===--------------------------------------------------------------------===//
// Symbol table / Decl tracking callbacks: SemaDecl.cpp.
//
/// List of decls defined in a function prototype. This contains EnumConstants
/// that incorrectly end up in translation unit scope because there is no
/// function to pin them on. ActOnFunctionDeclarator reads this list and patches
/// them into the FunctionDecl.
std::vector<NamedDecl*> DeclsInPrototypeScope;
/// Nonzero if we are currently parsing a function declarator. This is a counter
/// as opposed to a boolean so we can deal with nested function declarators
/// such as:
/// void f(void (*g)(), ...)
unsigned InFunctionDeclarator;
DeclGroupPtrTy ConvertDeclToDeclGroup(Decl *Ptr, Decl *OwnedType = 0);
void DiagnoseUseOfUnimplementedSelectors();
bool isSimpleTypeSpecifier(tok::TokenKind Kind) const;
ParsedType getTypeName(const IdentifierInfo &II, SourceLocation NameLoc,
Scope *S, CXXScopeSpec *SS = 0,
bool isClassName = false,
bool HasTrailingDot = false,
ParsedType ObjectType = ParsedType(),
bool IsCtorOrDtorName = false,
bool WantNontrivialTypeSourceInfo = false,
IdentifierInfo **CorrectedII = 0);
TypeSpecifierType isTagName(IdentifierInfo &II, Scope *S);
bool isMicrosoftMissingTypename(const CXXScopeSpec *SS, Scope *S);
bool DiagnoseUnknownTypeName(IdentifierInfo *&II,
SourceLocation IILoc,
Scope *S,
CXXScopeSpec *SS,
ParsedType &SuggestedType);
/// \brief Describes the result of the name lookup and resolution performed
/// by \c ClassifyName().
enum NameClassificationKind {
// Lookup found nothing usable for the name.
NC_Unknown,
// Lookup failed in a way that has already been diagnosed.
NC_Error,
NC_Keyword,
NC_Type,
NC_Expression,
NC_NestedNameSpecifier,
NC_TypeTemplate,
NC_FunctionTemplate
};
/// Discriminated result of ClassifyName(): exactly one of the payload
/// members below is meaningful, selected by Kind (the getters assert this).
class NameClassification {
NameClassificationKind Kind;
// Payload members; only the one matching Kind is valid.
ExprResult Expr;
TemplateName Template;
ParsedType Type;
const IdentifierInfo *Keyword;
// Private: payload-less kinds are built via the named factories below.
explicit NameClassification(NameClassificationKind Kind) : Kind(Kind) {}
public:
NameClassification(ExprResult Expr) : Kind(NC_Expression), Expr(Expr) {}
NameClassification(ParsedType Type) : Kind(NC_Type), Type(Type) {}
NameClassification(const IdentifierInfo *Keyword)
: Kind(NC_Keyword), Keyword(Keyword) { }
static NameClassification Error() {
return NameClassification(NC_Error);
}
static NameClassification Unknown() {
return NameClassification(NC_Unknown);
}
static NameClassification NestedNameSpecifier() {
return NameClassification(NC_NestedNameSpecifier);
}
static NameClassification TypeTemplate(TemplateName Name) {
NameClassification Result(NC_TypeTemplate);
Result.Template = Name;
return Result;
}
static NameClassification FunctionTemplate(TemplateName Name) {
NameClassification Result(NC_FunctionTemplate);
Result.Template = Name;
return Result;
}
NameClassificationKind getKind() const { return Kind; }
ParsedType getType() const {
assert(Kind == NC_Type);
return Type;
}
ExprResult getExpression() const {
assert(Kind == NC_Expression);
return Expr;
}
TemplateName getTemplateName() const {
assert(Kind == NC_TypeTemplate || Kind == NC_FunctionTemplate);
return Template;
}
TemplateNameKind getTemplateNameKind() const {
assert(Kind == NC_TypeTemplate || Kind == NC_FunctionTemplate);
return Kind == NC_TypeTemplate? TNK_Type_template : TNK_Function_template;
}
};
/// \brief Perform name lookup on the given name, classifying it based on
/// the results of name lookup and the following token.
///
/// This routine is used by the parser to resolve identifiers and help direct
/// parsing. When the identifier cannot be found, this routine will attempt
/// to correct the typo and classify based on the resulting name.
///
/// \param S The scope in which we're performing name lookup.
///
/// \param SS The nested-name-specifier that precedes the name.
///
/// \param Name The identifier. If typo correction finds an alternative name,
/// this pointer parameter will be updated accordingly.
///
/// \param NameLoc The location of the identifier.
///
/// \param NextToken The token following the identifier. Used to help
/// disambiguate the name.
///
/// \param IsAddressOfOperand True if this name is the operand of a unary
/// address of ('&') expression, assuming it is classified as an
/// expression.
///
/// \param CCC The correction callback, if typo correction is desired.
NameClassification ClassifyName(Scope *S,
CXXScopeSpec &SS,
IdentifierInfo *&Name,
SourceLocation NameLoc,
const Token &NextToken,
bool IsAddressOfOperand,
CorrectionCandidateCallback *CCC = 0);
Decl *ActOnDeclarator(Scope *S, Declarator &D);
NamedDecl *HandleDeclarator(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParameterLists);
void RegisterLocallyScopedExternCDecl(NamedDecl *ND, Scope *S);
bool DiagnoseClassNameShadow(DeclContext *DC, DeclarationNameInfo Info);
bool diagnoseQualifiedDeclaration(CXXScopeSpec &SS, DeclContext *DC,
DeclarationName Name,
SourceLocation Loc);
void DiagnoseFunctionSpecifiers(const DeclSpec &DS);
void CheckShadow(Scope *S, VarDecl *D, const LookupResult& R);
void CheckShadow(Scope *S, VarDecl *D);
void CheckCastAlign(Expr *Op, QualType T, SourceRange TRange);
void CheckTypedefForVariablyModifiedType(Scope *S, TypedefNameDecl *D);
NamedDecl* ActOnTypedefDeclarator(Scope* S, Declarator& D, DeclContext* DC,
TypeSourceInfo *TInfo,
LookupResult &Previous);
NamedDecl* ActOnTypedefNameDecl(Scope* S, DeclContext* DC, TypedefNameDecl *D,
LookupResult &Previous, bool &Redeclaration);
NamedDecl* ActOnVariableDeclarator(Scope* S, Declarator& D, DeclContext* DC,
TypeSourceInfo *TInfo,
LookupResult &Previous,
MultiTemplateParamsArg TemplateParamLists);
// Returns true if the variable declaration is a redeclaration
bool CheckVariableDeclaration(VarDecl *NewVD, LookupResult &Previous);
void CheckVariableDeclarationType(VarDecl *NewVD);
void CheckCompleteVariableDeclaration(VarDecl *var);
void MaybeSuggestAddingStaticToDecl(const FunctionDecl *D);
void ActOnStartFunctionDeclarator();
void ActOnEndFunctionDeclarator();
NamedDecl* ActOnFunctionDeclarator(Scope* S, Declarator& D, DeclContext* DC,
TypeSourceInfo *TInfo,
LookupResult &Previous,
MultiTemplateParamsArg TemplateParamLists,
bool &AddToScope);
bool AddOverriddenMethods(CXXRecordDecl *DC, CXXMethodDecl *MD);
void checkVoidParamDecl(ParmVarDecl *Param);
bool CheckConstexprFunctionDecl(const FunctionDecl *FD);
bool CheckConstexprFunctionBody(const FunctionDecl *FD, Stmt *Body);
void DiagnoseHiddenVirtualMethods(CXXRecordDecl *DC, CXXMethodDecl *MD);
// Returns true if the function declaration is a redeclaration
bool CheckFunctionDeclaration(Scope *S,
FunctionDecl *NewFD, LookupResult &Previous,
bool IsExplicitSpecialization);
void CheckMain(FunctionDecl *FD, const DeclSpec &D);
Decl *ActOnParamDeclarator(Scope *S, Declarator &D);
ParmVarDecl *BuildParmVarDeclForTypedef(DeclContext *DC,
SourceLocation Loc,
QualType T);
ParmVarDecl *CheckParameter(DeclContext *DC, SourceLocation StartLoc,
SourceLocation NameLoc, IdentifierInfo *Name,
QualType T, TypeSourceInfo *TSInfo,
StorageClass SC);
void ActOnParamDefaultArgument(Decl *param,
SourceLocation EqualLoc,
Expr *defarg);
void ActOnParamUnparsedDefaultArgument(Decl *param,
SourceLocation EqualLoc,
SourceLocation ArgLoc);
void ActOnParamDefaultArgumentError(Decl *param);
bool SetParamDefaultArgument(ParmVarDecl *Param, Expr *DefaultArg,
SourceLocation EqualLoc);
void AddInitializerToDecl(Decl *dcl, Expr *init, bool DirectInit,
bool TypeMayContainAuto);
void ActOnUninitializedDecl(Decl *dcl, bool TypeMayContainAuto);
void ActOnInitializerError(Decl *Dcl);
void ActOnCXXForRangeDecl(Decl *D);
void SetDeclDeleted(Decl *dcl, SourceLocation DelLoc);
void SetDeclDefaulted(Decl *dcl, SourceLocation DefaultLoc);
void FinalizeDeclaration(Decl *D);
DeclGroupPtrTy FinalizeDeclaratorGroup(Scope *S, const DeclSpec &DS,
Decl **Group,
unsigned NumDecls);
DeclGroupPtrTy BuildDeclaratorGroup(Decl **Group, unsigned NumDecls,
bool TypeMayContainAuto = true);
/// Should be called on all declarations that might have attached
/// documentation comments.
void ActOnDocumentableDecl(Decl *D);
void ActOnDocumentableDecls(Decl **Group, unsigned NumDecls);
void ActOnFinishKNRParamDeclarations(Scope *S, Declarator &D,
SourceLocation LocAfterDecls);
void CheckForFunctionRedefinition(FunctionDecl *FD);
Decl *ActOnStartOfFunctionDef(Scope *S, Declarator &D);
Decl *ActOnStartOfFunctionDef(Scope *S, Decl *D);
void ActOnStartOfObjCMethodDef(Scope *S, Decl *D);
/// Determine whether \p D is a (non-null) Objective-C method declaration.
bool isObjCMethodDecl(Decl *D) {
  // A null declaration can never be an Objective-C method.
  if (!D)
    return false;
  return isa<ObjCMethodDecl>(D);
}
/// \brief Determine whether we can skip parsing the body of a function
/// definition, assuming we don't care about analyzing its body or emitting
/// code for that function.
///
/// This will be \c false only if we may need the body of the function in
/// order to parse the rest of the program (for instance, if it is
/// \c constexpr in C++11 or has an 'auto' return type in C++14).
bool canSkipFunctionBody(Decl *D);
void computeNRVO(Stmt *Body, sema::FunctionScopeInfo *Scope);
Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body);
Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body, bool IsInstantiation);
Decl *ActOnSkippedFunctionBody(Decl *Decl);
/// ActOnFinishDelayedAttribute - Invoked when we have finished parsing an
/// attribute for which parsing is delayed.
void ActOnFinishDelayedAttribute(Scope *S, Decl *D, ParsedAttributes &Attrs);
/// \brief Diagnose any unused parameters in the given sequence of
/// ParmVarDecl pointers.
void DiagnoseUnusedParameters(ParmVarDecl * const *Begin,
ParmVarDecl * const *End);
/// \brief Diagnose whether the size of parameters or return value of a
/// function or obj-c method definition is pass-by-value and larger than a
/// specified threshold.
void DiagnoseSizeOfParametersAndReturnValue(ParmVarDecl * const *Begin,
ParmVarDecl * const *End,
QualType ReturnTy,
NamedDecl *D);
void DiagnoseInvalidJumps(Stmt *Body);
Decl *ActOnFileScopeAsmDecl(Expr *expr,
SourceLocation AsmLoc,
SourceLocation RParenLoc);
/// \brief Handle a C++11 empty-declaration and attribute-declaration.
Decl *ActOnEmptyDeclaration(Scope *S,
AttributeList *AttrList,
SourceLocation SemiLoc);
/// \brief The parser has processed a module import declaration.
///
/// \param AtLoc The location of the '@' symbol, if any.
///
/// \param ImportLoc The location of the 'import' keyword.
///
/// \param Path The module access path.
DeclResult ActOnModuleImport(SourceLocation AtLoc, SourceLocation ImportLoc,
ModuleIdPath Path);
/// \brief Create an implicit import of the given module at the given
/// source location.
///
/// This routine is typically used for error recovery, when the entity found
/// by name lookup is actually hidden within a module that we know about but
/// the user has forgotten to import.
void createImplicitModuleImport(SourceLocation Loc, Module *Mod);
/// \brief Retrieve a suitable printing policy.
PrintingPolicy getPrintingPolicy() const {
// Delegate to the static overload, supplying this Sema's ASTContext and
// Preprocessor.
return getPrintingPolicy(Context, PP);
}
/// \brief Retrieve a suitable printing policy.
static PrintingPolicy getPrintingPolicy(const ASTContext &Ctx,
const Preprocessor &PP);
/// Scope actions.
void ActOnPopScope(SourceLocation Loc, Scope *S);
void ActOnTranslationUnitScope(Scope *S);
Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS,
DeclSpec &DS);
Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS,
DeclSpec &DS,
MultiTemplateParamsArg TemplateParams,
bool IsExplicitInstantiation = false);
Decl *BuildAnonymousStructOrUnion(Scope *S, DeclSpec &DS,
AccessSpecifier AS,
RecordDecl *Record);
Decl *BuildMicrosoftCAnonymousStruct(Scope *S, DeclSpec &DS,
RecordDecl *Record);
bool isAcceptableTagRedeclaration(const TagDecl *Previous,
TagTypeKind NewTag, bool isDefinition,
SourceLocation NewTagLoc,
const IdentifierInfo &Name);
/// \brief The syntactic context in which a tag (enum/struct/class/union)
/// name is being used.
enum TagUseKind {
TUK_Reference, // Reference to a tag: 'struct foo *X;'
TUK_Declaration, // Fwd decl of a tag: 'struct foo;'
TUK_Definition, // Definition of a tag: 'struct foo { int X; } Y;'
TUK_Friend // Friend declaration: 'friend struct foo;'
};
Decl *ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK,
SourceLocation KWLoc, CXXScopeSpec &SS,
IdentifierInfo *Name, SourceLocation NameLoc,
AttributeList *Attr, AccessSpecifier AS,
SourceLocation ModulePrivateLoc,
MultiTemplateParamsArg TemplateParameterLists,
bool &OwnedDecl, bool &IsDependent,
SourceLocation ScopedEnumKWLoc,
bool ScopedEnumUsesClassTag, TypeResult UnderlyingType);
Decl *ActOnTemplatedFriendTag(Scope *S, SourceLocation FriendLoc,
unsigned TagSpec, SourceLocation TagLoc,
CXXScopeSpec &SS,
IdentifierInfo *Name, SourceLocation NameLoc,
AttributeList *Attr,
MultiTemplateParamsArg TempParamLists);
TypeResult ActOnDependentTag(Scope *S,
unsigned TagSpec,
TagUseKind TUK,
const CXXScopeSpec &SS,
IdentifierInfo *Name,
SourceLocation TagLoc,
SourceLocation NameLoc);
void ActOnDefs(Scope *S, Decl *TagD, SourceLocation DeclStart,
IdentifierInfo *ClassName,
SmallVectorImpl<Decl *> &Decls);
Decl *ActOnField(Scope *S, Decl *TagD, SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth);
FieldDecl *HandleField(Scope *S, RecordDecl *TagD, SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth,
InClassInitStyle InitStyle,
AccessSpecifier AS);
MSPropertyDecl *HandleMSProperty(Scope *S, RecordDecl *TagD,
SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth,
InClassInitStyle InitStyle,
AccessSpecifier AS,
AttributeList *MSPropertyAttr);
FieldDecl *CheckFieldDecl(DeclarationName Name, QualType T,
TypeSourceInfo *TInfo,
RecordDecl *Record, SourceLocation Loc,
bool Mutable, Expr *BitfieldWidth,
InClassInitStyle InitStyle,
SourceLocation TSSL,
AccessSpecifier AS, NamedDecl *PrevDecl,
Declarator *D = 0);
bool CheckNontrivialField(FieldDecl *FD);
void DiagnoseNontrivial(const CXXRecordDecl *Record, CXXSpecialMember CSM);
bool SpecialMemberIsTrivial(CXXMethodDecl *MD, CXXSpecialMember CSM,
bool Diagnose = false);
CXXSpecialMember getSpecialMember(const CXXMethodDecl *MD);
void ActOnLastBitfield(SourceLocation DeclStart,
SmallVectorImpl<Decl *> &AllIvarDecls);
Decl *ActOnIvar(Scope *S, SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth,
tok::ObjCKeywordKind visibility);
// This is used for both record definitions and ObjC interface declarations.
void ActOnFields(Scope* S, SourceLocation RecLoc, Decl *TagDecl,
ArrayRef<Decl *> Fields,
SourceLocation LBrac, SourceLocation RBrac,
AttributeList *AttrList);
/// ActOnTagStartDefinition - Invoked when we have entered the
/// scope of a tag's definition (e.g., for an enumeration, class,
/// struct, or union).
void ActOnTagStartDefinition(Scope *S, Decl *TagDecl);
Decl *ActOnObjCContainerStartDefinition(Decl *IDecl);
/// ActOnStartCXXMemberDeclarations - Invoked when we have parsed a
/// C++ record definition's base-specifiers clause and are starting its
/// member declarations.
void ActOnStartCXXMemberDeclarations(Scope *S, Decl *TagDecl,
SourceLocation FinalLoc,
SourceLocation LBraceLoc);
/// ActOnTagFinishDefinition - Invoked once we have finished parsing
/// the definition of a tag (enumeration, class, struct, or union).
void ActOnTagFinishDefinition(Scope *S, Decl *TagDecl,
SourceLocation RBraceLoc);
void ActOnObjCContainerFinishDefinition();
/// \brief Invoked when we must temporarily exit the objective-c container
/// scope for parsing/looking-up C constructs.
///
/// Must be followed by a call to \see ActOnObjCReenterContainerContext
void ActOnObjCTemporaryExitContainerContext(DeclContext *DC);
void ActOnObjCReenterContainerContext(DeclContext *DC);
/// ActOnTagDefinitionError - Invoked when there was an unrecoverable
/// error parsing the definition of a tag.
void ActOnTagDefinitionError(Scope *S, Decl *TagDecl);
EnumConstantDecl *CheckEnumConstant(EnumDecl *Enum,
EnumConstantDecl *LastEnumConst,
SourceLocation IdLoc,
IdentifierInfo *Id,
Expr *val);
bool CheckEnumUnderlyingType(TypeSourceInfo *TI);
bool CheckEnumRedeclaration(SourceLocation EnumLoc, bool IsScoped,
QualType EnumUnderlyingTy, const EnumDecl *Prev);
Decl *ActOnEnumConstant(Scope *S, Decl *EnumDecl, Decl *LastEnumConstant,
SourceLocation IdLoc, IdentifierInfo *Id,
AttributeList *Attrs,
SourceLocation EqualLoc, Expr *Val);
void ActOnEnumBody(SourceLocation EnumLoc, SourceLocation LBraceLoc,
SourceLocation RBraceLoc, Decl *EnumDecl,
ArrayRef<Decl *> Elements,
Scope *S, AttributeList *Attr);
DeclContext *getContainingDC(DeclContext *DC);
/// Set the current declaration context until it gets popped.
void PushDeclContext(Scope *S, DeclContext *DC);
void PopDeclContext();
/// EnterDeclaratorContext - Used when we must lookup names in the context
/// of a declarator's nested name specifier.
void EnterDeclaratorContext(Scope *S, DeclContext *DC);
void ExitDeclaratorContext(Scope *S);
/// Push the parameters of D, which must be a function, into scope.
void ActOnReenterFunctionContext(Scope* S, Decl* D);
void ActOnExitFunctionContext();
DeclContext *getFunctionLevelDeclContext();
/// getCurFunctionDecl - If inside of a function body, this returns a pointer
/// to the function decl for the function being parsed. If we're currently
/// in a 'block', this returns the containing context.
FunctionDecl *getCurFunctionDecl();
/// getCurMethodDecl - If inside of a method body, this returns a pointer to
/// the method decl for the method being parsed. If we're currently
/// in a 'block', this returns the containing context.
ObjCMethodDecl *getCurMethodDecl();
/// getCurFunctionOrMethodDecl - Return the Decl for the current ObjC method
/// or C function we're in, otherwise return null. If we're currently
/// in a 'block', this returns the containing context.
NamedDecl *getCurFunctionOrMethodDecl();
/// Add this decl to the scope shadowed decl chains.
void PushOnScopeChains(NamedDecl *D, Scope *S, bool AddToContext = true);
/// \brief Make the given externally-produced declaration visible at the
/// top level scope.
///
/// \param D The externally-produced declaration to push.
///
/// \param Name The name of the externally-produced declaration.
void pushExternalDeclIntoScope(NamedDecl *D, DeclarationName Name);
/// isDeclInScope - If 'Ctx' is a function/method, isDeclInScope returns true
/// if 'D' is in Scope 'S', otherwise 'S' is ignored and isDeclInScope returns
/// true if 'D' belongs to the given declaration context.
///
/// \param ExplicitInstantiationOrSpecialization When true, we are checking
/// whether the declaration is in scope for the purposes of explicit template
/// instantiation or specialization. The default is false.
bool isDeclInScope(NamedDecl *&D, DeclContext *Ctx, Scope *S = 0,
bool ExplicitInstantiationOrSpecialization = false);
/// Finds the scope corresponding to the given decl context, if it
/// happens to be an enclosing scope. Otherwise return NULL.
static Scope *getScopeForDeclContext(Scope *S, DeclContext *DC);
/// Subroutines of ActOnDeclarator().
TypedefDecl *ParseTypedefDecl(Scope *S, Declarator &D, QualType T,
TypeSourceInfo *TInfo);
bool isIncompatibleTypedef(TypeDecl *Old, TypedefNameDecl *New);
/// Attribute merging methods. Return true if a new attribute was added.
AvailabilityAttr *mergeAvailabilityAttr(NamedDecl *D, SourceRange Range,
IdentifierInfo *Platform,
VersionTuple Introduced,
VersionTuple Deprecated,
VersionTuple Obsoleted,
bool IsUnavailable,
StringRef Message,
bool Override,
unsigned AttrSpellingListIndex);
TypeVisibilityAttr *mergeTypeVisibilityAttr(Decl *D, SourceRange Range,
TypeVisibilityAttr::VisibilityType Vis,
unsigned AttrSpellingListIndex);
VisibilityAttr *mergeVisibilityAttr(Decl *D, SourceRange Range,
VisibilityAttr::VisibilityType Vis,
unsigned AttrSpellingListIndex);
DLLImportAttr *mergeDLLImportAttr(Decl *D, SourceRange Range,
unsigned AttrSpellingListIndex);
DLLExportAttr *mergeDLLExportAttr(Decl *D, SourceRange Range,
unsigned AttrSpellingListIndex);
FormatAttr *mergeFormatAttr(Decl *D, SourceRange Range, StringRef Format,
int FormatIdx, int FirstArg,
unsigned AttrSpellingListIndex);
SectionAttr *mergeSectionAttr(Decl *D, SourceRange Range, StringRef Name,
unsigned AttrSpellingListIndex);
/// \brief Describes the kind of merge to perform for availability
/// attributes (including "deprecated", "unavailable", and "availability").
enum AvailabilityMergeKind {
/// \brief Don't merge availability attributes at all.
AMK_None,
/// \brief Merge availability attributes for a redeclaration, which requires
/// an exact match.
AMK_Redeclaration,
/// \brief Merge availability attributes for an override, which requires
/// an exact match or a weakening of constraints.
AMK_Override
};
void mergeDeclAttributes(NamedDecl *New, Decl *Old,
AvailabilityMergeKind AMK = AMK_Redeclaration);
void MergeTypedefNameDecl(TypedefNameDecl *New, LookupResult &OldDecls);
bool MergeFunctionDecl(FunctionDecl *New, Decl *Old, Scope *S);
bool MergeCompatibleFunctionDecls(FunctionDecl *New, FunctionDecl *Old,
Scope *S);
void mergeObjCMethodDecls(ObjCMethodDecl *New, ObjCMethodDecl *Old);
void MergeVarDecl(VarDecl *New, LookupResult &OldDecls,
bool OldDeclsWereHidden);
void MergeVarDeclTypes(VarDecl *New, VarDecl *Old, bool OldIsHidden);
void MergeVarDeclExceptionSpecs(VarDecl *New, VarDecl *Old);
bool MergeCXXFunctionDecl(FunctionDecl *New, FunctionDecl *Old, Scope *S);
// AssignmentAction - This is used by all the assignment diagnostic functions
// to represent what is actually causing the operation
enum AssignmentAction {
AA_Assigning, ///< A simple assignment.
AA_Passing, ///< Passing a value as a function argument.
AA_Returning, ///< Returning a value from a function.
AA_Converting, ///< Performing a conversion.
AA_Initializing, ///< Initializing a variable.
AA_Sending, ///< Sending an argument in an Objective-C message.
AA_Casting ///< Performing an explicit cast.
};
/// C++ Overloading.
enum OverloadKind {
/// This is a legitimate overload: the existing declarations are
/// functions or function templates with different signatures.
Ovl_Overload,
/// This is not an overload because the signature exactly matches
/// an existing declaration.
Ovl_Match,
/// This is not an overload because the lookup results contain a
/// non-function.
Ovl_NonFunction
};
OverloadKind CheckOverload(Scope *S,
FunctionDecl *New,
const LookupResult &OldDecls,
NamedDecl *&OldDecl,
bool IsForUsingDecl);
bool IsOverload(FunctionDecl *New, FunctionDecl *Old, bool IsForUsingDecl);
/// \brief Checks availability of the function depending on the current
/// function context.Inside an unavailable function,unavailability is ignored.
///
/// \returns true if \p FD is unavailable and current context is inside
/// an available function, false otherwise.
bool isFunctionConsideredUnavailable(FunctionDecl *FD);
ImplicitConversionSequence
TryImplicitConversion(Expr *From, QualType ToType,
bool SuppressUserConversions,
bool AllowExplicit,
bool InOverloadResolution,
bool CStyle,
bool AllowObjCWritebackConversion);
bool IsIntegralPromotion(Expr *From, QualType FromType, QualType ToType);
bool IsFloatingPointPromotion(QualType FromType, QualType ToType);
bool IsComplexPromotion(QualType FromType, QualType ToType);
bool IsPointerConversion(Expr *From, QualType FromType, QualType ToType,
bool InOverloadResolution,
QualType& ConvertedType, bool &IncompatibleObjC);
bool isObjCPointerConversion(QualType FromType, QualType ToType,
QualType& ConvertedType, bool &IncompatibleObjC);
bool isObjCWritebackConversion(QualType FromType, QualType ToType,
QualType &ConvertedType);
bool IsBlockPointerConversion(QualType FromType, QualType ToType,
QualType& ConvertedType);
bool FunctionArgTypesAreEqual(const FunctionProtoType *OldType,
const FunctionProtoType *NewType,
unsigned *ArgPos = 0);
void HandleFunctionTypeMismatch(PartialDiagnostic &PDiag,
QualType FromType, QualType ToType);
CastKind PrepareCastToObjCObjectPointer(ExprResult &E);
bool CheckPointerConversion(Expr *From, QualType ToType,
CastKind &Kind,
CXXCastPath& BasePath,
bool IgnoreBaseAccess);
bool IsMemberPointerConversion(Expr *From, QualType FromType, QualType ToType,
bool InOverloadResolution,
QualType &ConvertedType);
bool CheckMemberPointerConversion(Expr *From, QualType ToType,
CastKind &Kind,
CXXCastPath &BasePath,
bool IgnoreBaseAccess);
bool IsQualificationConversion(QualType FromType, QualType ToType,
bool CStyle, bool &ObjCLifetimeConversion);
bool IsNoReturnConversion(QualType FromType, QualType ToType,
QualType &ResultTy);
bool DiagnoseMultipleUserDefinedConversion(Expr *From, QualType ToType);
bool isSameOrCompatibleFunctionType(CanQualType Param, CanQualType Arg);
ExprResult PerformMoveOrCopyInitialization(const InitializedEntity &Entity,
const VarDecl *NRVOCandidate,
QualType ResultType,
Expr *Value,
bool AllowNRVO = true);
bool CanPerformCopyInitialization(const InitializedEntity &Entity,
ExprResult Init);
ExprResult PerformCopyInitialization(const InitializedEntity &Entity,
SourceLocation EqualLoc,
ExprResult Init,
bool TopLevelOfInitList = false,
bool AllowExplicit = false);
ExprResult PerformObjectArgumentInitialization(Expr *From,
NestedNameSpecifier *Qualifier,
NamedDecl *FoundDecl,
CXXMethodDecl *Method);
ExprResult PerformContextuallyConvertToBool(Expr *From);
ExprResult PerformContextuallyConvertToObjCPointer(Expr *From);
/// Contexts in which a converted constant expression is required.
enum CCEKind {
CCEK_CaseValue, ///< Expression in a case label.
CCEK_Enumerator, ///< Enumerator value with fixed underlying type.
CCEK_TemplateArg, ///< Value of a non-type template parameter.
CCEK_NewExpr ///< Constant expression in a noptr-new-declarator.
};
ExprResult CheckConvertedConstantExpression(Expr *From, QualType T,
llvm::APSInt &Value, CCEKind CCE);
/// \brief Abstract base class used to perform a contextual implicit
/// conversion from an expression to any type passing a filter.
class ContextualImplicitConverter {
public:
// When true, diagnostics produced during the conversion are suppressed
// (assumption from the name — confirm against
// PerformContextualImplicitConversion).
bool Suppress;
// When true, the "we picked a conversion function" diagnostic is
// suppressed (assumption from the name; see diagnoseConversion below).
bool SuppressConversion;
// Both flags default to false: diagnose everything.
ContextualImplicitConverter(bool Suppress = false,
bool SuppressConversion = false)
: Suppress(Suppress), SuppressConversion(SuppressConversion) {}
/// \brief Determine whether the specified type is a valid destination type
/// for this conversion.
virtual bool match(QualType T) = 0;
/// \brief Emits a diagnostic complaining that the expression does not have
/// integral or enumeration type.
virtual SemaDiagnosticBuilder
diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) = 0;
/// \brief Emits a diagnostic when the expression has incomplete class type.
virtual SemaDiagnosticBuilder
diagnoseIncomplete(Sema &S, SourceLocation Loc, QualType T) = 0;
/// \brief Emits a diagnostic when the only matching conversion function
/// is explicit.
virtual SemaDiagnosticBuilder diagnoseExplicitConv(
Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0;
/// \brief Emits a note for the explicit conversion function.
virtual SemaDiagnosticBuilder
noteExplicitConv(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0;
/// \brief Emits a diagnostic when there are multiple possible conversion
/// functions.
virtual SemaDiagnosticBuilder
diagnoseAmbiguous(Sema &S, SourceLocation Loc, QualType T) = 0;
/// \brief Emits a note for one of the candidate conversions.
virtual SemaDiagnosticBuilder
noteAmbiguous(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0;
/// \brief Emits a diagnostic when we picked a conversion function
/// (for cases when we are not allowed to pick a conversion function).
virtual SemaDiagnosticBuilder diagnoseConversion(
Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0;
// Virtual destructor: instances are used polymorphically through this
// base class.
virtual ~ContextualImplicitConverter() {}
};
/// \brief Converter/diagnoser for contexts requiring an integral or
/// enumeration type (an "ICE"-like contextual conversion).
class ICEConvertDiagnoser : public ContextualImplicitConverter {
// Whether match() also accepts scoped enumeration types.
bool AllowScopedEnumerations;
public:
ICEConvertDiagnoser(bool AllowScopedEnumerations,
bool Suppress, bool SuppressConversion)
: ContextualImplicitConverter(Suppress, SuppressConversion),
AllowScopedEnumerations(AllowScopedEnumerations) {}
/// Match an integral or (possibly scoped) enumeration type.
bool match(QualType T);
// Routes the generic "no match" diagnostic to the more specific
// diagnoseNotInt hook below.
SemaDiagnosticBuilder
diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) {
return diagnoseNotInt(S, Loc, T);
}
/// \brief Emits a diagnostic complaining that the expression does not have
/// integral or enumeration type.
virtual SemaDiagnosticBuilder
diagnoseNotInt(Sema &S, SourceLocation Loc, QualType T) = 0;
};
/// Perform a contextual implicit conversion.
ExprResult PerformContextualImplicitConversion(
SourceLocation Loc, Expr *FromE, ContextualImplicitConverter &Converter);
/// \brief The kind of Objective-C subscripting expression being checked.
enum ObjCSubscriptKind {
OS_Array, ///< Array-style subscripting.
OS_Dictionary, ///< Dictionary-style subscripting.
OS_Error ///< The expression cannot be subscripted.
};
ObjCSubscriptKind CheckSubscriptingKind(Expr *FromE);
// Note that LK_String is intentionally after the other literals, as
// this is used for diagnostics logic.
enum ObjCLiteralKind {
LK_Array, ///< An Objective-C array literal.
LK_Dictionary, ///< An Objective-C dictionary literal.
LK_Numeric, ///< An Objective-C numeric literal.
LK_Boxed, ///< An Objective-C boxed expression.
LK_String, ///< An Objective-C string literal (kept last for diagnostics
///< logic; see the comment above this enum).
LK_Block, ///< A block expression.
LK_None ///< Not an Objective-C literal.
};
ObjCLiteralKind CheckLiteralKind(Expr *FromE);
ExprResult PerformObjectMemberConversion(Expr *From,
NestedNameSpecifier *Qualifier,
NamedDecl *FoundDecl,
NamedDecl *Member);
// Members have to be NamespaceDecl* or TranslationUnitDecl*.
// TODO: make this is a typesafe union.
typedef llvm::SmallPtrSet<DeclContext *, 16> AssociatedNamespaceSet;
typedef llvm::SmallPtrSet<CXXRecordDecl *, 16> AssociatedClassSet;
void AddOverloadCandidate(FunctionDecl *Function,
DeclAccessPair FoundDecl,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversions = false,
bool PartialOverloading = false,
bool AllowExplicit = false);
void AddFunctionCandidates(const UnresolvedSetImpl &Functions,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversions = false,
TemplateArgumentListInfo *ExplicitTemplateArgs = 0);
void AddMethodCandidate(DeclAccessPair FoundDecl,
QualType ObjectType,
Expr::Classification ObjectClassification,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversion = false);
void AddMethodCandidate(CXXMethodDecl *Method,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext, QualType ObjectType,
Expr::Classification ObjectClassification,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversions = false);
void AddMethodTemplateCandidate(FunctionTemplateDecl *MethodTmpl,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext,
TemplateArgumentListInfo *ExplicitTemplateArgs,
QualType ObjectType,
Expr::Classification ObjectClassification,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversions = false);
void AddTemplateOverloadCandidate(FunctionTemplateDecl *FunctionTemplate,
DeclAccessPair FoundDecl,
TemplateArgumentListInfo *ExplicitTemplateArgs,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversions = false);
void AddConversionCandidate(CXXConversionDecl *Conversion,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext,
Expr *From, QualType ToType,
OverloadCandidateSet& CandidateSet);
void AddTemplateConversionCandidate(FunctionTemplateDecl *FunctionTemplate,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext,
Expr *From, QualType ToType,
OverloadCandidateSet &CandidateSet);
void AddSurrogateCandidate(CXXConversionDecl *Conversion,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext,
const FunctionProtoType *Proto,
Expr *Object, ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet);
void AddMemberOperatorCandidates(OverloadedOperatorKind Op,
SourceLocation OpLoc, ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
SourceRange OpRange = SourceRange());
void AddBuiltinCandidate(QualType ResultTy, QualType *ParamTys,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool IsAssignmentOperator = false,
unsigned NumContextualBoolArguments = 0);
void AddBuiltinOperatorCandidates(OverloadedOperatorKind Op,
SourceLocation OpLoc, ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet);
void AddArgumentDependentLookupCandidates(DeclarationName Name,
bool Operator, SourceLocation Loc,
ArrayRef<Expr *> Args,
TemplateArgumentListInfo *ExplicitTemplateArgs,
OverloadCandidateSet& CandidateSet,
bool PartialOverloading = false);
// Emit as a 'note' the specific overload candidate
void NoteOverloadCandidate(FunctionDecl *Fn, QualType DestType = QualType());
// Emit as a series of 'note's all template and non-templates
// identified by the expression Expr
void NoteAllOverloadCandidates(Expr* E, QualType DestType = QualType());
// [PossiblyAFunctionType] --> [Return]
// NonFunctionType --> NonFunctionType
// R (A) --> R(A)
// R (*)(A) --> R (A)
// R (&)(A) --> R (A)
// R (S::*)(A) --> R (A)
QualType ExtractUnqualifiedFunctionType(QualType PossiblyAFunctionType);
FunctionDecl *
ResolveAddressOfOverloadedFunction(Expr *AddressOfExpr,
QualType TargetType,
bool Complain,
DeclAccessPair &Found,
bool *pHadMultipleCandidates = 0);
FunctionDecl *ResolveSingleFunctionTemplateSpecialization(OverloadExpr *ovl,
bool Complain = false,
DeclAccessPair* Found = 0);
bool ResolveAndFixSingleFunctionTemplateSpecialization(
ExprResult &SrcExpr,
bool DoFunctionPointerConverion = false,
bool Complain = false,
const SourceRange& OpRangeForComplaining = SourceRange(),
QualType DestTypeForComplaining = QualType(),
unsigned DiagIDForComplaining = 0);
Expr *FixOverloadedFunctionReference(Expr *E,
DeclAccessPair FoundDecl,
FunctionDecl *Fn);
ExprResult FixOverloadedFunctionReference(ExprResult,
DeclAccessPair FoundDecl,
FunctionDecl *Fn);
void AddOverloadedCallCandidates(UnresolvedLookupExpr *ULE,
ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
bool PartialOverloading = false);
// An enum used to represent the different possible results of building a
// range-based for loop.
enum ForRangeStatus {
FRS_Success, ///< The range-based for loop component was built successfully.
FRS_NoViableFunction, ///< No viable function was found.
FRS_DiagnosticIssued ///< A diagnostic has already been issued.
};
// An enum to represent whether something is dealing with a call to begin()
// or a call to end() in a range-based for loop.
enum BeginEndFunction {
BEF_begin, ///< Dealing with a call to begin() in a range-based for loop.
BEF_end ///< Dealing with a call to end() in a range-based for loop.
};
ForRangeStatus BuildForRangeBeginEndCall(Scope *S, SourceLocation Loc,
SourceLocation RangeLoc,
VarDecl *Decl,
BeginEndFunction BEF,
const DeclarationNameInfo &NameInfo,
LookupResult &MemberLookup,
OverloadCandidateSet *CandidateSet,
Expr *Range, ExprResult *CallExpr);
ExprResult BuildOverloadedCallExpr(Scope *S, Expr *Fn,
UnresolvedLookupExpr *ULE,
SourceLocation LParenLoc,
MultiExprArg Args,
SourceLocation RParenLoc,
Expr *ExecConfig,
bool AllowTypoCorrection=true);
bool buildOverloadedCallSet(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE,
MultiExprArg Args, SourceLocation RParenLoc,
OverloadCandidateSet *CandidateSet,
ExprResult *Result);
ExprResult CreateOverloadedUnaryOp(SourceLocation OpLoc,
unsigned Opc,
const UnresolvedSetImpl &Fns,
Expr *input);
ExprResult CreateOverloadedBinOp(SourceLocation OpLoc,
unsigned Opc,
const UnresolvedSetImpl &Fns,
Expr *LHS, Expr *RHS);
ExprResult CreateOverloadedArraySubscriptExpr(SourceLocation LLoc,
SourceLocation RLoc,
Expr *Base,Expr *Idx);
ExprResult
BuildCallToMemberFunction(Scope *S, Expr *MemExpr,
SourceLocation LParenLoc,
MultiExprArg Args,
SourceLocation RParenLoc);
ExprResult
BuildCallToObjectOfClassType(Scope *S, Expr *Object, SourceLocation LParenLoc,
MultiExprArg Args,
SourceLocation RParenLoc);
ExprResult BuildOverloadedArrowExpr(Scope *S, Expr *Base,
SourceLocation OpLoc);
/// CheckCallReturnType - Checks that a call expression's return type is
/// complete. Returns true on failure. The location passed in is the location
/// that best represents the call.
bool CheckCallReturnType(QualType ReturnType, SourceLocation Loc,
CallExpr *CE, FunctionDecl *FD);
/// Helpers for dealing with blocks and functions.
bool CheckParmsForFunctionDef(ParmVarDecl **Param, ParmVarDecl **ParamEnd,
bool CheckParameterNames);
void CheckCXXDefaultArguments(FunctionDecl *FD);
void CheckExtraCXXDefaultArguments(Declarator &D);
Scope *getNonFieldDeclScope(Scope *S);
/// \name Name lookup
///
/// These routines provide name lookup that is used during semantic
/// analysis to resolve the various kinds of names (identifiers,
/// overloaded operator names, constructor names, etc.) into zero or
/// more declarations within a particular scope. The major entry
/// points are LookupName, which performs unqualified name lookup,
/// and LookupQualifiedName, which performs qualified name lookup.
///
/// All name lookup is performed based on some specific criteria,
/// which specify what names will be visible to name lookup and how
/// far name lookup should work. These criteria are important both
/// for capturing language semantics (certain lookups will ignore
/// certain names, for example) and for performance, since name
/// lookup is often a bottleneck in the compilation of C++. Name
/// lookup criteria is specified via the LookupCriteria enumeration.
///
/// The results of name lookup can vary based on the kind of name
/// lookup performed, the current language, and the translation
/// unit. In C, for example, name lookup will either return nothing
/// (no entity found) or a single declaration. In C++, name lookup
/// can additionally refer to a set of overloaded functions or
/// result in an ambiguity. All of the possible results of name
/// lookup are captured by the LookupResult class, which provides
/// the ability to distinguish among them.
//@{
/// @brief Describes the kind of name lookup to perform.
enum LookupNameKind {
/// Ordinary name lookup, which finds ordinary names (functions,
/// variables, typedefs, etc.) in C and most kinds of names
/// (functions, variables, members, types, etc.) in C++.
LookupOrdinaryName = 0,
/// Tag name lookup, which finds the names of enums, classes,
/// structs, and unions.
LookupTagName,
/// Label name lookup.
LookupLabel,
/// Member name lookup, which finds the names of
/// class/struct/union members.
LookupMemberName,
/// Look up of an operator name (e.g., operator+) for use with
/// operator overloading. This lookup is similar to ordinary name
/// lookup, but will ignore any declarations that are class members.
LookupOperatorName,
/// Look up of a name that precedes the '::' scope resolution
/// operator in C++. This lookup completely ignores operator, object,
/// function, and enumerator names (C++ [basic.lookup.qual]p1).
LookupNestedNameSpecifierName,
/// Look up a namespace name within a C++ using directive or
/// namespace alias definition, ignoring non-namespace names (C++
/// [basic.lookup.udir]p1).
LookupNamespaceName,
/// Look up all declarations in a scope with the given name,
/// including resolved using declarations. This is appropriate
/// for checking redeclarations for a using declaration.
LookupUsingDeclName,
/// Look up an ordinary name that is going to be redeclared as a
/// name with linkage. This lookup ignores any declarations that
/// are outside of the current scope unless they have linkage. See
/// C99 6.2.2p4-5 and C++ [basic.link]p6.
LookupRedeclarationWithLinkage,
/// Look up the name of an Objective-C protocol.
LookupObjCProtocolName,
/// Look up implicit 'self' parameter of an objective-c method.
LookupObjCImplicitSelfParam,
/// \brief Look up any declaration with any name.
LookupAnyName
};
/// \brief Specifies whether (or how) name lookup is being performed for a
/// redeclaration (vs. a reference).
enum RedeclarationKind {
/// \brief The lookup is a reference to this name that is not for the
/// purpose of redeclaring the name.
NotForRedeclaration = 0,
/// \brief The lookup results will be used for redeclaration of a name,
/// if an entity by that name already exists.
ForRedeclaration
};
/// \brief The possible outcomes of name lookup for a literal operator.
///
/// Returned by LookupLiteralOperator (declared below) when resolving a
/// user-defined-literal suffix.
enum LiteralOperatorLookupResult {
/// \brief The lookup resulted in an error.
LOLR_Error,
/// \brief The lookup found a single 'cooked' literal operator, which
/// expects a normal literal to be built and passed to it.
LOLR_Cooked,
/// \brief The lookup found a single 'raw' literal operator, which expects
/// a string literal containing the spelling of the literal token.
LOLR_Raw,
/// \brief The lookup found an overload set of literal operator templates,
/// which expect the characters of the spelling of the literal token to be
/// passed as a non-type template argument pack.
LOLR_Template
};
SpecialMemberOverloadResult *LookupSpecialMember(CXXRecordDecl *D,
CXXSpecialMember SM,
bool ConstArg,
bool VolatileArg,
bool RValueThis,
bool ConstThis,
bool VolatileThis);
private:
bool CppLookupName(LookupResult &R, Scope *S);
// \brief The set of known/encountered (unique, canonicalized) NamespaceDecls.
//
// The boolean value will be true to indicate that the namespace was loaded
// from an AST/PCH file, or false otherwise.
llvm::MapVector<NamespaceDecl*, bool> KnownNamespaces;
/// \brief Whether we have already loaded known namespaces from an extenal
/// source.
bool LoadedExternalKnownNamespaces;
public:
/// \brief Look up a name, looking for a single declaration. Return
/// null if the results were absent, ambiguous, or overloaded.
///
/// It is preferable to use the elaborated form and explicitly handle
/// ambiguity and overloaded.
NamedDecl *LookupSingleName(Scope *S, DeclarationName Name,
SourceLocation Loc,
LookupNameKind NameKind,
RedeclarationKind Redecl
= NotForRedeclaration);
bool LookupName(LookupResult &R, Scope *S,
bool AllowBuiltinCreation = false);
bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
bool InUnqualifiedLookup = false);
bool LookupParsedName(LookupResult &R, Scope *S, CXXScopeSpec *SS,
bool AllowBuiltinCreation = false,
bool EnteringContext = false);
ObjCProtocolDecl *LookupProtocol(IdentifierInfo *II, SourceLocation IdLoc,
RedeclarationKind Redecl
= NotForRedeclaration);
void LookupOverloadedOperatorName(OverloadedOperatorKind Op, Scope *S,
QualType T1, QualType T2,
UnresolvedSetImpl &Functions);
LabelDecl *LookupOrCreateLabel(IdentifierInfo *II, SourceLocation IdentLoc,
SourceLocation GnuLabelLoc = SourceLocation());
DeclContextLookupResult LookupConstructors(CXXRecordDecl *Class);
CXXConstructorDecl *LookupDefaultConstructor(CXXRecordDecl *Class);
CXXConstructorDecl *LookupCopyingConstructor(CXXRecordDecl *Class,
unsigned Quals);
CXXMethodDecl *LookupCopyingAssignment(CXXRecordDecl *Class, unsigned Quals,
bool RValueThis, unsigned ThisQuals);
CXXConstructorDecl *LookupMovingConstructor(CXXRecordDecl *Class,
unsigned Quals);
CXXMethodDecl *LookupMovingAssignment(CXXRecordDecl *Class, unsigned Quals,
bool RValueThis, unsigned ThisQuals);
CXXDestructorDecl *LookupDestructor(CXXRecordDecl *Class);
LiteralOperatorLookupResult LookupLiteralOperator(Scope *S, LookupResult &R,
ArrayRef<QualType> ArgTys,
bool AllowRawAndTemplate);
bool isKnownName(StringRef name);
void ArgumentDependentLookup(DeclarationName Name, bool Operator,
SourceLocation Loc,
ArrayRef<Expr *> Args,
ADLResult &Functions);
void LookupVisibleDecls(Scope *S, LookupNameKind Kind,
VisibleDeclConsumer &Consumer,
bool IncludeGlobalScope = true);
void LookupVisibleDecls(DeclContext *Ctx, LookupNameKind Kind,
VisibleDeclConsumer &Consumer,
bool IncludeGlobalScope = true);
TypoCorrection CorrectTypo(const DeclarationNameInfo &Typo,
Sema::LookupNameKind LookupKind,
Scope *S, CXXScopeSpec *SS,
CorrectionCandidateCallback &CCC,
DeclContext *MemberContext = 0,
bool EnteringContext = false,
const ObjCObjectPointerType *OPT = 0);
void FindAssociatedClassesAndNamespaces(SourceLocation InstantiationLoc,
ArrayRef<Expr *> Args,
AssociatedNamespaceSet &AssociatedNamespaces,
AssociatedClassSet &AssociatedClasses);
void FilterLookupForScope(LookupResult &R, DeclContext *Ctx, Scope *S,
bool ConsiderLinkage,
bool ExplicitInstantiationOrSpecialization);
bool DiagnoseAmbiguousLookup(LookupResult &Result);
//@}
ObjCInterfaceDecl *getObjCInterfaceDecl(IdentifierInfo *&Id,
SourceLocation IdLoc,
bool TypoCorrection = false);
NamedDecl *LazilyCreateBuiltin(IdentifierInfo *II, unsigned ID,
Scope *S, bool ForRedeclaration,
SourceLocation Loc);
NamedDecl *ImplicitlyDefineFunction(SourceLocation Loc, IdentifierInfo &II,
Scope *S);
void AddKnownFunctionAttributes(FunctionDecl *FD);
// More parsing and symbol table subroutines.
void ProcessPragmaWeak(Scope *S, Decl *D);
// Decl attributes - this routine is the top level dispatcher.
void ProcessDeclAttributes(Scope *S, Decl *D, const Declarator &PD,
bool NonInheritable = true,
bool Inheritable = true);
void ProcessDeclAttributeList(Scope *S, Decl *D, const AttributeList *AL,
bool NonInheritable = true,
bool Inheritable = true,
bool IncludeCXX11Attributes = true);
bool ProcessAccessDeclAttributeList(AccessSpecDecl *ASDecl,
const AttributeList *AttrList);
void checkUnusedDeclAttributes(Declarator &D);
bool CheckRegparmAttr(const AttributeList &attr, unsigned &value);
bool CheckCallingConvAttr(const AttributeList &attr, CallingConv &CC,
const FunctionDecl *FD = 0);
bool CheckNoReturnAttr(const AttributeList &attr);
void CheckAlignasUnderalignment(Decl *D);
/// \brief Stmt attributes - this routine is the top level dispatcher.
StmtResult ProcessStmtAttributes(Stmt *Stmt, AttributeList *Attrs,
SourceRange Range);
void WarnUndefinedMethod(SourceLocation ImpLoc, ObjCMethodDecl *method,
bool &IncompleteImpl, unsigned DiagID);
void WarnConflictingTypedMethods(ObjCMethodDecl *Method,
ObjCMethodDecl *MethodDecl,
bool IsProtocolMethodDecl);
void CheckConflictingOverridingMethod(ObjCMethodDecl *Method,
ObjCMethodDecl *Overridden,
bool IsProtocolMethodDecl);
/// WarnExactTypedMethods - This routine issues a warning if method
/// implementation declaration matches exactly that of its declaration.
void WarnExactTypedMethods(ObjCMethodDecl *Method,
ObjCMethodDecl *MethodDecl,
bool IsProtocolMethodDecl);
bool isPropertyReadonly(ObjCPropertyDecl *PropertyDecl,
ObjCInterfaceDecl *IDecl);
typedef llvm::SmallPtrSet<Selector, 8> SelectorSet;
typedef llvm::DenseMap<Selector, ObjCMethodDecl*> ProtocolsMethodsMap;
/// CheckProtocolMethodDefs - This routine checks unimplemented
/// methods declared in protocol, and those referenced by it.
void CheckProtocolMethodDefs(SourceLocation ImpLoc,
ObjCProtocolDecl *PDecl,
bool& IncompleteImpl,
const SelectorSet &InsMap,
const SelectorSet &ClsMap,
ObjCContainerDecl *CDecl);
/// CheckImplementationIvars - This routine checks if the instance variables
/// listed in the implelementation match those listed in the interface.
void CheckImplementationIvars(ObjCImplementationDecl *ImpDecl,
ObjCIvarDecl **Fields, unsigned nIvars,
SourceLocation Loc);
/// ImplMethodsVsClassMethods - This is main routine to warn if any method
/// remains unimplemented in the class or category \@implementation.
void ImplMethodsVsClassMethods(Scope *S, ObjCImplDecl* IMPDecl,
ObjCContainerDecl* IDecl,
bool IncompleteImpl = false);
/// DiagnoseUnimplementedProperties - This routine warns on those properties
/// which must be implemented by this implementation.
void DiagnoseUnimplementedProperties(Scope *S, ObjCImplDecl* IMPDecl,
ObjCContainerDecl *CDecl);
/// DefaultSynthesizeProperties - This routine default synthesizes all
/// properties which must be synthesized in the class's \@implementation.
void DefaultSynthesizeProperties (Scope *S, ObjCImplDecl* IMPDecl,
ObjCInterfaceDecl *IDecl);
void DefaultSynthesizeProperties(Scope *S, Decl *D);
/// CollectImmediateProperties - This routine collects all properties in
/// the class and its conforming protocols; but not those it its super class.
void CollectImmediateProperties(ObjCContainerDecl *CDecl,
llvm::DenseMap<IdentifierInfo *, ObjCPropertyDecl*>& PropMap,
llvm::DenseMap<IdentifierInfo *, ObjCPropertyDecl*>& SuperPropMap);
/// IvarBacksCurrentMethodAccessor - This routine returns 'true' if 'IV' is
/// an ivar synthesized for 'Method' and 'Method' is a property accessor
/// declared in class 'IFace'.
bool IvarBacksCurrentMethodAccessor(ObjCInterfaceDecl *IFace,
ObjCMethodDecl *Method, ObjCIvarDecl *IV);
/// Called by ActOnProperty to handle \@property declarations in
/// class extensions.
ObjCPropertyDecl *HandlePropertyInClassExtension(Scope *S,
SourceLocation AtLoc,
SourceLocation LParenLoc,
FieldDeclarator &FD,
Selector GetterSel,
Selector SetterSel,
const bool isAssign,
const bool isReadWrite,
const unsigned Attributes,
const unsigned AttributesAsWritten,
bool *isOverridingProperty,
TypeSourceInfo *T,
tok::ObjCKeywordKind MethodImplKind);
/// Called by ActOnProperty and HandlePropertyInClassExtension to
/// handle creating the ObjcPropertyDecl for a category or \@interface.
ObjCPropertyDecl *CreatePropertyDecl(Scope *S,
ObjCContainerDecl *CDecl,
SourceLocation AtLoc,
SourceLocation LParenLoc,
FieldDeclarator &FD,
Selector GetterSel,
Selector SetterSel,
const bool isAssign,
const bool isReadWrite,
const unsigned Attributes,
const unsigned AttributesAsWritten,
TypeSourceInfo *T,
tok::ObjCKeywordKind MethodImplKind,
DeclContext *lexicalDC = 0);
/// AtomicPropertySetterGetterRules - This routine enforces the rule (via
/// warning) when atomic property has one but not the other user-declared
/// setter or getter.
void AtomicPropertySetterGetterRules(ObjCImplDecl* IMPDecl,
ObjCContainerDecl* IDecl);
void DiagnoseOwningPropertyGetterSynthesis(const ObjCImplementationDecl *D);
void DiagnoseDuplicateIvars(ObjCInterfaceDecl *ID, ObjCInterfaceDecl *SID);
/// \brief How strictly MatchTwoMethodDeclarations (below) compares the
/// types of two Objective-C method declarations.
enum MethodMatchStrategy {
/// Tolerant comparison of the two methods' types.
MMS_loose,
/// Exact comparison; this is the default for MatchTwoMethodDeclarations.
MMS_strict
};
/// MatchTwoMethodDeclarations - Checks if two methods' type match and returns
/// true, or false, accordingly.
bool MatchTwoMethodDeclarations(const ObjCMethodDecl *Method,
const ObjCMethodDecl *PrevMethod,
MethodMatchStrategy strategy = MMS_strict);
/// MatchAllMethodDeclarations - Check methods declaraed in interface or
/// or protocol against those declared in their implementations.
void MatchAllMethodDeclarations(const SelectorSet &InsMap,
const SelectorSet &ClsMap,
SelectorSet &InsMapSeen,
SelectorSet &ClsMapSeen,
ObjCImplDecl* IMPDecl,
ObjCContainerDecl* IDecl,
bool &IncompleteImpl,
bool ImmediateClass,
bool WarnCategoryMethodImpl=false);
/// CheckCategoryVsClassMethodMatches - Checks that methods implemented in
/// category matches with those implemented in its primary class and
/// warns each time an exact match is found.
void CheckCategoryVsClassMethodMatches(ObjCCategoryImplDecl *CatIMP);
/// \brief Add the given method to the list of globally-known methods.
void addMethodToGlobalList(ObjCMethodList *List, ObjCMethodDecl *Method);
private:
/// AddMethodToGlobalPool - Add an instance or factory method to the global
/// pool. See descriptoin of AddInstanceMethodToGlobalPool.
void AddMethodToGlobalPool(ObjCMethodDecl *Method, bool impl, bool instance);
/// LookupMethodInGlobalPool - Returns the instance or factory method and
/// optionally warns if there are multiple signatures.
ObjCMethodDecl *LookupMethodInGlobalPool(Selector Sel, SourceRange R,
bool receiverIdOrClass,
bool warn, bool instance);
public:
/// AddInstanceMethodToGlobalPool - All instance methods in a translation
/// unit are added to a global pool. This allows us to efficiently associate
/// a selector with a method declaration for purposes of typechecking
/// messages sent to "id" (where the class of the object is unknown).
///
/// Forwards to AddMethodToGlobalPool with instance=true.
void AddInstanceMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) {
AddMethodToGlobalPool(Method, impl, /*instance*/true);
}
/// AddFactoryMethodToGlobalPool - Same as above, but for factory methods.
///
/// Forwards to AddMethodToGlobalPool with instance=false.
void AddFactoryMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) {
AddMethodToGlobalPool(Method, impl, /*instance*/false);
}
/// AddAnyMethodToGlobalPool - Add any method, instance or factory to global
/// pool.
void AddAnyMethodToGlobalPool(Decl *D);
/// LookupInstanceMethodInGlobalPool - Returns the method and warns if
/// there are multiple signatures.
///
/// Thin wrapper over LookupMethodInGlobalPool with instance=true.
ObjCMethodDecl *LookupInstanceMethodInGlobalPool(Selector Sel, SourceRange R,
bool receiverIdOrClass=false,
bool warn=true) {
return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass,
warn, /*instance*/true);
}
/// LookupFactoryMethodInGlobalPool - Returns the method and warns if
/// there are multiple signatures.
///
/// Thin wrapper over LookupMethodInGlobalPool with instance=false.
ObjCMethodDecl *LookupFactoryMethodInGlobalPool(Selector Sel, SourceRange R,
bool receiverIdOrClass=false,
bool warn=true) {
return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass,
warn, /*instance*/false);
}
const ObjCMethodDecl *SelectorsForTypoCorrection(Selector Sel,
QualType ObjectType=QualType());
/// DiagnoseMismatchedMethodsInGlobalPool - This routine goes through list of
/// methods in global pool and issues diagnostic on identical selectors which
/// have mismathched types.
void DiagnoseMismatchedMethodsInGlobalPool();
/// LookupImplementedMethodInGlobalPool - Returns the method which has an
/// implementation.
ObjCMethodDecl *LookupImplementedMethodInGlobalPool(Selector Sel);
/// CollectIvarsToConstructOrDestruct - Collect those ivars which require
/// initialization.
void CollectIvarsToConstructOrDestruct(ObjCInterfaceDecl *OI,
SmallVectorImpl<ObjCIvarDecl*> &Ivars);
//===--------------------------------------------------------------------===//
// Statement Parsing Callbacks: SemaStmt.cpp.
public:
/// \brief Wrapper that carries a finished full-expression (built by
/// Sema::MakeFullExpr, below) into the statement-building callbacks.
class FullExprArg {
public:
  FullExprArg(Sema &actions) : E(0) { }

  // FIXME: The const_cast here is ugly. RValue references would make this
  // much nicer (or we could duplicate a bunch of the move semantics
  // emulation code from Ownership.h).
  FullExprArg(const FullExprArg& Other) : E(Other.E) {}

  /// \brief Hand the wrapped expression out as an ExprResult.
  ExprResult release() { return E; }

  /// \brief Observe the wrapped expression without releasing it.
  Expr *get() const { return E; }

  Expr *operator->() { return E; }

private:
  // FIXME: No need to make the entire Sema class a friend when it's just
  // Sema::MakeFullExpr that needs access to the constructor below.
  friend class Sema;

  explicit FullExprArg(Expr *expr) : E(expr) {}

  // The wrapped expression; may be null.
  Expr *E;
};
/// \brief Wrap \p Arg as a FullExprArg, using the expression's own source
/// location (or an invalid location when \p Arg is null).
FullExprArg MakeFullExpr(Expr *Arg) {
return MakeFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation());
}
/// \brief Finish \p Arg as a full-expression at location \p CC (via
/// ActOnFinishFullExpr) and wrap the result as a FullExprArg.
FullExprArg MakeFullExpr(Expr *Arg, SourceLocation CC) {
return FullExprArg(ActOnFinishFullExpr(Arg, CC).release());
}
/// \brief Like MakeFullExpr, but finishes \p Arg as a discarded-value
/// expression (DiscardedValue=true passed to ActOnFinishFullExpr).
FullExprArg MakeFullDiscardedValueExpr(Expr *Arg) {
ExprResult FE =
ActOnFinishFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation(),
/*DiscardedValue*/ true);
return FullExprArg(FE.release());
}
StmtResult ActOnExprStmt(ExprResult Arg);
StmtResult ActOnExprStmtError();
StmtResult ActOnNullStmt(SourceLocation SemiLoc,
bool HasLeadingEmptyMacro = false);
void ActOnStartOfCompoundStmt();
void ActOnFinishOfCompoundStmt();
StmtResult ActOnCompoundStmt(SourceLocation L, SourceLocation R,
MultiStmtArg Elts,
bool isStmtExpr);
/// \brief A RAII object to enter scope of a compound statement.
///
/// Calls ActOnStartOfCompoundStmt on construction and
/// ActOnFinishOfCompoundStmt on destruction, so the scope is balanced on
/// every exit path.
class CompoundScopeRAII {
public:
  CompoundScopeRAII(Sema &S) : S(S) { S.ActOnStartOfCompoundStmt(); }

  ~CompoundScopeRAII() { S.ActOnFinishOfCompoundStmt(); }

private:
  Sema &S;
};
StmtResult ActOnDeclStmt(DeclGroupPtrTy Decl,
SourceLocation StartLoc,
SourceLocation EndLoc);
void ActOnForEachDeclStmt(DeclGroupPtrTy Decl);
StmtResult ActOnForEachLValueExpr(Expr *E);
StmtResult ActOnCaseStmt(SourceLocation CaseLoc, Expr *LHSVal,
SourceLocation DotDotDotLoc, Expr *RHSVal,
SourceLocation ColonLoc);
void ActOnCaseStmtBody(Stmt *CaseStmt, Stmt *SubStmt);
StmtResult ActOnDefaultStmt(SourceLocation DefaultLoc,
SourceLocation ColonLoc,
Stmt *SubStmt, Scope *CurScope);
StmtResult ActOnLabelStmt(SourceLocation IdentLoc, LabelDecl *TheDecl,
SourceLocation ColonLoc, Stmt *SubStmt);
StmtResult ActOnAttributedStmt(SourceLocation AttrLoc,
ArrayRef<const Attr*> Attrs,
Stmt *SubStmt);
StmtResult ActOnIfStmt(SourceLocation IfLoc,
FullExprArg CondVal, Decl *CondVar,
Stmt *ThenVal,
SourceLocation ElseLoc, Stmt *ElseVal);
StmtResult ActOnStartOfSwitchStmt(SourceLocation SwitchLoc,
Expr *Cond,
Decl *CondVar);
StmtResult ActOnFinishSwitchStmt(SourceLocation SwitchLoc,
Stmt *Switch, Stmt *Body);
StmtResult ActOnWhileStmt(SourceLocation WhileLoc,
FullExprArg Cond,
Decl *CondVar, Stmt *Body);
StmtResult ActOnDoStmt(SourceLocation DoLoc, Stmt *Body,
SourceLocation WhileLoc,
SourceLocation CondLParen, Expr *Cond,
SourceLocation CondRParen);
StmtResult ActOnForStmt(SourceLocation ForLoc,
SourceLocation LParenLoc,
Stmt *First, FullExprArg Second,
Decl *SecondVar,
FullExprArg Third,
SourceLocation RParenLoc,
Stmt *Body);
ExprResult CheckObjCForCollectionOperand(SourceLocation forLoc,
Expr *collection);
StmtResult ActOnObjCForCollectionStmt(SourceLocation ForColLoc,
Stmt *First, Expr *collection,
SourceLocation RParenLoc);
StmtResult FinishObjCForCollectionStmt(Stmt *ForCollection, Stmt *Body);
/// \brief Why a C++11 for-range statement is being built; passed to
/// ActOnCXXForRangeStmt / BuildCXXForRangeStmt (below).
enum BuildForRangeKind {
/// Initial building of a for-range statement.
BFRK_Build,
/// Instantiation or recovery rebuild of a for-range statement. Don't
/// attempt any typo-correction.
BFRK_Rebuild,
/// Determining whether a for-range statement could be built. Avoid any
/// unnecessary or irreversible actions.
BFRK_Check
};
StmtResult ActOnCXXForRangeStmt(SourceLocation ForLoc, Stmt *LoopVar,
SourceLocation ColonLoc, Expr *Collection,
SourceLocation RParenLoc,
BuildForRangeKind Kind);
StmtResult BuildCXXForRangeStmt(SourceLocation ForLoc,
SourceLocation ColonLoc,
Stmt *RangeDecl, Stmt *BeginEndDecl,
Expr *Cond, Expr *Inc,
Stmt *LoopVarDecl,
SourceLocation RParenLoc,
BuildForRangeKind Kind);
StmtResult FinishCXXForRangeStmt(Stmt *ForRange, Stmt *Body);
StmtResult ActOnGotoStmt(SourceLocation GotoLoc,
SourceLocation LabelLoc,
LabelDecl *TheDecl);
StmtResult ActOnIndirectGotoStmt(SourceLocation GotoLoc,
SourceLocation StarLoc,
Expr *DestExp);
StmtResult ActOnContinueStmt(SourceLocation ContinueLoc, Scope *CurScope);
StmtResult ActOnBreakStmt(SourceLocation BreakLoc, Scope *CurScope);
void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope,
CapturedRegionKind Kind, unsigned NumParams);
StmtResult ActOnCapturedRegionEnd(Stmt *S);
void ActOnCapturedRegionError();
RecordDecl *CreateCapturedStmtRecordDecl(CapturedDecl *&CD,
SourceLocation Loc,
unsigned NumParams);
const VarDecl *getCopyElisionCandidate(QualType ReturnType, Expr *E,
bool AllowFunctionParameters);
StmtResult ActOnReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp);
StmtResult ActOnCapScopeReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp);
StmtResult ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple,
bool IsVolatile, unsigned NumOutputs,
unsigned NumInputs, IdentifierInfo **Names,
MultiExprArg Constraints, MultiExprArg Exprs,
Expr *AsmString, MultiExprArg Clobbers,
SourceLocation RParenLoc);
ExprResult LookupInlineAsmIdentifier(CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
UnqualifiedId &Id,
InlineAsmIdentifierInfo &Info,
bool IsUnevaluatedContext);
bool LookupInlineAsmField(StringRef Base, StringRef Member,
unsigned &Offset, SourceLocation AsmLoc);
StmtResult ActOnMSAsmStmt(SourceLocation AsmLoc, SourceLocation LBraceLoc,
ArrayRef<Token> AsmToks,
StringRef AsmString,
unsigned NumOutputs, unsigned NumInputs,
ArrayRef<StringRef> Constraints,
ArrayRef<StringRef> Clobbers,
ArrayRef<Expr*> Exprs,
SourceLocation EndLoc);
VarDecl *BuildObjCExceptionDecl(TypeSourceInfo *TInfo, QualType ExceptionType,
SourceLocation StartLoc,
SourceLocation IdLoc, IdentifierInfo *Id,
bool Invalid = false);
Decl *ActOnObjCExceptionDecl(Scope *S, Declarator &D);
StmtResult ActOnObjCAtCatchStmt(SourceLocation AtLoc, SourceLocation RParen,
Decl *Parm, Stmt *Body);
StmtResult ActOnObjCAtFinallyStmt(SourceLocation AtLoc, Stmt *Body);
StmtResult ActOnObjCAtTryStmt(SourceLocation AtLoc, Stmt *Try,
MultiStmtArg Catch, Stmt *Finally);
StmtResult BuildObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw);
StmtResult ActOnObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw,
Scope *CurScope);
ExprResult ActOnObjCAtSynchronizedOperand(SourceLocation atLoc,
Expr *operand);
StmtResult ActOnObjCAtSynchronizedStmt(SourceLocation AtLoc,
Expr *SynchExpr,
Stmt *SynchBody);
StmtResult ActOnObjCAutoreleasePoolStmt(SourceLocation AtLoc, Stmt *Body);
VarDecl *BuildExceptionDeclaration(Scope *S, TypeSourceInfo *TInfo,
SourceLocation StartLoc,
SourceLocation IdLoc,
IdentifierInfo *Id);
Decl *ActOnExceptionDeclarator(Scope *S, Declarator &D);
StmtResult ActOnCXXCatchBlock(SourceLocation CatchLoc,
Decl *ExDecl, Stmt *HandlerBlock);
StmtResult ActOnCXXTryBlock(SourceLocation TryLoc, Stmt *TryBlock,
MultiStmtArg Handlers);
StmtResult ActOnSEHTryBlock(bool IsCXXTry, // try (true) or __try (false) ?
SourceLocation TryLoc,
Stmt *TryBlock,
Stmt *Handler);
StmtResult ActOnSEHExceptBlock(SourceLocation Loc,
Expr *FilterExpr,
Stmt *Block);
StmtResult ActOnSEHFinallyBlock(SourceLocation Loc,
Stmt *Block);
void DiagnoseReturnInConstructorExceptionHandler(CXXTryStmt *TryBlock);
bool ShouldWarnIfUnusedFileScopedDecl(const DeclaratorDecl *D) const;
/// \brief If it's a file scoped decl that must warn if not used, keep track
/// of it.
void MarkUnusedFileScopedDecl(const DeclaratorDecl *D);
/// DiagnoseUnusedExprResult - If the statement passed in is an expression
/// whose result is unused, warn.
void DiagnoseUnusedExprResult(const Stmt *S);
void DiagnoseUnusedDecl(const NamedDecl *ND);
/// Emit \p DiagID if statement located on \p StmtLoc has a suspicious null
/// statement as a \p Body, and it is located on the same line.
///
/// This helps prevent bugs due to typos, such as:
/// if (condition);
/// do_stuff();
void DiagnoseEmptyStmtBody(SourceLocation StmtLoc,
const Stmt *Body,
unsigned DiagID);
/// Warn if a for/while loop statement \p S, which is followed by
/// \p PossibleBody, has a suspicious null statement as a body.
void DiagnoseEmptyLoopBody(const Stmt *S,
const Stmt *PossibleBody);
/// \brief Begin delaying diagnostics for a declaration into \p pool;
/// pair with PopParsingDeclaration to end the delayed region.
ParsingDeclState PushParsingDeclaration(sema::DelayedDiagnosticPool &pool) {
return DelayedDiagnostics.push(pool);
}
void PopParsingDeclaration(ParsingDeclState state, Decl *decl);
typedef ProcessingContextState ParsingClassState;
/// \brief Suspend diagnostic delaying while parsing a class; pair with
/// PopParsingClass to restore the saved state.
ParsingClassState PushParsingClass() {
return DelayedDiagnostics.pushUndelayed();
}
/// \brief Restore the delayed-diagnostic state saved by PushParsingClass.
void PopParsingClass(ParsingClassState state) {
DelayedDiagnostics.popUndelayed(state);
}
void redelayDiagnostics(sema::DelayedDiagnosticPool &pool);
void EmitDeprecationWarning(NamedDecl *D, StringRef Message,
SourceLocation Loc,
const ObjCInterfaceDecl *UnknownObjCClass,
const ObjCPropertyDecl *ObjCProperty);
void HandleDelayedDeprecationCheck(sema::DelayedDiagnostic &DD, Decl *Ctx);
bool makeUnavailableInSystemHeader(SourceLocation loc,
StringRef message);
//===--------------------------------------------------------------------===//
// Expression Parsing Callbacks: SemaExpr.cpp.
bool CanUseDecl(NamedDecl *D);
bool DiagnoseUseOfDecl(NamedDecl *D, SourceLocation Loc,
const ObjCInterfaceDecl *UnknownObjCClass=0);
void NoteDeletedFunction(FunctionDecl *FD);
std::string getDeletedOrUnavailableSuffix(const FunctionDecl *FD);
bool DiagnosePropertyAccessorMismatch(ObjCPropertyDecl *PD,
ObjCMethodDecl *Getter,
SourceLocation Loc);
void DiagnoseSentinelCalls(NamedDecl *D, SourceLocation Loc,
ArrayRef<Expr *> Args);
void PushExpressionEvaluationContext(ExpressionEvaluationContext NewContext,
Decl *LambdaContextDecl = 0,
bool IsDecltype = false);
enum ReuseLambdaContextDecl_t { ReuseLambdaContextDecl };
void PushExpressionEvaluationContext(ExpressionEvaluationContext NewContext,
ReuseLambdaContextDecl_t,
bool IsDecltype = false);
void PopExpressionEvaluationContext();
void DiscardCleanupsInEvaluationContext();
ExprResult TransformToPotentiallyEvaluated(Expr *E);
ExprResult HandleExprEvaluationContextForTypeof(Expr *E);
ExprResult ActOnConstantExpression(ExprResult Res);
// Functions for marking a declaration referenced. These functions also
// contain the relevant logic for marking if a reference to a function or
// variable is an odr-use (in the C++11 sense). There are separate variants
// for expressions referring to a decl; these exist because odr-use marking
// needs to be delayed for some constant variables when we build one of the
// named expressions.
void MarkAnyDeclReferenced(SourceLocation Loc, Decl *D, bool OdrUse);
void MarkFunctionReferenced(SourceLocation Loc, FunctionDecl *Func);
void MarkVariableReferenced(SourceLocation Loc, VarDecl *Var);
void MarkDeclRefReferenced(DeclRefExpr *E);
void MarkMemberReferenced(MemberExpr *E);
void UpdateMarkingForLValueToRValue(Expr *E);
void CleanupVarDeclMarking();
/// \brief The kind of variable capture requested from tryCaptureVariable
/// (below): implicit (block or lambda), or an explicit by-value /
/// by-reference lambda capture.
enum TryCaptureKind {
TryCapture_Implicit, TryCapture_ExplicitByVal, TryCapture_ExplicitByRef
};
/// \brief Try to capture the given variable.
///
/// \param Var The variable to capture.
///
/// \param Loc The location at which the capture occurs.
///
/// \param Kind The kind of capture, which may be implicit (for either a
/// block or a lambda), or explicit by-value or by-reference (for a lambda).
///
/// \param EllipsisLoc The location of the ellipsis, if one is provided in
/// an explicit lambda capture.
///
/// \param BuildAndDiagnose Whether we are actually supposed to add the
/// captures or diagnose errors. If false, this routine merely check whether
/// the capture can occur without performing the capture itself or complaining
/// if the variable cannot be captured.
///
/// \param CaptureType Will be set to the type of the field used to capture
/// this variable in the innermost block or lambda. Only valid when the
/// variable can be captured.
///
/// \param DeclRefType Will be set to the type of a reference to the capture
/// from within the current scope. Only valid when the variable can be
/// captured.
///
/// \returns true if an error occurred (i.e., the variable cannot be
/// captured) and false if the capture succeeded.
bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc, TryCaptureKind Kind,
SourceLocation EllipsisLoc, bool BuildAndDiagnose,
QualType &CaptureType,
QualType &DeclRefType);
/// \brief Try to capture the given variable.
bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc,
TryCaptureKind Kind = TryCapture_Implicit,
SourceLocation EllipsisLoc = SourceLocation());
/// \brief Given a variable, determine the type that a reference to that
/// variable will have in the given scope.
QualType getCapturedDeclRefType(VarDecl *Var, SourceLocation Loc);
void MarkDeclarationsReferencedInType(SourceLocation Loc, QualType T);
void MarkDeclarationsReferencedInExpr(Expr *E,
bool SkipLocalVariables = false);
/// \brief Try to recover by turning the given expression into a
/// call. Returns true if recovery was attempted or an error was
/// emitted; this may also leave the ExprResult invalid.
bool tryToRecoverWithCall(ExprResult &E, const PartialDiagnostic &PD,
bool ForceComplain = false,
bool (*IsPlausibleResult)(QualType) = 0);
/// \brief Figure out if an expression could be turned into a call.
bool tryExprAsCall(Expr &E, QualType &ZeroArgCallReturnTy,
UnresolvedSetImpl &NonTemplateOverloads);
/// \brief Conditionally issue a diagnostic based on the current
/// evaluation context.
///
/// \param Statement If Statement is non-null, delay reporting the
/// diagnostic until the function body is parsed, and then do a basic
/// reachability analysis to determine if the statement is reachable.
/// If it is unreachable, the diagnostic will not be emitted.
bool DiagRuntimeBehavior(SourceLocation Loc, const Stmt *Statement,
const PartialDiagnostic &PD);
// Primary Expressions.
SourceRange getExprRange(Expr *E) const;
ExprResult ActOnIdExpression(Scope *S, CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
UnqualifiedId &Id,
bool HasTrailingLParen, bool IsAddressOfOperand,
CorrectionCandidateCallback *CCC = 0,
bool IsInlineAsmIdentifier = false);
void DecomposeUnqualifiedId(const UnqualifiedId &Id,
TemplateArgumentListInfo &Buffer,
DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *&TemplateArgs);
bool DiagnoseEmptyLookup(Scope *S, CXXScopeSpec &SS, LookupResult &R,
CorrectionCandidateCallback &CCC,
TemplateArgumentListInfo *ExplicitTemplateArgs = 0,
ArrayRef<Expr *> Args = None);
ExprResult LookupInObjCMethod(LookupResult &LookUp, Scope *S,
IdentifierInfo *II,
bool AllowBuiltinCreation=false);
ExprResult ActOnDependentIdExpression(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
bool isAddressOfOperand,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult BuildDeclRefExpr(ValueDecl *D, QualType Ty,
ExprValueKind VK,
SourceLocation Loc,
const CXXScopeSpec *SS = 0);
ExprResult BuildDeclRefExpr(ValueDecl *D, QualType Ty,
ExprValueKind VK,
const DeclarationNameInfo &NameInfo,
const CXXScopeSpec *SS = 0,
NamedDecl *FoundD = 0);
ExprResult
BuildAnonymousStructUnionMemberReference(const CXXScopeSpec &SS,
SourceLocation nameLoc,
IndirectFieldDecl *indirectField,
Expr *baseObjectExpr = 0,
SourceLocation opLoc = SourceLocation());
ExprResult BuildPossibleImplicitMemberExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
LookupResult &R,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult BuildImplicitMemberExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
LookupResult &R,
const TemplateArgumentListInfo *TemplateArgs,
bool IsDefiniteInstance);
bool UseArgumentDependentLookup(const CXXScopeSpec &SS,
const LookupResult &R,
bool HasTrailingLParen);
ExprResult BuildQualifiedDeclarationNameExpr(CXXScopeSpec &SS,
const DeclarationNameInfo &NameInfo,
bool IsAddressOfOperand);
ExprResult BuildDependentDeclRefExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult BuildDeclarationNameExpr(const CXXScopeSpec &SS,
LookupResult &R,
bool NeedsADL);
ExprResult BuildDeclarationNameExpr(const CXXScopeSpec &SS,
const DeclarationNameInfo &NameInfo,
NamedDecl *D, NamedDecl *FoundD = 0);
ExprResult BuildLiteralOperatorCall(LookupResult &R,
DeclarationNameInfo &SuffixInfo,
ArrayRef<Expr*> Args,
SourceLocation LitEndLoc,
TemplateArgumentListInfo *ExplicitTemplateArgs = 0);
ExprResult ActOnPredefinedExpr(SourceLocation Loc, tok::TokenKind Kind);
ExprResult ActOnIntegerConstant(SourceLocation Loc, uint64_t Val);
ExprResult ActOnNumericConstant(const Token &Tok, Scope *UDLScope = 0);
ExprResult ActOnCharacterConstant(const Token &Tok, Scope *UDLScope = 0);
ExprResult ActOnParenExpr(SourceLocation L, SourceLocation R, Expr *E);
ExprResult ActOnParenListExpr(SourceLocation L,
SourceLocation R,
MultiExprArg Val);
/// ActOnStringLiteral - The specified tokens were lexed as pasted string
/// fragments (e.g. "foo" "bar" L"baz").
ExprResult ActOnStringLiteral(const Token *StringToks, unsigned NumStringToks,
Scope *UDLScope = 0);
ExprResult ActOnGenericSelectionExpr(SourceLocation KeyLoc,
SourceLocation DefaultLoc,
SourceLocation RParenLoc,
Expr *ControllingExpr,
ArrayRef<ParsedType> ArgTypes,
ArrayRef<Expr *> ArgExprs);
ExprResult CreateGenericSelectionExpr(SourceLocation KeyLoc,
SourceLocation DefaultLoc,
SourceLocation RParenLoc,
Expr *ControllingExpr,
ArrayRef<TypeSourceInfo *> Types,
ArrayRef<Expr *> Exprs);
// Binary/Unary Operators. 'Tok' is the token for the operator.
ExprResult CreateBuiltinUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc,
Expr *InputExpr);
ExprResult BuildUnaryOp(Scope *S, SourceLocation OpLoc,
UnaryOperatorKind Opc, Expr *Input);
ExprResult ActOnUnaryOp(Scope *S, SourceLocation OpLoc,
tok::TokenKind Op, Expr *Input);
ExprResult CreateUnaryExprOrTypeTraitExpr(TypeSourceInfo *TInfo,
SourceLocation OpLoc,
UnaryExprOrTypeTrait ExprKind,
SourceRange R);
ExprResult CreateUnaryExprOrTypeTraitExpr(Expr *E, SourceLocation OpLoc,
UnaryExprOrTypeTrait ExprKind);
ExprResult
ActOnUnaryExprOrTypeTraitExpr(SourceLocation OpLoc,
UnaryExprOrTypeTrait ExprKind,
bool IsType, void *TyOrEx,
const SourceRange &ArgRange);
ExprResult CheckPlaceholderExpr(Expr *E);
bool CheckVecStepExpr(Expr *E);
bool CheckUnaryExprOrTypeTraitOperand(Expr *E, UnaryExprOrTypeTrait ExprKind);
bool CheckUnaryExprOrTypeTraitOperand(QualType ExprType, SourceLocation OpLoc,
SourceRange ExprRange,
UnaryExprOrTypeTrait ExprKind);
ExprResult ActOnSizeofParameterPackExpr(Scope *S,
SourceLocation OpLoc,
IdentifierInfo &Name,
SourceLocation NameLoc,
SourceLocation RParenLoc);
ExprResult ActOnPostfixUnaryOp(Scope *S, SourceLocation OpLoc,
tok::TokenKind Kind, Expr *Input);
ExprResult ActOnArraySubscriptExpr(Scope *S, Expr *Base, SourceLocation LLoc,
Expr *Idx, SourceLocation RLoc);
ExprResult CreateBuiltinArraySubscriptExpr(Expr *Base, SourceLocation LLoc,
Expr *Idx, SourceLocation RLoc);
ExprResult BuildMemberReferenceExpr(Expr *Base, QualType BaseType,
SourceLocation OpLoc, bool IsArrow,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
NamedDecl *FirstQualifierInScope,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs);
// This struct is for use by ActOnMemberAccess to allow
// BuildMemberReferenceExpr to be able to reinvoke ActOnMemberAccess after
// changing the access operator from a '.' to a '->' (to see if that is the
// change needed to fix an error about an unknown member, e.g. when the class
// defines a custom operator->).
struct ActOnMemberAccessExtraArgs {
Scope *S; // Scope in which the original member access was parsed.
UnqualifiedId &Id; // The member name exactly as written by the user.
Decl *ObjCImpDecl; // NOTE(review): presumably the enclosing ObjC implementation decl, as passed to ActOnMemberAccessExpr -- confirm.
bool HasTrailingLParen; // presumably true when '(' follows the member name (call syntax) -- mirrors the ActOnMemberAccessExpr parameter.
};
ExprResult BuildMemberReferenceExpr(Expr *Base, QualType BaseType,
SourceLocation OpLoc, bool IsArrow,
const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
NamedDecl *FirstQualifierInScope,
LookupResult &R,
const TemplateArgumentListInfo *TemplateArgs,
bool SuppressQualifierCheck = false,
ActOnMemberAccessExtraArgs *ExtraArgs = 0);
ExprResult PerformMemberExprBaseConversion(Expr *Base, bool IsArrow);
ExprResult LookupMemberExpr(LookupResult &R, ExprResult &Base,
bool &IsArrow, SourceLocation OpLoc,
CXXScopeSpec &SS,
Decl *ObjCImpDecl,
bool HasTemplateArgs);
bool CheckQualifiedMemberReference(Expr *BaseExpr, QualType BaseType,
const CXXScopeSpec &SS,
const LookupResult &R);
ExprResult ActOnDependentMemberExpr(Expr *Base, QualType BaseType,
bool IsArrow, SourceLocation OpLoc,
const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
NamedDecl *FirstQualifierInScope,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult ActOnMemberAccessExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
UnqualifiedId &Member,
Decl *ObjCImpDecl,
bool HasTrailingLParen);
void ActOnDefaultCtorInitializers(Decl *CDtorDecl);
bool ConvertArgumentsForCall(CallExpr *Call, Expr *Fn,
FunctionDecl *FDecl,
const FunctionProtoType *Proto,
ArrayRef<Expr *> Args,
SourceLocation RParenLoc,
bool ExecConfig = false);
void CheckStaticArrayArgument(SourceLocation CallLoc,
ParmVarDecl *Param,
const Expr *ArgExpr);
/// ActOnCallExpr - Handle a call to Fn with the specified array of arguments.
/// This provides the location of the left/right parens and a list of comma
/// locations.
ExprResult ActOnCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc,
MultiExprArg ArgExprs, SourceLocation RParenLoc,
Expr *ExecConfig = 0, bool IsExecConfig = false);
ExprResult BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl,
SourceLocation LParenLoc,
ArrayRef<Expr *> Arg,
SourceLocation RParenLoc,
Expr *Config = 0,
bool IsExecConfig = false);
ExprResult ActOnCUDAExecConfigExpr(Scope *S, SourceLocation LLLLoc,
MultiExprArg ExecConfig,
SourceLocation GGGLoc);
ExprResult ActOnCastExpr(Scope *S, SourceLocation LParenLoc,
Declarator &D, ParsedType &Ty,
SourceLocation RParenLoc, Expr *CastExpr);
ExprResult BuildCStyleCastExpr(SourceLocation LParenLoc,
TypeSourceInfo *Ty,
SourceLocation RParenLoc,
Expr *Op);
CastKind PrepareScalarCast(ExprResult &src, QualType destType);
/// \brief Build an altivec or OpenCL literal.
ExprResult BuildVectorLiteral(SourceLocation LParenLoc,
SourceLocation RParenLoc, Expr *E,
TypeSourceInfo *TInfo);
ExprResult MaybeConvertParenListExprToParenExpr(Scope *S, Expr *ME);
ExprResult ActOnCompoundLiteral(SourceLocation LParenLoc,
ParsedType Ty,
SourceLocation RParenLoc,
Expr *InitExpr);
ExprResult BuildCompoundLiteralExpr(SourceLocation LParenLoc,
TypeSourceInfo *TInfo,
SourceLocation RParenLoc,
Expr *LiteralExpr);
ExprResult ActOnInitList(SourceLocation LBraceLoc,
MultiExprArg InitArgList,
SourceLocation RBraceLoc);
ExprResult ActOnDesignatedInitializer(Designation &Desig,
SourceLocation Loc,
bool GNUSyntax,
ExprResult Init);
ExprResult ActOnBinOp(Scope *S, SourceLocation TokLoc,
tok::TokenKind Kind, Expr *LHSExpr, Expr *RHSExpr);
ExprResult BuildBinOp(Scope *S, SourceLocation OpLoc,
BinaryOperatorKind Opc, Expr *LHSExpr, Expr *RHSExpr);
ExprResult CreateBuiltinBinOp(SourceLocation OpLoc, BinaryOperatorKind Opc,
Expr *LHSExpr, Expr *RHSExpr);
/// ActOnConditionalOp - Parse a ?: operation. Note that 'LHS' may be null
/// in the case of the GNU conditional expr extension.
ExprResult ActOnConditionalOp(SourceLocation QuestionLoc,
SourceLocation ColonLoc,
Expr *CondExpr, Expr *LHSExpr, Expr *RHSExpr);
/// ActOnAddrLabel - Parse the GNU address of label extension: "&&foo".
ExprResult ActOnAddrLabel(SourceLocation OpLoc, SourceLocation LabLoc,
LabelDecl *TheDecl);
void ActOnStartStmtExpr();
ExprResult ActOnStmtExpr(SourceLocation LPLoc, Stmt *SubStmt,
SourceLocation RPLoc); // "({..})"
void ActOnStmtExprError();
// __builtin_offsetof(type, identifier(.identifier|[expr])*)
// One designator component of a __builtin_offsetof expression:
// either a member access (.ident) or an array subscript ([expr]).
struct OffsetOfComponent {
SourceLocation LocStart, LocEnd; // Source range covered by this component.
bool isBrackets; // true if [expr], false if .ident
union {
IdentifierInfo *IdentInfo; // Used when !isBrackets: the member name.
Expr *E; // Used when isBrackets: the subscript expression.
} U;
};
/// __builtin_offsetof(type, a.b[123][456].c)
ExprResult BuildBuiltinOffsetOf(SourceLocation BuiltinLoc,
TypeSourceInfo *TInfo,
OffsetOfComponent *CompPtr,
unsigned NumComponents,
SourceLocation RParenLoc);
ExprResult ActOnBuiltinOffsetOf(Scope *S,
SourceLocation BuiltinLoc,
SourceLocation TypeLoc,
ParsedType ParsedArgTy,
OffsetOfComponent *CompPtr,
unsigned NumComponents,
SourceLocation RParenLoc);
// __builtin_choose_expr(constExpr, expr1, expr2)
ExprResult ActOnChooseExpr(SourceLocation BuiltinLoc,
Expr *CondExpr, Expr *LHSExpr,
Expr *RHSExpr, SourceLocation RPLoc);
// __builtin_va_arg(expr, type)
ExprResult ActOnVAArg(SourceLocation BuiltinLoc, Expr *E, ParsedType Ty,
SourceLocation RPLoc);
ExprResult BuildVAArgExpr(SourceLocation BuiltinLoc, Expr *E,
TypeSourceInfo *TInfo, SourceLocation RPLoc);
// __null
ExprResult ActOnGNUNullExpr(SourceLocation TokenLoc);
bool CheckCaseExpression(Expr *E);
/// \brief Describes the result of an "if-exists" condition check,
/// i.e. the Microsoft __if_exists / __if_not_exists extension
/// (see CheckMicrosoftIfExistsSymbol / ActOnMSDependentExistsStmt).
enum IfExistsResult {
/// \brief The symbol exists.
IER_Exists,
/// \brief The symbol does not exist.
IER_DoesNotExist,
/// \brief The name is a dependent name, so the results will differ
/// from one instantiation to the next.
IER_Dependent,
/// \brief An error occurred.
IER_Error
};
IfExistsResult
CheckMicrosoftIfExistsSymbol(Scope *S, CXXScopeSpec &SS,
const DeclarationNameInfo &TargetNameInfo);
IfExistsResult
CheckMicrosoftIfExistsSymbol(Scope *S, SourceLocation KeywordLoc,
bool IsIfExists, CXXScopeSpec &SS,
UnqualifiedId &Name);
StmtResult BuildMSDependentExistsStmt(SourceLocation KeywordLoc,
bool IsIfExists,
NestedNameSpecifierLoc QualifierLoc,
DeclarationNameInfo NameInfo,
Stmt *Nested);
StmtResult ActOnMSDependentExistsStmt(SourceLocation KeywordLoc,
bool IsIfExists,
CXXScopeSpec &SS, UnqualifiedId &Name,
Stmt *Nested);
//===------------------------- "Block" Extension ------------------------===//
/// ActOnBlockStart - This callback is invoked when a block literal is
/// started.
void ActOnBlockStart(SourceLocation CaretLoc, Scope *CurScope);
/// ActOnBlockArguments - This callback allows processing of block arguments.
/// If there are no arguments, this is still invoked.
void ActOnBlockArguments(SourceLocation CaretLoc, Declarator &ParamInfo,
Scope *CurScope);
/// ActOnBlockError - If there is an error parsing a block, this callback
/// is invoked to pop the information about the block from the action impl.
void ActOnBlockError(SourceLocation CaretLoc, Scope *CurScope);
/// ActOnBlockStmtExpr - This is called when the body of a block statement
/// literal was successfully completed. ^(int x){...}
ExprResult ActOnBlockStmtExpr(SourceLocation CaretLoc, Stmt *Body,
Scope *CurScope);
//===---------------------------- OpenCL Features -----------------------===//
/// __builtin_astype(...)
ExprResult ActOnAsTypeExpr(Expr *E, ParsedType ParsedDestTy,
SourceLocation BuiltinLoc,
SourceLocation RParenLoc);
//===---------------------------- C++ Features --------------------------===//
// Act on C++ namespaces
Decl *ActOnStartNamespaceDef(Scope *S, SourceLocation InlineLoc,
SourceLocation NamespaceLoc,
SourceLocation IdentLoc,
IdentifierInfo *Ident,
SourceLocation LBrace,
AttributeList *AttrList);
void ActOnFinishNamespaceDef(Decl *Dcl, SourceLocation RBrace);
NamespaceDecl *getStdNamespace() const;
NamespaceDecl *getOrCreateStdNamespace();
CXXRecordDecl *getStdBadAlloc() const;
/// \brief Tests whether Ty is an instance of std::initializer_list and, if
/// it is and Element is not NULL, assigns the element type to Element.
bool isStdInitializerList(QualType Ty, QualType *Element);
/// \brief Looks for the std::initializer_list template and instantiates it
/// with Element, or emits an error if it's not found.
///
/// \returns The instantiated template, or null on error.
QualType BuildStdInitializerList(QualType Element, SourceLocation Loc);
/// \brief Determine whether Ctor is an initializer-list constructor, as
/// defined in [dcl.init.list]p2.
bool isInitListConstructor(const CXXConstructorDecl *Ctor);
Decl *ActOnUsingDirective(Scope *CurScope,
SourceLocation UsingLoc,
SourceLocation NamespcLoc,
CXXScopeSpec &SS,
SourceLocation IdentLoc,
IdentifierInfo *NamespcName,
AttributeList *AttrList);
void PushUsingDirective(Scope *S, UsingDirectiveDecl *UDir);
Decl *ActOnNamespaceAliasDef(Scope *CurScope,
SourceLocation NamespaceLoc,
SourceLocation AliasLoc,
IdentifierInfo *Alias,
CXXScopeSpec &SS,
SourceLocation IdentLoc,
IdentifierInfo *Ident);
void HideUsingShadowDecl(Scope *S, UsingShadowDecl *Shadow);
bool CheckUsingShadowDecl(UsingDecl *UD, NamedDecl *Target,
const LookupResult &PreviousDecls);
UsingShadowDecl *BuildUsingShadowDecl(Scope *S, UsingDecl *UD,
NamedDecl *Target);
bool CheckUsingDeclRedeclaration(SourceLocation UsingLoc,
bool isTypeName,
const CXXScopeSpec &SS,
SourceLocation NameLoc,
const LookupResult &Previous);
bool CheckUsingDeclQualifier(SourceLocation UsingLoc,
const CXXScopeSpec &SS,
SourceLocation NameLoc);
NamedDecl *BuildUsingDeclaration(Scope *S, AccessSpecifier AS,
SourceLocation UsingLoc,
CXXScopeSpec &SS,
const DeclarationNameInfo &NameInfo,
AttributeList *AttrList,
bool IsInstantiation,
bool IsTypeName,
SourceLocation TypenameLoc);
bool CheckInheritingConstructorUsingDecl(UsingDecl *UD);
Decl *ActOnUsingDeclaration(Scope *CurScope,
AccessSpecifier AS,
bool HasUsingKeyword,
SourceLocation UsingLoc,
CXXScopeSpec &SS,
UnqualifiedId &Name,
AttributeList *AttrList,
bool IsTypeName,
SourceLocation TypenameLoc);
Decl *ActOnAliasDeclaration(Scope *CurScope,
AccessSpecifier AS,
MultiTemplateParamsArg TemplateParams,
SourceLocation UsingLoc,
UnqualifiedId &Name,
AttributeList *AttrList,
TypeResult Type);
/// BuildCXXConstructExpr - Creates a complete call to a constructor,
/// including handling of its default argument expressions.
///
/// \param ConstructKind - a CXXConstructExpr::ConstructionKind
ExprResult
BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
CXXConstructorDecl *Constructor, MultiExprArg Exprs,
bool HadMultipleCandidates, bool IsListInitialization,
bool RequiresZeroInit, unsigned ConstructKind,
SourceRange ParenRange);
// FIXME: Can we remove this and have the above BuildCXXConstructExpr check if
// the constructor can be elidable?
ExprResult
BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
CXXConstructorDecl *Constructor, bool Elidable,
MultiExprArg Exprs, bool HadMultipleCandidates,
bool IsListInitialization, bool RequiresZeroInit,
unsigned ConstructKind, SourceRange ParenRange);
/// BuildCXXDefaultArgExpr - Creates a CXXDefaultArgExpr, instantiating
/// the default expr if needed.
ExprResult BuildCXXDefaultArgExpr(SourceLocation CallLoc,
FunctionDecl *FD,
ParmVarDecl *Param);
/// FinalizeVarWithDestructor - Prepare for calling destructor on the
/// constructed variable.
void FinalizeVarWithDestructor(VarDecl *VD, const RecordType *DeclInitType);
/// \brief Helper class that collects exception specifications for
/// implicitly-declared special member functions.
class ImplicitExceptionSpecification {
// Pointer to allow copying
Sema *Self;
// We order exception specifications thus:
// noexcept is the most restrictive, but is only used in C++11.
// throw() comes next.
// Then a throw(collected exceptions)
// Finally no specification, which is expressed as noexcept(false).
// throw(...) is used instead if any called function uses it.
ExceptionSpecificationType ComputedEST; // Least restrictive spec seen so far, per ordering above.
llvm::SmallPtrSet<CanQualType, 4> ExceptionsSeen; // Dedup set mirroring Exceptions.
SmallVector<QualType, 4> Exceptions; // Collected throw() exception types, in order seen.
// Drop any collected throw() exception list (both the ordered list and
// its dedup set).
void ClearExceptions() {
ExceptionsSeen.clear();
Exceptions.clear();
}
public:
// Start from plain noexcept (most restrictive) in C++11; otherwise
// start from throw() (EST_DynamicNone), since noexcept is C++11-only.
explicit ImplicitExceptionSpecification(Sema &Self)
: Self(&Self), ComputedEST(EST_BasicNoexcept) {
if (!Self.getLangOpts().CPlusPlus11)
ComputedEST = EST_DynamicNone;
}
/// \brief Get the computed exception specification type.
ExceptionSpecificationType getExceptionSpecType() const {
assert(ComputedEST != EST_ComputedNoexcept &&
"noexcept(expr) should not be a possible result");
return ComputedEST;
}
/// \brief The number of exceptions in the exception specification.
unsigned size() const { return Exceptions.size(); }
/// \brief The set of exceptions in the exception specification.
const QualType *data() const { return Exceptions.data(); }
/// \brief Integrate another called method into the collected data.
void CalledDecl(SourceLocation CallLoc, const CXXMethodDecl *Method);
/// \brief Integrate an invoked expression into the collected data.
void CalledExpr(Expr *E);
/// \brief Overwrite an EPI's exception specification with this
/// computed exception specification.
void getEPI(FunctionProtoType::ExtProtoInfo &EPI) const {
EPI.ExceptionSpecType = getExceptionSpecType();
if (EPI.ExceptionSpecType == EST_Dynamic) {
EPI.NumExceptions = size();
EPI.Exceptions = data();
} else if (EPI.ExceptionSpecType == EST_None) {
/// C++11 [except.spec]p14:
/// The exception-specification is noexcept(false) if the set of
/// potential exceptions of the special member function contains "any"
// Express "no specification" as noexcept(false): build a 'false'
// bool literal to serve as the noexcept argument.
EPI.ExceptionSpecType = EST_ComputedNoexcept;
EPI.NoexceptExpr = Self->ActOnCXXBoolLiteral(SourceLocation(),
tok::kw_false).take();
}
}
/// \brief Convenience overload: return a fresh ExtProtoInfo populated
/// via getEPI(EPI) above.
FunctionProtoType::ExtProtoInfo getEPI() const {
FunctionProtoType::ExtProtoInfo EPI;
getEPI(EPI);
return EPI;
}
};
/// \brief Determine what sort of exception specification a defaulted
/// default constructor of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedDefaultCtorExceptionSpec(SourceLocation Loc,
CXXMethodDecl *MD);
/// \brief Determine what sort of exception specification a defaulted
/// copy constructor of a class will have, and whether the parameter
/// will be const.
ImplicitExceptionSpecification
ComputeDefaultedCopyCtorExceptionSpec(CXXMethodDecl *MD);
/// \brief Determine what sort of exception specification a defaulted
/// copy assignment operator of a class will have, and whether the
/// parameter will be const.
ImplicitExceptionSpecification
ComputeDefaultedCopyAssignmentExceptionSpec(CXXMethodDecl *MD);
/// \brief Determine what sort of exception specification a defaulted move
/// constructor of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedMoveCtorExceptionSpec(CXXMethodDecl *MD);
/// \brief Determine what sort of exception specification a defaulted move
/// assignment operator of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedMoveAssignmentExceptionSpec(CXXMethodDecl *MD);
/// \brief Determine what sort of exception specification a defaulted
/// destructor of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedDtorExceptionSpec(CXXMethodDecl *MD);
/// \brief Determine what sort of exception specification an inheriting
/// constructor of a class will have.
ImplicitExceptionSpecification
ComputeInheritingCtorExceptionSpec(CXXConstructorDecl *CD);
/// \brief Evaluate the implicit exception specification for a defaulted
/// special member function.
void EvaluateImplicitExceptionSpec(SourceLocation Loc, CXXMethodDecl *MD);
/// \brief Check the given exception-specification and update the
/// extended prototype information with the results.
void checkExceptionSpecification(ExceptionSpecificationType EST,
ArrayRef<ParsedType> DynamicExceptions,
ArrayRef<SourceRange> DynamicExceptionRanges,
Expr *NoexceptExpr,
SmallVectorImpl<QualType> &Exceptions,
FunctionProtoType::ExtProtoInfo &EPI);
/// \brief Determine if a special member function should have a deleted
/// definition when it is defaulted.
bool ShouldDeleteSpecialMember(CXXMethodDecl *MD, CXXSpecialMember CSM,
bool Diagnose = false);
/// \brief Declare the implicit default constructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// default constructor will be added.
///
/// \returns The implicitly-declared default constructor.
CXXConstructorDecl *DeclareImplicitDefaultConstructor(
CXXRecordDecl *ClassDecl);
/// DefineImplicitDefaultConstructor - Checks for feasibility of
/// defining this constructor as the default constructor.
void DefineImplicitDefaultConstructor(SourceLocation CurrentLocation,
CXXConstructorDecl *Constructor);
/// \brief Declare the implicit destructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// destructor will be added.
///
/// \returns The implicitly-declared destructor.
CXXDestructorDecl *DeclareImplicitDestructor(CXXRecordDecl *ClassDecl);
/// DefineImplicitDestructor - Checks for feasibility of
/// defining this destructor as the default destructor.
void DefineImplicitDestructor(SourceLocation CurrentLocation,
CXXDestructorDecl *Destructor);
/// \brief Build an exception spec for destructors that don't have one.
///
/// C++11 says that user-defined destructors with no exception spec get one
/// that looks as if the destructor was implicitly declared.
void AdjustDestructorExceptionSpec(CXXRecordDecl *ClassDecl,
CXXDestructorDecl *Destructor);
/// \brief Declare all inheriting constructors for the given class.
///
/// \param ClassDecl The class declaration into which the inheriting
/// constructors will be added.
void DeclareInheritingConstructors(CXXRecordDecl *ClassDecl);
/// \brief Define the specified inheriting constructor.
void DefineInheritingConstructor(SourceLocation UseLoc,
CXXConstructorDecl *Constructor);
/// \brief Declare the implicit copy constructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// copy constructor will be added.
///
/// \returns The implicitly-declared copy constructor.
CXXConstructorDecl *DeclareImplicitCopyConstructor(CXXRecordDecl *ClassDecl);
/// DefineImplicitCopyConstructor - Checks for feasibility of
/// defining this constructor as the copy constructor.
void DefineImplicitCopyConstructor(SourceLocation CurrentLocation,
CXXConstructorDecl *Constructor);
/// \brief Declare the implicit move constructor for the given class.
///
/// \param ClassDecl The Class declaration into which the implicit
/// move constructor will be added.
///
/// \returns The implicitly-declared move constructor, or NULL if it wasn't
/// declared.
CXXConstructorDecl *DeclareImplicitMoveConstructor(CXXRecordDecl *ClassDecl);
/// DefineImplicitMoveConstructor - Checks for feasibility of
/// defining this constructor as the move constructor.
void DefineImplicitMoveConstructor(SourceLocation CurrentLocation,
CXXConstructorDecl *Constructor);
/// \brief Declare the implicit copy assignment operator for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// copy assignment operator will be added.
///
/// \returns The implicitly-declared copy assignment operator.
CXXMethodDecl *DeclareImplicitCopyAssignment(CXXRecordDecl *ClassDecl);
/// \brief Defines an implicitly-declared copy assignment operator.
void DefineImplicitCopyAssignment(SourceLocation CurrentLocation,
CXXMethodDecl *MethodDecl);
/// \brief Declare the implicit move assignment operator for the given class.
///
/// \param ClassDecl The Class declaration into which the implicit
/// move assignment operator will be added.
///
/// \returns The implicitly-declared move assignment operator, or NULL if it
/// wasn't declared.
CXXMethodDecl *DeclareImplicitMoveAssignment(CXXRecordDecl *ClassDecl);
/// \brief Defines an implicitly-declared move assignment operator.
void DefineImplicitMoveAssignment(SourceLocation CurrentLocation,
CXXMethodDecl *MethodDecl);
/// \brief Force the declaration of any implicitly-declared members of this
/// class.
void ForceDeclarationOfImplicitMembers(CXXRecordDecl *Class);
/// \brief Determine whether the given function is an implicitly-deleted
/// special member function.
bool isImplicitlyDeleted(FunctionDecl *FD);
/// \brief Check whether 'this' shows up in the type of a static member
/// function after the (naturally empty) cv-qualifier-seq would be.
///
/// \returns true if an error occurred.
bool checkThisInStaticMemberFunctionType(CXXMethodDecl *Method);
/// \brief Whether 'this' shows up in the exception specification of a static
/// member function.
bool checkThisInStaticMemberFunctionExceptionSpec(CXXMethodDecl *Method);
/// \brief Check whether 'this' shows up in the attributes of the given
/// static member function.
///
/// \returns true if an error occurred.
bool checkThisInStaticMemberFunctionAttributes(CXXMethodDecl *Method);
/// MaybeBindToTemporary - If the passed in expression has a record type with
/// a non-trivial destructor, this will return CXXBindTemporaryExpr. Otherwise
/// it simply returns the passed in expression.
ExprResult MaybeBindToTemporary(Expr *E);
bool CompleteConstructorCall(CXXConstructorDecl *Constructor,
MultiExprArg ArgsPtr,
SourceLocation Loc,
SmallVectorImpl<Expr*> &ConvertedArgs,
bool AllowExplicit = false,
bool IsListInitialization = false);
ParsedType getInheritingConstructorName(CXXScopeSpec &SS,
SourceLocation NameLoc,
IdentifierInfo &Name);
ParsedType getDestructorName(SourceLocation TildeLoc,
IdentifierInfo &II, SourceLocation NameLoc,
Scope *S, CXXScopeSpec &SS,
ParsedType ObjectType,
bool EnteringContext);
ParsedType getDestructorType(const DeclSpec& DS, ParsedType ObjectType);
// Checks that reinterpret casts don't have undefined behavior.
void CheckCompatibleReinterpretCast(QualType SrcType, QualType DestType,
bool IsDereference, SourceRange Range);
/// ActOnCXXNamedCast - Parse {dynamic,static,reinterpret,const}_cast's.
ExprResult ActOnCXXNamedCast(SourceLocation OpLoc,
tok::TokenKind Kind,
SourceLocation LAngleBracketLoc,
Declarator &D,
SourceLocation RAngleBracketLoc,
SourceLocation LParenLoc,
Expr *E,
SourceLocation RParenLoc);
ExprResult BuildCXXNamedCast(SourceLocation OpLoc,
tok::TokenKind Kind,
TypeSourceInfo *Ty,
Expr *E,
SourceRange AngleBrackets,
SourceRange Parens);
ExprResult BuildCXXTypeId(QualType TypeInfoType,
SourceLocation TypeidLoc,
TypeSourceInfo *Operand,
SourceLocation RParenLoc);
ExprResult BuildCXXTypeId(QualType TypeInfoType,
SourceLocation TypeidLoc,
Expr *Operand,
SourceLocation RParenLoc);
/// ActOnCXXTypeid - Parse typeid( something ).
ExprResult ActOnCXXTypeid(SourceLocation OpLoc,
SourceLocation LParenLoc, bool isType,
void *TyOrExpr,
SourceLocation RParenLoc);
ExprResult BuildCXXUuidof(QualType TypeInfoType,
SourceLocation TypeidLoc,
TypeSourceInfo *Operand,
SourceLocation RParenLoc);
ExprResult BuildCXXUuidof(QualType TypeInfoType,
SourceLocation TypeidLoc,
Expr *Operand,
SourceLocation RParenLoc);
/// ActOnCXXUuidof - Parse __uuidof( something ).
ExprResult ActOnCXXUuidof(SourceLocation OpLoc,
SourceLocation LParenLoc, bool isType,
void *TyOrExpr,
SourceLocation RParenLoc);
//// ActOnCXXThis - Parse 'this' pointer.
ExprResult ActOnCXXThis(SourceLocation loc);
/// \brief Try to retrieve the type of the 'this' pointer.
///
/// \returns The type of 'this', if possible. Otherwise, returns a NULL type.
QualType getCurrentThisType();
/// \brief When non-NULL, the C++ 'this' expression is allowed despite the
/// current context not being a non-static member function. In such cases,
/// this provides the type used for 'this'.
QualType CXXThisTypeOverride;
/// \brief RAII object used to temporarily allow the C++ 'this' expression
/// to be used, with the given qualifiers on the current class type.
class CXXThisScopeRAII {
Sema &S; // Sema whose CXXThisTypeOverride is being adjusted.
QualType OldCXXThisTypeOverride; // Saved override; presumably restored by the destructor (dtor is out of line -- confirm).
bool Enabled; // Whether this scope actually installed an override.
public:
/// \brief Introduce a new scope where 'this' may be allowed (when enabled),
/// using the given declaration (which is either a class template or a
/// class) along with the qualifiers placed on '*this'.
CXXThisScopeRAII(Sema &S, Decl *ContextDecl, unsigned CXXThisTypeQuals,
bool Enabled = true);
~CXXThisScopeRAII();
};
/// \brief Make sure the value of 'this' is actually available in the current
/// context, if it is a potentially evaluated context.
///
/// \param Loc The location at which the capture of 'this' occurs.
///
/// \param Explicit Whether 'this' is explicitly captured in a lambda
/// capture list.
void CheckCXXThisCapture(SourceLocation Loc, bool Explicit = false);
/// \brief Determine whether the given type is the type of *this that is used
/// outside of the body of a member function for a type that is currently
/// being defined.
bool isThisOutsideMemberFunctionBody(QualType BaseType);
/// ActOnCXXBoolLiteral - Parse {true,false} literals.
ExprResult ActOnCXXBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind);
/// ActOnObjCBoolLiteral - Parse {__objc_yes,__objc_no} literals.
ExprResult ActOnObjCBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind);
/// ActOnCXXNullPtrLiteral - Parse 'nullptr'.
ExprResult ActOnCXXNullPtrLiteral(SourceLocation Loc);
//// ActOnCXXThrow - Parse throw expressions.
ExprResult ActOnCXXThrow(Scope *S, SourceLocation OpLoc, Expr *expr);
ExprResult BuildCXXThrow(SourceLocation OpLoc, Expr *Ex,
bool IsThrownVarInScope);
ExprResult CheckCXXThrowOperand(SourceLocation ThrowLoc, Expr *E,
bool IsThrownVarInScope);
/// ActOnCXXTypeConstructExpr - Parse construction of a specified type.
/// Can be interpreted either as function-style casting ("int(x)")
/// or class type construction ("ClassType(x,y,z)")
/// or creation of a value-initialized type ("int()").
ExprResult ActOnCXXTypeConstructExpr(ParsedType TypeRep,
SourceLocation LParenLoc,
MultiExprArg Exprs,
SourceLocation RParenLoc);
ExprResult BuildCXXTypeConstructExpr(TypeSourceInfo *Type,
SourceLocation LParenLoc,
MultiExprArg Exprs,
SourceLocation RParenLoc);
/// ActOnCXXNew - Parsed a C++ 'new' expression.
ExprResult ActOnCXXNew(SourceLocation StartLoc, bool UseGlobal,
SourceLocation PlacementLParen,
MultiExprArg PlacementArgs,
SourceLocation PlacementRParen,
SourceRange TypeIdParens, Declarator &D,
Expr *Initializer);
ExprResult BuildCXXNew(SourceRange Range, bool UseGlobal,
SourceLocation PlacementLParen,
MultiExprArg PlacementArgs,
SourceLocation PlacementRParen,
SourceRange TypeIdParens,
QualType AllocType,
TypeSourceInfo *AllocTypeInfo,
Expr *ArraySize,
SourceRange DirectInitRange,
Expr *Initializer,
bool TypeMayContainAuto = true);
bool CheckAllocatedType(QualType AllocType, SourceLocation Loc,
SourceRange R);
bool FindAllocationFunctions(SourceLocation StartLoc, SourceRange Range,
bool UseGlobal, QualType AllocType, bool IsArray,
MultiExprArg PlaceArgs,
FunctionDecl *&OperatorNew,
FunctionDecl *&OperatorDelete);
bool FindAllocationOverload(SourceLocation StartLoc, SourceRange Range,
DeclarationName Name, MultiExprArg Args,
DeclContext *Ctx,
bool AllowMissing, FunctionDecl *&Operator,
bool Diagnose = true);
void DeclareGlobalNewDelete();
void DeclareGlobalAllocationFunction(DeclarationName Name, QualType Return,
QualType Argument,
bool addMallocAttr = false);
bool FindDeallocationFunction(SourceLocation StartLoc, CXXRecordDecl *RD,
DeclarationName Name, FunctionDecl* &Operator,
bool Diagnose = true);
/// ActOnCXXDelete - Parsed a C++ 'delete' expression
ExprResult ActOnCXXDelete(SourceLocation StartLoc,
bool UseGlobal, bool ArrayForm,
Expr *Operand);
DeclResult ActOnCXXConditionDeclaration(Scope *S, Declarator &D);
ExprResult CheckConditionVariable(VarDecl *ConditionVar,
SourceLocation StmtLoc,
bool ConvertToBoolean);
ExprResult ActOnNoexceptExpr(SourceLocation KeyLoc, SourceLocation LParen,
Expr *Operand, SourceLocation RParen);
ExprResult BuildCXXNoexceptExpr(SourceLocation KeyLoc, Expr *Operand,
SourceLocation RParen);
/// ActOnUnaryTypeTrait - Parsed one of the unary type trait support
/// pseudo-functions.
ExprResult ActOnUnaryTypeTrait(UnaryTypeTrait OTT,
SourceLocation KWLoc,
ParsedType Ty,
SourceLocation RParen);
ExprResult BuildUnaryTypeTrait(UnaryTypeTrait OTT,
SourceLocation KWLoc,
TypeSourceInfo *T,
SourceLocation RParen);
/// ActOnBinaryTypeTrait - Parsed one of the binary type trait support
/// pseudo-functions.
ExprResult ActOnBinaryTypeTrait(BinaryTypeTrait OTT,
SourceLocation KWLoc,
ParsedType LhsTy,
ParsedType RhsTy,
SourceLocation RParen);
ExprResult BuildBinaryTypeTrait(BinaryTypeTrait BTT,
SourceLocation KWLoc,
TypeSourceInfo *LhsT,
TypeSourceInfo *RhsT,
SourceLocation RParen);
/// \brief Parsed one of the type trait support pseudo-functions.
ExprResult ActOnTypeTrait(TypeTrait Kind, SourceLocation KWLoc,
ArrayRef<ParsedType> Args,
SourceLocation RParenLoc);
ExprResult BuildTypeTrait(TypeTrait Kind, SourceLocation KWLoc,
ArrayRef<TypeSourceInfo *> Args,
SourceLocation RParenLoc);
/// ActOnArrayTypeTrait - Parsed one of the array type trait support
/// pseudo-functions.
ExprResult ActOnArrayTypeTrait(ArrayTypeTrait ATT,
SourceLocation KWLoc,
ParsedType LhsTy,
Expr *DimExpr,
SourceLocation RParen);
ExprResult BuildArrayTypeTrait(ArrayTypeTrait ATT,
SourceLocation KWLoc,
TypeSourceInfo *TSInfo,
Expr *DimExpr,
SourceLocation RParen);
/// ActOnExpressionTrait - Parsed one of the expression trait support
/// pseudo-functions.
ExprResult ActOnExpressionTrait(ExpressionTrait OET,
SourceLocation KWLoc,
Expr *Queried,
SourceLocation RParen);
ExprResult BuildExpressionTrait(ExpressionTrait OET,
SourceLocation KWLoc,
Expr *Queried,
SourceLocation RParen);
ExprResult ActOnStartCXXMemberReference(Scope *S,
Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
ParsedType &ObjectType,
bool &MayBePseudoDestructor);
ExprResult DiagnoseDtorReference(SourceLocation NameLoc, Expr *MemExpr);
ExprResult BuildPseudoDestructorExpr(Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
const CXXScopeSpec &SS,
TypeSourceInfo *ScopeType,
SourceLocation CCLoc,
SourceLocation TildeLoc,
PseudoDestructorTypeStorage DestroyedType,
bool HasTrailingLParen);
ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
CXXScopeSpec &SS,
UnqualifiedId &FirstTypeName,
SourceLocation CCLoc,
SourceLocation TildeLoc,
UnqualifiedId &SecondTypeName,
bool HasTrailingLParen);
ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
SourceLocation TildeLoc,
const DeclSpec& DS,
bool HasTrailingLParen);
/// MaybeCreateExprWithCleanups - If the current full-expression
/// requires any cleanups, surround it with a ExprWithCleanups node.
/// Otherwise, just returns the passed-in expression.
Expr *MaybeCreateExprWithCleanups(Expr *SubExpr);
Stmt *MaybeCreateStmtWithCleanups(Stmt *SubStmt);
ExprResult MaybeCreateExprWithCleanups(ExprResult SubExpr);
/// \brief Finish a full-expression, using the expression's own location
/// as the point of completion (or an invalid location if it is null).
ExprResult ActOnFinishFullExpr(Expr *Expr) {
SourceLocation Loc = Expr ? Expr->getExprLoc() : SourceLocation();
return ActOnFinishFullExpr(Expr, Loc);
}
ExprResult ActOnFinishFullExpr(Expr *Expr, SourceLocation CC,
bool DiscardedValue = false,
bool IsConstexpr = false);
StmtResult ActOnFinishFullStmt(Stmt *Stmt);
// Marks SS invalid if it represents an incomplete type.
bool RequireCompleteDeclContext(CXXScopeSpec &SS, DeclContext *DC);
DeclContext *computeDeclContext(QualType T);
DeclContext *computeDeclContext(const CXXScopeSpec &SS,
bool EnteringContext = false);
bool isDependentScopeSpecifier(const CXXScopeSpec &SS);
CXXRecordDecl *getCurrentInstantiationOf(NestedNameSpecifier *NNS);
/// \brief The parser has parsed a global nested-name-specifier '::'.
///
/// \param S The scope in which this nested-name-specifier occurs.
///
/// \param CCLoc The location of the '::'.
///
/// \param SS The nested-name-specifier, which will be updated in-place
/// to reflect the parsed nested-name-specifier.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnCXXGlobalScopeSpecifier(Scope *S, SourceLocation CCLoc,
CXXScopeSpec &SS);
bool isAcceptableNestedNameSpecifier(const NamedDecl *SD);
NamedDecl *FindFirstQualifierInScope(Scope *S, NestedNameSpecifier *NNS);
bool isNonTypeNestedNameSpecifier(Scope *S, CXXScopeSpec &SS,
SourceLocation IdLoc,
IdentifierInfo &II,
ParsedType ObjectType);
bool BuildCXXNestedNameSpecifier(Scope *S,
IdentifierInfo &Identifier,
SourceLocation IdentifierLoc,
SourceLocation CCLoc,
QualType ObjectType,
bool EnteringContext,
CXXScopeSpec &SS,
NamedDecl *ScopeLookupResult,
bool ErrorRecoveryLookup);
/// \brief The parser has parsed a nested-name-specifier 'identifier::'.
///
/// \param S The scope in which this nested-name-specifier occurs.
///
/// \param Identifier The identifier preceding the '::'.
///
/// \param IdentifierLoc The location of the identifier.
///
/// \param CCLoc The location of the '::'.
///
/// \param ObjectType The type of the object, if we're parsing
/// nested-name-specifier in a member access expression.
///
/// \param EnteringContext Whether we're entering the context nominated by
/// this nested-name-specifier.
///
/// \param SS The nested-name-specifier, which is both an input
/// parameter (the nested-name-specifier before this type) and an
/// output parameter (containing the full nested-name-specifier,
/// including this new type).
///
/// \returns true if an error occurred, false otherwise.
bool ActOnCXXNestedNameSpecifier(Scope *S,
IdentifierInfo &Identifier,
SourceLocation IdentifierLoc,
SourceLocation CCLoc,
ParsedType ObjectType,
bool EnteringContext,
CXXScopeSpec &SS);
ExprResult ActOnDecltypeExpression(Expr *E);
bool ActOnCXXNestedNameSpecifierDecltype(CXXScopeSpec &SS,
const DeclSpec &DS,
SourceLocation ColonColonLoc);
bool IsInvalidUnlessNestedName(Scope *S, CXXScopeSpec &SS,
IdentifierInfo &Identifier,
SourceLocation IdentifierLoc,
SourceLocation ColonLoc,
ParsedType ObjectType,
bool EnteringContext);
/// \brief The parser has parsed a nested-name-specifier
/// 'template[opt] template-name < template-args >::'.
///
/// \param S The scope in which this nested-name-specifier occurs.
///
/// \param SS The nested-name-specifier, which is both an input
/// parameter (the nested-name-specifier before this type) and an
/// output parameter (containing the full nested-name-specifier,
/// including this new type).
///
/// \param TemplateKWLoc the location of the 'template' keyword, if any.
/// \param TemplateName the template name.
/// \param TemplateNameLoc The location of the template name.
/// \param LAngleLoc The location of the opening angle bracket ('<').
/// \param TemplateArgs The template arguments.
/// \param RAngleLoc The location of the closing angle bracket ('>').
/// \param CCLoc The location of the '::'.
///
/// \param EnteringContext Whether we're entering the context of the
/// nested-name-specifier.
///
///
/// \returns true if an error occurred, false otherwise.
bool ActOnCXXNestedNameSpecifier(Scope *S,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
TemplateTy TemplateName,
SourceLocation TemplateNameLoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgs,
SourceLocation RAngleLoc,
SourceLocation CCLoc,
bool EnteringContext);
/// \brief Given a C++ nested-name-specifier, produce an annotation value
/// that the parser can use later to reconstruct the given
/// nested-name-specifier.
///
/// \param SS A nested-name-specifier.
///
/// \returns A pointer containing all of the information in the
/// nested-name-specifier \p SS.
void *SaveNestedNameSpecifierAnnotation(CXXScopeSpec &SS);
/// \brief Given an annotation pointer for a nested-name-specifier, restore
/// the nested-name-specifier structure.
///
/// \param Annotation The annotation pointer, produced by
/// \c SaveNestedNameSpecifierAnnotation().
///
/// \param AnnotationRange The source range corresponding to the annotation.
///
/// \param SS The nested-name-specifier that will be updated with the contents
/// of the annotation pointer.
void RestoreNestedNameSpecifierAnnotation(void *Annotation,
SourceRange AnnotationRange,
CXXScopeSpec &SS);
bool ShouldEnterDeclaratorScope(Scope *S, const CXXScopeSpec &SS);
/// ActOnCXXEnterDeclaratorScope - Called when a C++ scope specifier (global
/// scope or nested-name-specifier) is parsed, part of a declarator-id.
/// After this method is called, according to [C++ 3.4.3p3], names should be
/// looked up in the declarator-id's scope, until the declarator is parsed and
/// ActOnCXXExitDeclaratorScope is called.
/// The 'SS' should be a non-empty valid CXXScopeSpec.
bool ActOnCXXEnterDeclaratorScope(Scope *S, CXXScopeSpec &SS);
/// ActOnCXXExitDeclaratorScope - Called when a declarator that previously
/// invoked ActOnCXXEnterDeclaratorScope(), is finished. 'SS' is the same
/// CXXScopeSpec that was passed to ActOnCXXEnterDeclaratorScope as well.
/// Used to indicate that names should revert to being looked up in the
/// defining scope.
void ActOnCXXExitDeclaratorScope(Scope *S, const CXXScopeSpec &SS);
/// ActOnCXXEnterDeclInitializer - Invoked when we are about to parse an
/// initializer for the declaration 'Dcl'.
/// After this method is called, according to [C++ 3.4.1p13], if 'Dcl' is a
/// static data member of class X, names should be looked up in the scope of
/// class X.
void ActOnCXXEnterDeclInitializer(Scope *S, Decl *Dcl);
/// ActOnCXXExitDeclInitializer - Invoked after we are finished parsing an
/// initializer for the declaration 'Dcl'.
void ActOnCXXExitDeclInitializer(Scope *S, Decl *Dcl);
/// \brief Create a new lambda closure type.
CXXRecordDecl *createLambdaClosureType(SourceRange IntroducerRange,
TypeSourceInfo *Info,
bool KnownDependent);
/// \brief Start the definition of a lambda expression.
CXXMethodDecl *startLambdaDefinition(CXXRecordDecl *Class,
SourceRange IntroducerRange,
TypeSourceInfo *MethodType,
SourceLocation EndLoc,
ArrayRef<ParmVarDecl *> Params);
/// \brief Introduce the scope for a lambda expression.
sema::LambdaScopeInfo *enterLambdaScope(CXXMethodDecl *CallOperator,
SourceRange IntroducerRange,
LambdaCaptureDefault CaptureDefault,
bool ExplicitParams,
bool ExplicitResultType,
bool Mutable);
/// \brief Check and build an init-capture with the specified name and
/// initializer.
FieldDecl *checkInitCapture(SourceLocation Loc, bool ByRef,
IdentifierInfo *Id, Expr *Init);
/// \brief Note that we have finished the explicit captures for the
/// given lambda.
void finishLambdaExplicitCaptures(sema::LambdaScopeInfo *LSI);
/// \brief Introduce the lambda parameters into scope.
void addLambdaParameters(CXXMethodDecl *CallOperator, Scope *CurScope);
/// \brief Deduce a block or lambda's return type based on the return
/// statements present in the body.
void deduceClosureReturnType(sema::CapturingScopeInfo &CSI);
/// ActOnStartOfLambdaDefinition - This is called just before we start
/// parsing the body of a lambda; it analyzes the explicit captures and
/// arguments, and sets up various data-structures for the body of the
/// lambda.
void ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro,
Declarator &ParamInfo, Scope *CurScope);
/// ActOnLambdaError - If there is an error parsing a lambda, this callback
/// is invoked to pop the information about the lambda.
void ActOnLambdaError(SourceLocation StartLoc, Scope *CurScope,
bool IsInstantiation = false);
/// ActOnLambdaExpr - This is called when the body of a lambda expression
/// was successfully completed.
ExprResult ActOnLambdaExpr(SourceLocation StartLoc, Stmt *Body,
Scope *CurScope,
bool IsInstantiation = false);
/// \brief Define the "body" of the conversion from a lambda object to a
/// function pointer.
///
/// This routine doesn't actually define a sensible body; rather, it fills
/// in the initialization expression needed to copy the lambda object into
/// the block, and IR generation actually generates the real body of the
/// block pointer conversion.
void DefineImplicitLambdaToFunctionPointerConversion(
SourceLocation CurrentLoc, CXXConversionDecl *Conv);
/// \brief Define the "body" of the conversion from a lambda object to a
/// block pointer.
///
/// This routine doesn't actually define a sensible body; rather, it fills
/// in the initialization expression needed to copy the lambda object into
/// the block, and IR generation actually generates the real body of the
/// block pointer conversion.
void DefineImplicitLambdaToBlockPointerConversion(SourceLocation CurrentLoc,
CXXConversionDecl *Conv);
ExprResult BuildBlockForLambdaConversion(SourceLocation CurrentLocation,
SourceLocation ConvLocation,
CXXConversionDecl *Conv,
Expr *Src);
// ParseObjCStringLiteral - Parse Objective-C string literals.
ExprResult ParseObjCStringLiteral(SourceLocation *AtLocs,
Expr **Strings,
unsigned NumStrings);
ExprResult BuildObjCStringLiteral(SourceLocation AtLoc, StringLiteral *S);
/// BuildObjCNumericLiteral - builds an ObjCBoxedExpr AST node for the
/// numeric literal expression. Type of the expression will be "NSNumber *"
/// or "id" if NSNumber is unavailable.
ExprResult BuildObjCNumericLiteral(SourceLocation AtLoc, Expr *Number);
ExprResult ActOnObjCBoolLiteral(SourceLocation AtLoc, SourceLocation ValueLoc,
bool Value);
ExprResult BuildObjCArrayLiteral(SourceRange SR, MultiExprArg Elements);
/// BuildObjCBoxedExpr - builds an ObjCBoxedExpr AST node for the
/// '@' prefixed parenthesized expression. The type of the expression will
/// either be "NSNumber *" or "NSString *" depending on the type of
/// ValueType, which is allowed to be a built-in numeric type or
/// "char *" or "const char *".
ExprResult BuildObjCBoxedExpr(SourceRange SR, Expr *ValueExpr);
ExprResult BuildObjCSubscriptExpression(SourceLocation RB, Expr *BaseExpr,
Expr *IndexExpr,
ObjCMethodDecl *getterMethod,
ObjCMethodDecl *setterMethod);
ExprResult BuildObjCDictionaryLiteral(SourceRange SR,
ObjCDictionaryElement *Elements,
unsigned NumElements);
ExprResult BuildObjCEncodeExpression(SourceLocation AtLoc,
TypeSourceInfo *EncodedTypeInfo,
SourceLocation RParenLoc);
ExprResult BuildCXXMemberCallExpr(Expr *Exp, NamedDecl *FoundDecl,
CXXConversionDecl *Method,
bool HadMultipleCandidates);
ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc,
SourceLocation EncodeLoc,
SourceLocation LParenLoc,
ParsedType Ty,
SourceLocation RParenLoc);
/// ParseObjCSelectorExpression - Build selector expression for \@selector
ExprResult ParseObjCSelectorExpression(Selector Sel,
SourceLocation AtLoc,
SourceLocation SelLoc,
SourceLocation LParenLoc,
SourceLocation RParenLoc);
/// ParseObjCProtocolExpression - Build protocol expression for \@protocol
ExprResult ParseObjCProtocolExpression(IdentifierInfo * ProtocolName,
SourceLocation AtLoc,
SourceLocation ProtoLoc,
SourceLocation LParenLoc,
SourceLocation ProtoIdLoc,
SourceLocation RParenLoc);
//===--------------------------------------------------------------------===//
// C++ Declarations
//
Decl *ActOnStartLinkageSpecification(Scope *S,
SourceLocation ExternLoc,
SourceLocation LangLoc,
StringRef Lang,
SourceLocation LBraceLoc);
Decl *ActOnFinishLinkageSpecification(Scope *S,
Decl *LinkageSpec,
SourceLocation RBraceLoc);
//===--------------------------------------------------------------------===//
// C++ Classes
//
bool isCurrentClassName(const IdentifierInfo &II, Scope *S,
const CXXScopeSpec *SS = 0);
bool ActOnAccessSpecifier(AccessSpecifier Access,
SourceLocation ASLoc,
SourceLocation ColonLoc,
AttributeList *Attrs = 0);
NamedDecl *ActOnCXXMemberDeclarator(Scope *S, AccessSpecifier AS,
Declarator &D,
MultiTemplateParamsArg TemplateParameterLists,
Expr *BitfieldWidth, const VirtSpecifiers &VS,
InClassInitStyle InitStyle);
void ActOnCXXInClassMemberInitializer(Decl *VarDecl, SourceLocation EqualLoc,
Expr *Init);
MemInitResult ActOnMemInitializer(Decl *ConstructorD,
Scope *S,
CXXScopeSpec &SS,
IdentifierInfo *MemberOrBase,
ParsedType TemplateTypeTy,
const DeclSpec &DS,
SourceLocation IdLoc,
SourceLocation LParenLoc,
ArrayRef<Expr *> Args,
SourceLocation RParenLoc,
SourceLocation EllipsisLoc);
MemInitResult ActOnMemInitializer(Decl *ConstructorD,
Scope *S,
CXXScopeSpec &SS,
IdentifierInfo *MemberOrBase,
ParsedType TemplateTypeTy,
const DeclSpec &DS,
SourceLocation IdLoc,
Expr *InitList,
SourceLocation EllipsisLoc);
MemInitResult BuildMemInitializer(Decl *ConstructorD,
Scope *S,
CXXScopeSpec &SS,
IdentifierInfo *MemberOrBase,
ParsedType TemplateTypeTy,
const DeclSpec &DS,
SourceLocation IdLoc,
Expr *Init,
SourceLocation EllipsisLoc);
MemInitResult BuildMemberInitializer(ValueDecl *Member,
Expr *Init,
SourceLocation IdLoc);
MemInitResult BuildBaseInitializer(QualType BaseType,
TypeSourceInfo *BaseTInfo,
Expr *Init,
CXXRecordDecl *ClassDecl,
SourceLocation EllipsisLoc);
MemInitResult BuildDelegatingInitializer(TypeSourceInfo *TInfo,
Expr *Init,
CXXRecordDecl *ClassDecl);
bool SetDelegatingInitializer(CXXConstructorDecl *Constructor,
CXXCtorInitializer *Initializer);
bool SetCtorInitializers(CXXConstructorDecl *Constructor, bool AnyErrors,
ArrayRef<CXXCtorInitializer *> Initializers = None);
void SetIvarInitializers(ObjCImplementationDecl *ObjCImplementation);
/// MarkBaseAndMemberDestructorsReferenced - Given a record decl,
/// mark all the non-trivial destructors of its members and bases as
/// referenced.
void MarkBaseAndMemberDestructorsReferenced(SourceLocation Loc,
CXXRecordDecl *Record);
/// \brief The list of classes whose vtables have been used within
/// this translation unit, and the source locations at which the
/// first use occurred.
typedef std::pair<CXXRecordDecl*, SourceLocation> VTableUse;
/// \brief The list of vtables that are required but have not yet been
/// materialized.
SmallVector<VTableUse, 16> VTableUses;
/// \brief The set of classes whose vtables have been used within
/// this translation unit, and a bit that will be true if the vtable is
/// required to be emitted (otherwise, it should be emitted only if needed
/// by code generation).
llvm::DenseMap<CXXRecordDecl *, bool> VTablesUsed;
/// \brief Load any externally-stored vtable uses.
void LoadExternalVTableUses();
typedef LazyVector<CXXRecordDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadDynamicClasses, 2, 2>
DynamicClassesType;
/// \brief A list of all of the dynamic classes in this translation
/// unit.
DynamicClassesType DynamicClasses;
/// \brief Note that the vtable for the given class was used at the
/// given location.
void MarkVTableUsed(SourceLocation Loc, CXXRecordDecl *Class,
bool DefinitionRequired = false);
/// \brief Mark the exception specifications of all virtual member functions
/// in the given class as needed.
void MarkVirtualMemberExceptionSpecsNeeded(SourceLocation Loc,
const CXXRecordDecl *RD);
/// MarkVirtualMembersReferenced - Will mark all members of the given
/// CXXRecordDecl referenced.
void MarkVirtualMembersReferenced(SourceLocation Loc,
const CXXRecordDecl *RD);
/// \brief Define all of the vtables that have been used in this
/// translation unit and reference any virtual members used by those
/// vtables.
///
/// \returns true if any work was done, false otherwise.
bool DefineUsedVTables();
void AddImplicitlyDeclaredMembersToClass(CXXRecordDecl *ClassDecl);
void ActOnMemInitializers(Decl *ConstructorDecl,
SourceLocation ColonLoc,
ArrayRef<CXXCtorInitializer*> MemInits,
bool AnyErrors);
void CheckCompletedCXXClass(CXXRecordDecl *Record);
void ActOnFinishCXXMemberSpecification(Scope* S, SourceLocation RLoc,
Decl *TagDecl,
SourceLocation LBrac,
SourceLocation RBrac,
AttributeList *AttrList);
void ActOnFinishCXXMemberDecls();
void ActOnReenterTemplateScope(Scope *S, Decl *Template);
void ActOnReenterDeclaratorTemplateScope(Scope *S, DeclaratorDecl *D);
void ActOnStartDelayedMemberDeclarations(Scope *S, Decl *Record);
void ActOnStartDelayedCXXMethodDeclaration(Scope *S, Decl *Method);
void ActOnDelayedCXXMethodParameter(Scope *S, Decl *Param);
void ActOnFinishDelayedMemberDeclarations(Scope *S, Decl *Record);
void ActOnFinishDelayedCXXMethodDeclaration(Scope *S, Decl *Method);
void ActOnFinishDelayedMemberInitializers(Decl *Record);
void MarkAsLateParsedTemplate(FunctionDecl *FD, bool Flag = true);
bool IsInsideALocalClassWithinATemplateFunction();
Decl *ActOnStaticAssertDeclaration(SourceLocation StaticAssertLoc,
Expr *AssertExpr,
Expr *AssertMessageExpr,
SourceLocation RParenLoc);
Decl *BuildStaticAssertDeclaration(SourceLocation StaticAssertLoc,
Expr *AssertExpr,
StringLiteral *AssertMessageExpr,
SourceLocation RParenLoc,
bool Failed);
FriendDecl *CheckFriendTypeDecl(SourceLocation LocStart,
SourceLocation FriendLoc,
TypeSourceInfo *TSInfo);
Decl *ActOnFriendTypeDecl(Scope *S, const DeclSpec &DS,
MultiTemplateParamsArg TemplateParams);
NamedDecl *ActOnFriendFunctionDecl(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParams);
QualType CheckConstructorDeclarator(Declarator &D, QualType R,
StorageClass& SC);
void CheckConstructor(CXXConstructorDecl *Constructor);
QualType CheckDestructorDeclarator(Declarator &D, QualType R,
StorageClass& SC);
bool CheckDestructor(CXXDestructorDecl *Destructor);
void CheckConversionDeclarator(Declarator &D, QualType &R,
StorageClass& SC);
Decl *ActOnConversionDeclarator(CXXConversionDecl *Conversion);
void CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD);
void CheckExplicitlyDefaultedMemberExceptionSpec(CXXMethodDecl *MD,
const FunctionProtoType *T);
void CheckDelayedExplicitlyDefaultedMemberExceptionSpecs();
//===--------------------------------------------------------------------===//
// C++ Derived Classes
//
/// ActOnBaseSpecifier - Parsed a base specifier
CXXBaseSpecifier *CheckBaseSpecifier(CXXRecordDecl *Class,
SourceRange SpecifierRange,
bool Virtual, AccessSpecifier Access,
TypeSourceInfo *TInfo,
SourceLocation EllipsisLoc);
BaseResult ActOnBaseSpecifier(Decl *classdecl,
SourceRange SpecifierRange,
ParsedAttributes &Attrs,
bool Virtual, AccessSpecifier Access,
ParsedType basetype,
SourceLocation BaseLoc,
SourceLocation EllipsisLoc);
bool AttachBaseSpecifiers(CXXRecordDecl *Class, CXXBaseSpecifier **Bases,
unsigned NumBases);
void ActOnBaseSpecifiers(Decl *ClassDecl, CXXBaseSpecifier **Bases,
unsigned NumBases);
bool IsDerivedFrom(QualType Derived, QualType Base);
bool IsDerivedFrom(QualType Derived, QualType Base, CXXBasePaths &Paths);
// FIXME: I don't like this name.
void BuildBasePathArray(const CXXBasePaths &Paths, CXXCastPath &BasePath);
bool BasePathInvolvesVirtualBase(const CXXCastPath &BasePath);
bool CheckDerivedToBaseConversion(QualType Derived, QualType Base,
SourceLocation Loc, SourceRange Range,
CXXCastPath *BasePath = 0,
bool IgnoreAccess = false);
bool CheckDerivedToBaseConversion(QualType Derived, QualType Base,
unsigned InaccessibleBaseID,
unsigned AmbigiousBaseConvID,
SourceLocation Loc, SourceRange Range,
DeclarationName Name,
CXXCastPath *BasePath);
std::string getAmbiguousPathsDisplayString(CXXBasePaths &Paths);
bool CheckOverridingFunctionAttributes(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
/// CheckOverridingFunctionReturnType - Checks whether the return types are
/// covariant, according to C++ [class.virtual]p5.
bool CheckOverridingFunctionReturnType(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
/// CheckOverridingFunctionExceptionSpec - Checks whether the exception
/// spec is a subset of base spec.
bool CheckOverridingFunctionExceptionSpec(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
bool CheckPureMethod(CXXMethodDecl *Method, SourceRange InitRange);
/// CheckOverrideControl - Check C++11 override control semantics.
void CheckOverrideControl(Decl *D);
/// CheckForFunctionMarkedFinal - Checks whether a virtual member function
/// overrides a virtual member function marked 'final', according to
/// C++11 [class.virtual]p4.
bool CheckIfOverriddenFunctionIsMarkedFinal(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
//===--------------------------------------------------------------------===//
// C++ Access Control
//
enum AccessResult {
AR_accessible,
AR_inaccessible,
AR_dependent,
AR_delayed
};
bool SetMemberAccessSpecifier(NamedDecl *MemberDecl,
NamedDecl *PrevMemberDecl,
AccessSpecifier LexicalAS);
AccessResult CheckUnresolvedMemberAccess(UnresolvedMemberExpr *E,
DeclAccessPair FoundDecl);
AccessResult CheckUnresolvedLookupAccess(UnresolvedLookupExpr *E,
DeclAccessPair FoundDecl);
AccessResult CheckAllocationAccess(SourceLocation OperatorLoc,
SourceRange PlacementRange,
CXXRecordDecl *NamingClass,
DeclAccessPair FoundDecl,
bool Diagnose = true);
AccessResult CheckConstructorAccess(SourceLocation Loc,
CXXConstructorDecl *D,
const InitializedEntity &Entity,
AccessSpecifier Access,
bool IsCopyBindingRefToTemp = false);
AccessResult CheckConstructorAccess(SourceLocation Loc,
CXXConstructorDecl *D,
const InitializedEntity &Entity,
AccessSpecifier Access,
const PartialDiagnostic &PDiag);
AccessResult CheckDestructorAccess(SourceLocation Loc,
CXXDestructorDecl *Dtor,
const PartialDiagnostic &PDiag,
QualType objectType = QualType());
AccessResult CheckFriendAccess(NamedDecl *D);
AccessResult CheckMemberOperatorAccess(SourceLocation Loc,
Expr *ObjectExpr,
Expr *ArgExpr,
DeclAccessPair FoundDecl);
AccessResult CheckAddressOfMemberAccess(Expr *OvlExpr,
DeclAccessPair FoundDecl);
AccessResult CheckBaseClassAccess(SourceLocation AccessLoc,
QualType Base, QualType Derived,
const CXXBasePath &Path,
unsigned DiagID,
bool ForceCheck = false,
bool ForceUnprivileged = false);
void CheckLookupAccess(const LookupResult &R);
bool IsSimplyAccessible(NamedDecl *decl, DeclContext *Ctx);
bool isSpecialMemberAccessibleForDeletion(CXXMethodDecl *decl,
AccessSpecifier access,
QualType objectType);
void HandleDependentAccessCheck(const DependentDiagnostic &DD,
const MultiLevelTemplateArgumentList &TemplateArgs);
void PerformDependentDiagnostics(const DeclContext *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs);
void HandleDelayedAccessCheck(sema::DelayedDiagnostic &DD, Decl *Ctx);
/// \brief When true, access checking violations are treated as SFINAE
/// failures rather than hard errors.
bool AccessCheckingSFINAE;
enum AbstractDiagSelID {
AbstractNone = -1,
AbstractReturnType,
AbstractParamType,
AbstractVariableType,
AbstractFieldType,
AbstractIvarType,
AbstractArrayType
};
bool RequireNonAbstractType(SourceLocation Loc, QualType T,
TypeDiagnoser &Diagnoser);
template<typename T1>
bool RequireNonAbstractType(SourceLocation Loc, QualType T,
unsigned DiagID,
const T1 &Arg1) {
BoundTypeDiagnoser1<T1> Diagnoser(DiagID, Arg1);
return RequireNonAbstractType(Loc, T, Diagnoser);
}
template<typename T1, typename T2>
bool RequireNonAbstractType(SourceLocation Loc, QualType T,
unsigned DiagID,
const T1 &Arg1, const T2 &Arg2) {
BoundTypeDiagnoser2<T1, T2> Diagnoser(DiagID, Arg1, Arg2);
return RequireNonAbstractType(Loc, T, Diagnoser);
}
template<typename T1, typename T2, typename T3>
bool RequireNonAbstractType(SourceLocation Loc, QualType T,
unsigned DiagID,
const T1 &Arg1, const T2 &Arg2, const T3 &Arg3) {
BoundTypeDiagnoser3<T1, T2, T3> Diagnoser(DiagID, Arg1, Arg2, Arg3);
return RequireNonAbstractType(Loc, T, Diagnoser);
}
void DiagnoseAbstractType(const CXXRecordDecl *RD);
bool RequireNonAbstractType(SourceLocation Loc, QualType T, unsigned DiagID,
AbstractDiagSelID SelID = AbstractNone);
//===--------------------------------------------------------------------===//
// C++ Overloaded Operators [C++ 13.5]
//
bool CheckOverloadedOperatorDeclaration(FunctionDecl *FnDecl);
bool CheckLiteralOperatorDeclaration(FunctionDecl *FnDecl);
//===--------------------------------------------------------------------===//
// C++ Templates [C++ 14]
//
void FilterAcceptableTemplateNames(LookupResult &R,
bool AllowFunctionTemplates = true);
bool hasAnyAcceptableTemplateNames(LookupResult &R,
bool AllowFunctionTemplates = true);
void LookupTemplateName(LookupResult &R, Scope *S, CXXScopeSpec &SS,
QualType ObjectType, bool EnteringContext,
bool &MemberOfUnknownSpecialization);
TemplateNameKind isTemplateName(Scope *S,
CXXScopeSpec &SS,
bool hasTemplateKeyword,
UnqualifiedId &Name,
ParsedType ObjectType,
bool EnteringContext,
TemplateTy &Template,
bool &MemberOfUnknownSpecialization);
bool DiagnoseUnknownTemplateName(const IdentifierInfo &II,
SourceLocation IILoc,
Scope *S,
const CXXScopeSpec *SS,
TemplateTy &SuggestedTemplate,
TemplateNameKind &SuggestedKind);
void DiagnoseTemplateParameterShadow(SourceLocation Loc, Decl *PrevDecl);
TemplateDecl *AdjustDeclIfTemplate(Decl *&Decl);
Decl *ActOnTypeParameter(Scope *S, bool Typename, bool Ellipsis,
SourceLocation EllipsisLoc,
SourceLocation KeyLoc,
IdentifierInfo *ParamName,
SourceLocation ParamNameLoc,
unsigned Depth, unsigned Position,
SourceLocation EqualLoc,
ParsedType DefaultArg);
QualType CheckNonTypeTemplateParameterType(QualType T, SourceLocation Loc);
Decl *ActOnNonTypeTemplateParameter(Scope *S, Declarator &D,
unsigned Depth,
unsigned Position,
SourceLocation EqualLoc,
Expr *DefaultArg);
Decl *ActOnTemplateTemplateParameter(Scope *S,
SourceLocation TmpLoc,
TemplateParameterList *Params,
SourceLocation EllipsisLoc,
IdentifierInfo *ParamName,
SourceLocation ParamNameLoc,
unsigned Depth,
unsigned Position,
SourceLocation EqualLoc,
ParsedTemplateArgument DefaultArg);
TemplateParameterList *
ActOnTemplateParameterList(unsigned Depth,
SourceLocation ExportLoc,
SourceLocation TemplateLoc,
SourceLocation LAngleLoc,
Decl **Params, unsigned NumParams,
SourceLocation RAngleLoc);
/// \brief The context in which we are checking a template parameter
/// list.
enum TemplateParamListContext {
TPC_ClassTemplate,
TPC_FunctionTemplate,
TPC_ClassTemplateMember,
TPC_FriendFunctionTemplate,
TPC_FriendFunctionTemplateDefinition,
TPC_TypeAliasTemplate
};
bool CheckTemplateParameterList(TemplateParameterList *NewParams,
TemplateParameterList *OldParams,
TemplateParamListContext TPC);
TemplateParameterList *
MatchTemplateParametersToScopeSpecifier(SourceLocation DeclStartLoc,
SourceLocation DeclLoc,
const CXXScopeSpec &SS,
TemplateParameterList **ParamLists,
unsigned NumParamLists,
bool IsFriend,
bool &IsExplicitSpecialization,
bool &Invalid);
DeclResult CheckClassTemplate(Scope *S, unsigned TagSpec, TagUseKind TUK,
SourceLocation KWLoc, CXXScopeSpec &SS,
IdentifierInfo *Name, SourceLocation NameLoc,
AttributeList *Attr,
TemplateParameterList *TemplateParams,
AccessSpecifier AS,
SourceLocation ModulePrivateLoc,
unsigned NumOuterTemplateParamLists,
TemplateParameterList **OuterTemplateParamLists);
void translateTemplateArguments(const ASTTemplateArgsPtr &In,
TemplateArgumentListInfo &Out);
void NoteAllFoundTemplates(TemplateName Name);
QualType CheckTemplateIdType(TemplateName Template,
SourceLocation TemplateLoc,
TemplateArgumentListInfo &TemplateArgs);
TypeResult
ActOnTemplateIdType(CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
TemplateTy Template, SourceLocation TemplateLoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgs,
SourceLocation RAngleLoc,
bool IsCtorOrDtorName = false);
/// \brief Parsed an elaborated-type-specifier that refers to a template-id,
/// such as \c class T::template apply<U>.
TypeResult ActOnTagTemplateIdType(TagUseKind TUK,
TypeSpecifierType TagSpec,
SourceLocation TagLoc,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
TemplateTy TemplateD,
SourceLocation TemplateLoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgsIn,
SourceLocation RAngleLoc);
ExprResult BuildTemplateIdExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
LookupResult &R,
bool RequiresADL,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult BuildQualifiedTemplateIdExpr(CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs);
TemplateNameKind ActOnDependentTemplateName(Scope *S,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
UnqualifiedId &Name,
ParsedType ObjectType,
bool EnteringContext,
TemplateTy &Template);
DeclResult
ActOnClassTemplateSpecialization(Scope *S, unsigned TagSpec, TagUseKind TUK,
SourceLocation KWLoc,
SourceLocation ModulePrivateLoc,
CXXScopeSpec &SS,
TemplateTy Template,
SourceLocation TemplateNameLoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgs,
SourceLocation RAngleLoc,
AttributeList *Attr,
MultiTemplateParamsArg TemplateParameterLists);
Decl *ActOnTemplateDeclarator(Scope *S,
MultiTemplateParamsArg TemplateParameterLists,
Declarator &D);
Decl *ActOnStartOfFunctionTemplateDef(Scope *FnBodyScope,
MultiTemplateParamsArg TemplateParameterLists,
Declarator &D);
bool
CheckSpecializationInstantiationRedecl(SourceLocation NewLoc,
TemplateSpecializationKind NewTSK,
NamedDecl *PrevDecl,
TemplateSpecializationKind PrevTSK,
SourceLocation PrevPtOfInstantiation,
bool &SuppressNew);
bool CheckDependentFunctionTemplateSpecialization(FunctionDecl *FD,
const TemplateArgumentListInfo &ExplicitTemplateArgs,
LookupResult &Previous);
bool CheckFunctionTemplateSpecialization(FunctionDecl *FD,
TemplateArgumentListInfo *ExplicitTemplateArgs,
LookupResult &Previous);
bool CheckMemberSpecialization(NamedDecl *Member, LookupResult &Previous);
DeclResult
ActOnExplicitInstantiation(Scope *S,
SourceLocation ExternLoc,
SourceLocation TemplateLoc,
unsigned TagSpec,
SourceLocation KWLoc,
const CXXScopeSpec &SS,
TemplateTy Template,
SourceLocation TemplateNameLoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgs,
SourceLocation RAngleLoc,
AttributeList *Attr);
DeclResult
ActOnExplicitInstantiation(Scope *S,
SourceLocation ExternLoc,
SourceLocation TemplateLoc,
unsigned TagSpec,
SourceLocation KWLoc,
CXXScopeSpec &SS,
IdentifierInfo *Name,
SourceLocation NameLoc,
AttributeList *Attr);
DeclResult ActOnExplicitInstantiation(Scope *S,
SourceLocation ExternLoc,
SourceLocation TemplateLoc,
Declarator &D);
TemplateArgumentLoc
SubstDefaultTemplateArgumentIfAvailable(TemplateDecl *Template,
SourceLocation TemplateLoc,
SourceLocation RAngleLoc,
Decl *Param,
SmallVectorImpl<TemplateArgument> &Converted);
/// \brief Specifies the context in which a particular template
/// argument is being checked.
enum CheckTemplateArgumentKind {
/// \brief The template argument was specified in the code or was
/// instantiated with some deduced template arguments.
CTAK_Specified,
/// \brief The template argument was deduced via template argument
/// deduction.
CTAK_Deduced,
/// \brief The template argument was deduced from an array bound
/// via template argument deduction.
CTAK_DeducedFromArrayBound
};
bool CheckTemplateArgument(NamedDecl *Param,
const TemplateArgumentLoc &Arg,
NamedDecl *Template,
SourceLocation TemplateLoc,
SourceLocation RAngleLoc,
unsigned ArgumentPackIndex,
SmallVectorImpl<TemplateArgument> &Converted,
CheckTemplateArgumentKind CTAK = CTAK_Specified);
  /// \brief Check that the given template arguments can be provided to
/// the given template, converting the arguments along the way.
///
/// \param Template The template to which the template arguments are being
/// provided.
///
/// \param TemplateLoc The location of the template name in the source.
///
/// \param TemplateArgs The list of template arguments. If the template is
/// a template template parameter, this function may extend the set of
/// template arguments to also include substituted, defaulted template
/// arguments.
///
/// \param PartialTemplateArgs True if the list of template arguments is
/// intentionally partial, e.g., because we're checking just the initial
/// set of template arguments.
///
/// \param Converted Will receive the converted, canonicalized template
/// arguments.
///
///
/// \param ExpansionIntoFixedList If non-NULL, will be set true to indicate
/// when the template arguments contain a pack expansion that is being
/// expanded into a fixed parameter list.
///
/// \returns True if an error occurred, false otherwise.
bool CheckTemplateArgumentList(TemplateDecl *Template,
SourceLocation TemplateLoc,
TemplateArgumentListInfo &TemplateArgs,
bool PartialTemplateArgs,
SmallVectorImpl<TemplateArgument> &Converted,
bool *ExpansionIntoFixedList = 0);
bool CheckTemplateTypeArgument(TemplateTypeParmDecl *Param,
const TemplateArgumentLoc &Arg,
SmallVectorImpl<TemplateArgument> &Converted);
bool CheckTemplateArgument(TemplateTypeParmDecl *Param,
TypeSourceInfo *Arg);
ExprResult CheckTemplateArgument(NonTypeTemplateParmDecl *Param,
QualType InstantiatedParamType, Expr *Arg,
TemplateArgument &Converted,
CheckTemplateArgumentKind CTAK = CTAK_Specified);
bool CheckTemplateArgument(TemplateTemplateParmDecl *Param,
const TemplateArgumentLoc &Arg,
unsigned ArgumentPackIndex);
ExprResult
BuildExpressionFromDeclTemplateArgument(const TemplateArgument &Arg,
QualType ParamType,
SourceLocation Loc);
ExprResult
BuildExpressionFromIntegralTemplateArgument(const TemplateArgument &Arg,
SourceLocation Loc);
/// \brief Enumeration describing how template parameter lists are compared
/// for equality.
///
/// Passed as the \c Kind argument to TemplateParameterListsAreEqual.
enum TemplateParameterListEqualKind {
/// \brief We are matching the template parameter lists of two templates
/// that might be redeclarations.
///
/// \code
/// template<typename T> struct X;
/// template<typename T> struct X;
/// \endcode
TPL_TemplateMatch,
/// \brief We are matching the template parameter lists of two template
/// template parameters as part of matching the template parameter lists
/// of two templates that might be redeclarations.
///
/// \code
/// template<template<int I> class TT> struct X;
/// template<template<int Value> class Other> struct X;
/// \endcode
TPL_TemplateTemplateParmMatch,
/// \brief We are matching the template parameter lists of a template
/// template argument against the template parameter lists of a template
/// template parameter.
///
/// \code
/// template<template<int Value> class Metafun> struct X;
/// template<int Value> struct integer_c;
/// X<integer_c> xic;
/// \endcode
TPL_TemplateTemplateArgumentMatch
};
bool TemplateParameterListsAreEqual(TemplateParameterList *New,
TemplateParameterList *Old,
bool Complain,
TemplateParameterListEqualKind Kind,
SourceLocation TemplateArgLoc
= SourceLocation());
bool CheckTemplateDeclScope(Scope *S, TemplateParameterList *TemplateParams);
/// \brief Called when the parser has parsed a C++ typename
/// specifier, e.g., "typename T::type".
///
/// \param S The scope in which this typename type occurs.
/// \param TypenameLoc the location of the 'typename' keyword
/// \param SS the nested-name-specifier following the typename (e.g., 'T::').
/// \param II the identifier we're retrieving (e.g., 'type' in the example).
/// \param IdLoc the location of the identifier.
TypeResult
ActOnTypenameType(Scope *S, SourceLocation TypenameLoc,
const CXXScopeSpec &SS, const IdentifierInfo &II,
SourceLocation IdLoc);
/// \brief Called when the parser has parsed a C++ typename
/// specifier that ends in a template-id, e.g.,
/// "typename MetaFun::template apply<T1, T2>".
///
/// \param S The scope in which this typename type occurs.
/// \param TypenameLoc the location of the 'typename' keyword
/// \param SS the nested-name-specifier following the typename (e.g., 'T::').
/// \param TemplateLoc the location of the 'template' keyword, if any.
/// \param TemplateName The template name.
/// \param TemplateNameLoc The location of the template name.
/// \param LAngleLoc The location of the opening angle bracket ('<').
/// \param TemplateArgs The template arguments.
/// \param RAngleLoc The location of the closing angle bracket ('>').
TypeResult
ActOnTypenameType(Scope *S, SourceLocation TypenameLoc,
const CXXScopeSpec &SS,
SourceLocation TemplateLoc,
TemplateTy TemplateName,
SourceLocation TemplateNameLoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgs,
SourceLocation RAngleLoc);
QualType CheckTypenameType(ElaboratedTypeKeyword Keyword,
SourceLocation KeywordLoc,
NestedNameSpecifierLoc QualifierLoc,
const IdentifierInfo &II,
SourceLocation IILoc);
TypeSourceInfo *RebuildTypeInCurrentInstantiation(TypeSourceInfo *T,
SourceLocation Loc,
DeclarationName Name);
bool RebuildNestedNameSpecifierInCurrentInstantiation(CXXScopeSpec &SS);
ExprResult RebuildExprInCurrentInstantiation(Expr *E);
bool RebuildTemplateParamsInCurrentInstantiation(
TemplateParameterList *Params);
std::string
getTemplateArgumentBindingsText(const TemplateParameterList *Params,
const TemplateArgumentList &Args);
std::string
getTemplateArgumentBindingsText(const TemplateParameterList *Params,
const TemplateArgument *Args,
unsigned NumArgs);
//===--------------------------------------------------------------------===//
// C++ Variadic Templates (C++0x [temp.variadic])
//===--------------------------------------------------------------------===//
/// \brief The context in which an unexpanded parameter pack is
/// being diagnosed.
///
/// Note that the values of this enumeration line up with the first
/// argument to the \c err_unexpanded_parameter_pack diagnostic.
/// Do not reorder or renumber these enumerators without updating
/// that diagnostic to match.
enum UnexpandedParameterPackContext {
/// \brief An arbitrary expression.
UPPC_Expression = 0,
/// \brief The base type of a class type.
UPPC_BaseType,
/// \brief The type of an arbitrary declaration.
UPPC_DeclarationType,
/// \brief The type of a data member.
UPPC_DataMemberType,
/// \brief The size of a bit-field.
UPPC_BitFieldWidth,
/// \brief The expression in a static assertion.
UPPC_StaticAssertExpression,
/// \brief The fixed underlying type of an enumeration.
UPPC_FixedUnderlyingType,
/// \brief The enumerator value.
UPPC_EnumeratorValue,
/// \brief A using declaration.
UPPC_UsingDeclaration,
/// \brief A friend declaration.
UPPC_FriendDeclaration,
/// \brief A declaration qualifier.
UPPC_DeclarationQualifier,
/// \brief An initializer.
UPPC_Initializer,
/// \brief A default argument.
UPPC_DefaultArgument,
/// \brief The type of a non-type template parameter.
UPPC_NonTypeTemplateParameterType,
/// \brief The type of an exception.
UPPC_ExceptionType,
/// \brief Partial specialization.
UPPC_PartialSpecialization,
/// \brief Microsoft __if_exists.
UPPC_IfExists,
/// \brief Microsoft __if_not_exists.
UPPC_IfNotExists,
/// \brief Lambda expression.
UPPC_Lambda,
/// \brief Block expression.
UPPC_Block
};
/// \brief Diagnose unexpanded parameter packs.
///
/// \param Loc The location at which we should emit the diagnostic.
///
/// \param UPPC The context in which we are diagnosing unexpanded
/// parameter packs.
///
/// \param Unexpanded the set of unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPacks(SourceLocation Loc,
UnexpandedParameterPackContext UPPC,
ArrayRef<UnexpandedParameterPack> Unexpanded);
/// \brief If the given type contains an unexpanded parameter pack,
/// diagnose the error.
///
/// \param Loc The source location where a diagnostic should be emitted.
///
/// \param T The type that is being checked for unexpanded parameter
/// packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(SourceLocation Loc, TypeSourceInfo *T,
UnexpandedParameterPackContext UPPC);
/// \brief If the given expression contains an unexpanded parameter
/// pack, diagnose the error.
///
/// \param E The expression that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(Expr *E,
UnexpandedParameterPackContext UPPC = UPPC_Expression);
/// \brief If the given nested-name-specifier contains an unexpanded
/// parameter pack, diagnose the error.
///
/// \param SS The nested-name-specifier that is being checked for
/// unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(const CXXScopeSpec &SS,
UnexpandedParameterPackContext UPPC);
/// \brief If the given name contains an unexpanded parameter pack,
/// diagnose the error.
///
/// \param NameInfo The name (with source location information) that
/// is being checked for unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(const DeclarationNameInfo &NameInfo,
UnexpandedParameterPackContext UPPC);
/// \brief If the given template name contains an unexpanded parameter pack,
/// diagnose the error.
///
/// \param Loc The location of the template name.
///
/// \param Template The template name that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(SourceLocation Loc,
TemplateName Template,
UnexpandedParameterPackContext UPPC);
/// \brief If the given template argument contains an unexpanded parameter
/// pack, diagnose the error.
///
/// \param Arg The template argument that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(TemplateArgumentLoc Arg,
UnexpandedParameterPackContext UPPC);
/// \brief Collect the set of unexpanded parameter packs within the given
/// template argument.
///
/// \param Arg The template argument that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(TemplateArgument Arg,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// \brief Collect the set of unexpanded parameter packs within the given
/// template argument.
///
/// \param Arg The template argument that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(TemplateArgumentLoc Arg,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// \brief Collect the set of unexpanded parameter packs within the given
/// type.
///
/// \param T The type that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(QualType T,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// \brief Collect the set of unexpanded parameter packs within the given
/// type.
///
/// \param TL The type that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(TypeLoc TL,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// \brief Collect the set of unexpanded parameter packs within the given
/// nested-name-specifier.
///
/// \param SS The nested-name-specifier that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(CXXScopeSpec &SS,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// \brief Collect the set of unexpanded parameter packs within the given
/// name.
///
/// \param NameInfo The name that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(const DeclarationNameInfo &NameInfo,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// \brief Invoked when parsing a template argument followed by an
/// ellipsis, which creates a pack expansion.
///
/// \param Arg The template argument preceding the ellipsis, which
/// may already be invalid.
///
/// \param EllipsisLoc The location of the ellipsis.
ParsedTemplateArgument ActOnPackExpansion(const ParsedTemplateArgument &Arg,
SourceLocation EllipsisLoc);
/// \brief Invoked when parsing a type followed by an ellipsis, which
/// creates a pack expansion.
///
/// \param Type The type preceding the ellipsis, which will become
/// the pattern of the pack expansion.
///
/// \param EllipsisLoc The location of the ellipsis.
TypeResult ActOnPackExpansion(ParsedType Type, SourceLocation EllipsisLoc);
/// \brief Construct a pack expansion type from the pattern of the pack
/// expansion.
TypeSourceInfo *CheckPackExpansion(TypeSourceInfo *Pattern,
SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions);
/// \brief Construct a pack expansion type from the pattern of the pack
/// expansion.
QualType CheckPackExpansion(QualType Pattern,
SourceRange PatternRange,
SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions);
/// \brief Invoked when parsing an expression followed by an ellipsis, which
/// creates a pack expansion.
///
/// \param Pattern The expression preceding the ellipsis, which will become
/// the pattern of the pack expansion.
///
/// \param EllipsisLoc The location of the ellipsis.
ExprResult ActOnPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc);
/// \brief Invoked when parsing an expression followed by an ellipsis, which
/// creates a pack expansion.
///
/// \param Pattern The expression preceding the ellipsis, which will become
/// the pattern of the pack expansion.
///
/// \param EllipsisLoc The location of the ellipsis.
ExprResult CheckPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions);
/// \brief Determine whether we could expand a pack expansion with the
/// given set of parameter packs into separate arguments by repeatedly
/// transforming the pattern.
///
/// \param EllipsisLoc The location of the ellipsis that identifies the
/// pack expansion.
///
/// \param PatternRange The source range that covers the entire pattern of
/// the pack expansion.
///
/// \param Unexpanded The set of unexpanded parameter packs within the
/// pattern.
///
/// \param ShouldExpand Will be set to \c true if the transformer should
/// expand the corresponding pack expansions into separate arguments. When
/// set, \c NumExpansions must also be set.
///
/// \param RetainExpansion Whether the caller should add an unexpanded
/// pack expansion after all of the expanded arguments. This is used
/// when extending explicitly-specified template argument packs per
/// C++0x [temp.arg.explicit]p9.
///
/// \param NumExpansions The number of separate arguments that will be in
/// the expanded form of the corresponding pack expansion. This is both an
/// input and an output parameter, which can be set by the caller if the
/// number of expansions is known a priori (e.g., due to a prior substitution)
/// and will be set by the callee when the number of expansions is known.
/// The callee must set this value when \c ShouldExpand is \c true; it may
/// set this value in other cases.
///
/// \returns true if an error occurred (e.g., because the parameter packs
/// are to be instantiated with arguments of different lengths), false
/// otherwise. If false, \c ShouldExpand (and possibly \c NumExpansions)
/// must be set.
bool CheckParameterPacksForExpansion(SourceLocation EllipsisLoc,
SourceRange PatternRange,
ArrayRef<UnexpandedParameterPack> Unexpanded,
const MultiLevelTemplateArgumentList &TemplateArgs,
bool &ShouldExpand,
bool &RetainExpansion,
Optional<unsigned> &NumExpansions);
/// \brief Determine the number of arguments in the given pack expansion
/// type.
///
/// This routine assumes that the number of arguments in the expansion is
/// consistent across all of the unexpanded parameter packs in its pattern.
///
/// Returns an empty Optional if the type can't be expanded.
Optional<unsigned> getNumArgumentsInExpansion(QualType T,
const MultiLevelTemplateArgumentList &TemplateArgs);
/// \brief Determine whether the given declarator contains any unexpanded
/// parameter packs.
///
/// This routine is used by the parser to disambiguate function declarators
/// with an ellipsis prior to the ')', e.g.,
///
/// \code
/// void f(T...);
/// \endcode
///
/// To determine whether we have an (unnamed) function parameter pack or
/// a variadic function.
///
/// \returns true if the declarator contains any unexpanded parameter packs,
/// false otherwise.
bool containsUnexpandedParameterPacks(Declarator &D);
/// \brief Returns the pattern of the pack expansion for a template argument.
///
/// \param OrigLoc The template argument to expand.
///
/// \param Ellipsis Will be set to the location of the ellipsis.
///
/// \param NumExpansions Will be set to the number of expansions that will
/// be generated from this pack expansion, if known a priori.
TemplateArgumentLoc getTemplateArgumentPackExpansionPattern(
TemplateArgumentLoc OrigLoc,
SourceLocation &Ellipsis,
Optional<unsigned> &NumExpansions) const;
//===--------------------------------------------------------------------===//
// C++ Template Argument Deduction (C++ [temp.deduct])
//===--------------------------------------------------------------------===//
/// \brief Describes the result of template argument deduction.
///
/// The TemplateDeductionResult enumeration describes the result of
/// template argument deduction, as returned from
/// DeduceTemplateArguments(). The separate TemplateDeductionInfo
/// structure provides additional information about the results of
/// template argument deduction, e.g., the deduced template argument
/// list (if successful) or the specific template parameters or
/// deduced arguments that were involved in the failure.
enum TemplateDeductionResult {
/// \brief Template argument deduction was successful.
TDK_Success = 0,
/// \brief The declaration was invalid; do nothing.
TDK_Invalid,
/// \brief Template argument deduction exceeded the maximum template
/// instantiation depth (which has already been diagnosed).
TDK_InstantiationDepth,
/// \brief Template argument deduction did not deduce a value
/// for every template parameter.
TDK_Incomplete,
/// \brief Template argument deduction produced inconsistent
/// deduced values for the given template parameter.
TDK_Inconsistent,
/// \brief Template argument deduction failed due to inconsistent
/// cv-qualifiers on a template parameter type that would
/// otherwise be deduced, e.g., we tried to deduce T in "const T"
/// but were given a non-const "X".
TDK_Underqualified,
/// \brief Substitution of the deduced template argument values
/// resulted in an error.
TDK_SubstitutionFailure,
/// \brief A non-dependent component of the parameter did not match the
/// corresponding component of the argument.
TDK_NonDeducedMismatch,
/// \brief When performing template argument deduction for a function
/// template, there were too many call arguments.
TDK_TooManyArguments,
/// \brief When performing template argument deduction for a function
/// template, there were too few call arguments.
TDK_TooFewArguments,
/// \brief The explicitly-specified template arguments were not valid
/// template arguments for the given template.
TDK_InvalidExplicitArguments,
/// \brief The arguments included an overloaded function name that could
/// not be resolved to a suitable function.
TDK_FailedOverloadResolution,
/// \brief Deduction failed; that's all we know.
TDK_MiscellaneousDeductionFailure
};
TemplateDeductionResult
DeduceTemplateArguments(ClassTemplatePartialSpecializationDecl *Partial,
const TemplateArgumentList &TemplateArgs,
sema::TemplateDeductionInfo &Info);
TemplateDeductionResult
SubstituteExplicitTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo &ExplicitTemplateArgs,
SmallVectorImpl<DeducedTemplateArgument> &Deduced,
SmallVectorImpl<QualType> &ParamTypes,
QualType *FunctionType,
sema::TemplateDeductionInfo &Info);
/// \brief A function argument from which we performed template argument
/// deduction for a call.
struct OriginalCallArg {
OriginalCallArg(QualType OriginalParamType,
unsigned ArgIdx,
QualType OriginalArgType)
: OriginalParamType(OriginalParamType), ArgIdx(ArgIdx),
OriginalArgType(OriginalArgType) { }
/// The function parameter type from which deduction was performed.
QualType OriginalParamType;
/// The index of the call argument corresponding to this parameter.
unsigned ArgIdx;
/// The type of the call argument as written.
QualType OriginalArgType;
};
TemplateDeductionResult
FinishTemplateArgumentDeduction(FunctionTemplateDecl *FunctionTemplate,
SmallVectorImpl<DeducedTemplateArgument> &Deduced,
unsigned NumExplicitlySpecified,
FunctionDecl *&Specialization,
sema::TemplateDeductionInfo &Info,
SmallVectorImpl<OriginalCallArg> const *OriginalCallArgs = 0);
TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo *ExplicitTemplateArgs,
ArrayRef<Expr *> Args,
FunctionDecl *&Specialization,
sema::TemplateDeductionInfo &Info);
TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo *ExplicitTemplateArgs,
QualType ArgFunctionType,
FunctionDecl *&Specialization,
sema::TemplateDeductionInfo &Info,
bool InOverloadResolution = false);
TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
QualType ToType,
CXXConversionDecl *&Specialization,
sema::TemplateDeductionInfo &Info);
TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo *ExplicitTemplateArgs,
FunctionDecl *&Specialization,
sema::TemplateDeductionInfo &Info,
bool InOverloadResolution = false);
/// \brief Result type of DeduceAutoType.
enum DeduceAutoResult {
/// Deduction succeeded.
DAR_Succeeded,
/// Deduction failed.
DAR_Failed,
/// Deduction failed, and a diagnostic has already been emitted.
DAR_FailedAlreadyDiagnosed
};
DeduceAutoResult DeduceAutoType(TypeSourceInfo *AutoType, Expr *&Initializer,
QualType &Result);
DeduceAutoResult DeduceAutoType(TypeLoc AutoTypeLoc, Expr *&Initializer,
QualType &Result);
QualType SubstAutoType(QualType TypeWithAuto, QualType Replacement);
void DiagnoseAutoDeductionFailure(VarDecl *VDecl, Expr *Init);
bool DeduceReturnType(FunctionDecl *FD, SourceLocation Loc,
bool Diagnose = true);
bool DeduceFunctionTypeFromReturnExpr(FunctionDecl *FD,
SourceLocation ReturnLoc,
Expr *&RetExpr, AutoType *AT);
FunctionTemplateDecl *getMoreSpecializedTemplate(FunctionTemplateDecl *FT1,
FunctionTemplateDecl *FT2,
SourceLocation Loc,
TemplatePartialOrderingContext TPOC,
unsigned NumCallArguments);
UnresolvedSetIterator getMostSpecialized(UnresolvedSetIterator SBegin,
UnresolvedSetIterator SEnd,
TemplatePartialOrderingContext TPOC,
unsigned NumCallArguments,
SourceLocation Loc,
const PartialDiagnostic &NoneDiag,
const PartialDiagnostic &AmbigDiag,
const PartialDiagnostic &CandidateDiag,
bool Complain = true,
QualType TargetType = QualType());
ClassTemplatePartialSpecializationDecl *
getMoreSpecializedPartialSpecialization(
ClassTemplatePartialSpecializationDecl *PS1,
ClassTemplatePartialSpecializationDecl *PS2,
SourceLocation Loc);
void MarkUsedTemplateParameters(const TemplateArgumentList &TemplateArgs,
bool OnlyDeduced,
unsigned Depth,
llvm::SmallBitVector &Used);
/// \brief Mark which template parameters of \p FunctionTemplate can be
/// deduced, forwarding to the static overload using this semantic
/// analysis object's ASTContext.
void MarkDeducedTemplateParameters(
const FunctionTemplateDecl *FunctionTemplate,
llvm::SmallBitVector &Deduced) {
MarkDeducedTemplateParameters(Context, FunctionTemplate, Deduced);
}
static void MarkDeducedTemplateParameters(ASTContext &Ctx,
const FunctionTemplateDecl *FunctionTemplate,
llvm::SmallBitVector &Deduced);
//===--------------------------------------------------------------------===//
// C++ Template Instantiation
//
MultiLevelTemplateArgumentList getTemplateInstantiationArgs(NamedDecl *D,
const TemplateArgumentList *Innermost = 0,
bool RelativeToPrimary = false,
const FunctionDecl *Pattern = 0);
/// \brief A template instantiation that is currently in progress.
struct ActiveTemplateInstantiation {
/// \brief The kind of template instantiation we are performing.
enum InstantiationKind {
/// We are instantiating a template declaration. The entity is
/// the declaration we're instantiating (e.g., a CXXRecordDecl).
TemplateInstantiation,
/// We are instantiating a default argument for a template
/// parameter. The Entity is the template, and
/// TemplateArgs/NumTemplateArguments provides the template
/// arguments as specified.
/// FIXME: Use a TemplateArgumentList
DefaultTemplateArgumentInstantiation,
/// We are instantiating a default argument for a function.
/// The Entity is the ParmVarDecl, and TemplateArgs/NumTemplateArgs
/// provides the template arguments as specified.
DefaultFunctionArgumentInstantiation,
/// We are substituting explicit template arguments provided for
/// a function template. The entity is a FunctionTemplateDecl.
ExplicitTemplateArgumentSubstitution,
/// We are substituting template argument determined as part of
/// template argument deduction for either a class template
/// partial specialization or a function template. The
/// Entity is either a ClassTemplatePartialSpecializationDecl or
/// a FunctionTemplateDecl.
DeducedTemplateArgumentSubstitution,
/// We are substituting prior template arguments into a new
/// template parameter. The template parameter itself is either a
/// NonTypeTemplateParmDecl or a TemplateTemplateParmDecl.
PriorTemplateArgumentSubstitution,
/// We are checking the validity of a default template argument that
/// has been used when naming a template-id.
DefaultTemplateArgumentChecking,
/// We are instantiating the exception specification for a function
/// template which was deferred until it was needed.
ExceptionSpecInstantiation
} Kind;
/// \brief The point of instantiation within the source code.
SourceLocation PointOfInstantiation;
/// \brief The template (or partial specialization) in which we are
/// performing the instantiation, for substitutions of prior template
/// arguments.
NamedDecl *Template;
/// \brief The entity that is being instantiated.
Decl *Entity;
/// \brief The list of template arguments we are substituting, if they
/// are not part of the entity.
const TemplateArgument *TemplateArgs;
/// \brief The number of template arguments in TemplateArgs.
unsigned NumTemplateArgs;
/// \brief The template deduction info object associated with the
/// substitution or checking of explicit or deduced template arguments.
sema::TemplateDeductionInfo *DeductionInfo;
/// \brief The source range that covers the construct that causes
/// the instantiation, e.g., the template-id that causes a class
/// template instantiation.
SourceRange InstantiationRange;
// Default-constructs as a TemplateInstantiation record with null
// pointers; PointOfInstantiation and InstantiationRange are left
// default-constructed.
ActiveTemplateInstantiation()
: Kind(TemplateInstantiation), Template(0), Entity(0), TemplateArgs(0),
NumTemplateArgs(0), DeductionInfo(0) {}
/// \brief Determines whether this template is an actual instantiation
/// that should be counted toward the maximum instantiation depth.
bool isInstantiationRecord() const;
// Equality considers only Kind, Entity, and (depending on Kind) the
// Template/TemplateArgs pointers; source locations, ranges, argument
// counts, and DeductionInfo do not participate.
friend bool operator==(const ActiveTemplateInstantiation &X,
const ActiveTemplateInstantiation &Y) {
if (X.Kind != Y.Kind)
return false;
if (X.Entity != Y.Entity)
return false;
switch (X.Kind) {
case TemplateInstantiation:
case ExceptionSpecInstantiation:
return true;
case PriorTemplateArgumentSubstitution:
case DefaultTemplateArgumentChecking:
return X.Template == Y.Template && X.TemplateArgs == Y.TemplateArgs;
case DefaultTemplateArgumentInstantiation:
case ExplicitTemplateArgumentSubstitution:
case DeducedTemplateArgumentSubstitution:
case DefaultFunctionArgumentInstantiation:
return X.TemplateArgs == Y.TemplateArgs;
}
llvm_unreachable("Invalid InstantiationKind!");
}
friend bool operator!=(const ActiveTemplateInstantiation &X,
const ActiveTemplateInstantiation &Y) {
return !(X == Y);
}
};
/// \brief List of active template instantiations.
///
/// This vector is treated as a stack. As one template instantiation
/// requires another template instantiation, additional
/// instantiations are pushed onto the stack up to a
/// user-configurable limit LangOptions::InstantiationDepth.
SmallVector<ActiveTemplateInstantiation, 16>
ActiveTemplateInstantiations;
/// \brief Whether we are in a SFINAE context that is not associated with
/// template instantiation.
///
/// This is used when setting up a SFINAE trap (see \c SFINAETrap) outside
/// of a template instantiation or template argument deduction.
bool InNonInstantiationSFINAEContext;
/// \brief The number of ActiveTemplateInstantiation entries in
/// \c ActiveTemplateInstantiations that are not actual instantiations and,
/// therefore, should not be counted as part of the instantiation depth.
unsigned NonInstantiationEntries;
/// \brief The last template from which a template instantiation
/// error or warning was produced.
///
/// This value is used to suppress printing of redundant template
/// instantiation backtraces when there are multiple errors in the
/// same instantiation. FIXME: Does this belong in Sema? It's tough
/// to implement it anywhere else.
ActiveTemplateInstantiation LastTemplateInstantiationErrorContext;
/// \brief The current index into pack expansion arguments that will be
/// used for substitution of parameter packs.
///
/// The pack expansion index will be -1 to indicate that parameter packs
/// should be instantiated as themselves. Otherwise, the index specifies
/// which argument within the parameter pack will be used for substitution.
int ArgumentPackSubstitutionIndex;
/// \brief RAII object used to change the argument pack substitution index
/// within a \c Sema object.
///
/// See \c ArgumentPackSubstitutionIndex for more information.
class ArgumentPackSubstitutionIndexRAII {
Sema &Self;
// The index in effect before construction; restored by the destructor.
int OldSubstitutionIndex;
public:
ArgumentPackSubstitutionIndexRAII(Sema &Self, int NewSubstitutionIndex)
: Self(Self), OldSubstitutionIndex(Self.ArgumentPackSubstitutionIndex) {
Self.ArgumentPackSubstitutionIndex = NewSubstitutionIndex;
}
~ArgumentPackSubstitutionIndexRAII() {
Self.ArgumentPackSubstitutionIndex = OldSubstitutionIndex;
}
};
friend class ArgumentPackSubstitutionRAII;
/// \brief The stack of calls expression undergoing template instantiation.
///
/// The top of this stack is used by a fixit instantiating unresolved
/// function calls to fix the AST to match the textual change it prints.
SmallVector<CallExpr *, 8> CallsUndergoingInstantiation;
/// \brief For each declaration that involved template argument deduction, the
/// set of diagnostics that were suppressed during that template argument
/// deduction.
///
/// FIXME: Serialize this structure to the AST file.
llvm::DenseMap<Decl *, SmallVector<PartialDiagnosticAt, 1> >
SuppressedDiagnostics;
/// \brief A stack object to be created when performing template
/// instantiation.
///
/// Construction of an object of type \c InstantiatingTemplate
/// pushes the current instantiation onto the stack of active
/// instantiations. If the size of this stack exceeds the maximum
/// number of recursive template instantiations, construction
/// produces an error and evaluates true.
///
/// Destruction of this object will pop the named instantiation off
/// the stack.
struct InstantiatingTemplate {
/// \brief Note that we are instantiating a class template,
/// function template, or a member thereof.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
Decl *Entity,
SourceRange InstantiationRange = SourceRange());
// Tag type used to select the exception-specification constructor below.
struct ExceptionSpecification {};
/// \brief Note that we are instantiating an exception specification
/// of a function template.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
FunctionDecl *Entity, ExceptionSpecification,
SourceRange InstantiationRange = SourceRange());
/// \brief Note that we are instantiating a default argument in a
/// template-id.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
TemplateDecl *Template,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange = SourceRange());
/// \brief Note that we are substituting explicit or deduced template
/// arguments during function template argument deduction; \p Kind
/// selects which of the two substitutions is being performed.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
FunctionTemplateDecl *FunctionTemplate,
ArrayRef<TemplateArgument> TemplateArgs,
ActiveTemplateInstantiation::InstantiationKind Kind,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// \brief Note that we are instantiating as part of template
/// argument deduction for a class template partial
/// specialization.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
ClassTemplatePartialSpecializationDecl *PartialSpec,
ArrayRef<TemplateArgument> TemplateArgs,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// \brief Note that we are instantiating a default argument for a
/// function parameter.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
ParmVarDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange = SourceRange());
/// \brief Note that we are substituting prior template arguments into a
/// non-type template parameter.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
NamedDecl *Template,
NonTypeTemplateParmDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange);
/// \brief Note that we are substituting prior template arguments into a
/// template template parameter.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
NamedDecl *Template,
TemplateTemplateParmDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange);
/// \brief Note that we are checking the default template argument
/// against the template parameter for a given template-id.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
TemplateDecl *Template,
NamedDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange);
/// \brief Note that we have finished instantiating this template.
void Clear();
~InstantiatingTemplate() { Clear(); }
/// \brief Determines whether we have exceeded the maximum
/// recursive template instantiations.
LLVM_EXPLICIT operator bool() const { return Invalid; }
private:
Sema &SemaRef;
// True when construction failed (e.g., the instantiation depth check
// failed); exposed to callers via operator bool.
bool Invalid;
// Saved value of Sema::InNonInstantiationSFINAEContext — presumably
// restored when this record is popped; confirm in Clear().
bool SavedInNonInstantiationSFINAEContext;
bool CheckInstantiationDepth(SourceLocation PointOfInstantiation,
SourceRange InstantiationRange);
// Non-copyable: each object corresponds to exactly one stack entry.
InstantiatingTemplate(const InstantiatingTemplate&) LLVM_DELETED_FUNCTION;
InstantiatingTemplate&
operator=(const InstantiatingTemplate&) LLVM_DELETED_FUNCTION;
};
void PrintInstantiationStack();
/// \brief Determines whether we are currently in a context where
/// template argument substitution failures are not considered
/// errors.
///
/// \returns An empty \c Optional if we're not in a SFINAE context.
/// Otherwise, contains a pointer that, if non-NULL, contains the nearest
/// template-deduction context object, which can be used to capture
/// diagnostics that will be suppressed.
Optional<sema::TemplateDeductionInfo *> isSFINAEContext() const;
/// \brief Determines whether we are currently in a context that
/// is not evaluated as per C++ [expr] p5.
/// \brief Determines whether we are currently in a context that
/// is not evaluated as per C++ [expr] p5 (e.g. the operand of
/// sizeof or a decltype specifier).
bool isUnevaluatedContext() const {
// There must always be at least one evaluation context on the stack
// while Sema is alive; an empty stack indicates a caller bug.
assert(!ExprEvalContexts.empty() &&
"Must be in an expression evaluation context");
return ExprEvalContexts.back().isUnevaluated();
}
/// \brief RAII object used to determine whether SFINAE has trapped any
/// errors that occur during template argument deduction.
///
/// The constructor snapshots Sema's SFINAE-related state and the
/// destructor restores it exactly, so traps may be nested safely.
class SFINAETrap {
  Sema &SemaRef;
  unsigned SavedSFINAEErrors;                  // NumSFINAEErrors at entry.
  bool SavedInNonInstantiationSFINAEContext;   // Flag value at entry.
  bool SavedAccessCheckingSFINAE;              // Flag value at entry.
public:
  explicit SFINAETrap(Sema &S, bool AccessCheckingSFINAE = false)
    : SemaRef(S), SavedSFINAEErrors(S.NumSFINAEErrors),
      SavedInNonInstantiationSFINAEContext(S.InNonInstantiationSFINAEContext),
      SavedAccessCheckingSFINAE(S.AccessCheckingSFINAE) {
    S.AccessCheckingSFINAE = AccessCheckingSFINAE;
    // When no template instantiation is active, record that we entered a
    // non-instantiation SFINAE context.
    if (!S.isSFINAEContext())
      S.InNonInstantiationSFINAEContext = true;
  }

  ~SFINAETrap() {
    // Restore exactly the state captured by the constructor.
    SemaRef.AccessCheckingSFINAE = SavedAccessCheckingSFINAE;
    SemaRef.InNonInstantiationSFINAEContext =
        SavedInNonInstantiationSFINAEContext;
    SemaRef.NumSFINAEErrors = SavedSFINAEErrors;
  }

  /// \brief Determine whether any SFINAE errors have been trapped.
  bool hasErrorOccurred() const {
    return SemaRef.NumSFINAEErrors > SavedSFINAEErrors;
  }
};
/// \brief The current instantiation scope used to store local
/// variables.
LocalInstantiationScope *CurrentInstantiationScope;
/// \brief The number of typos corrected by CorrectTypo.
unsigned TyposCorrected;
typedef llvm::DenseMap<IdentifierInfo *, TypoCorrection>
UnqualifiedTyposCorrectedMap;
/// \brief A cache containing the results of typo correction for unqualified
/// name lookup.
///
/// The string is the string that we corrected to (which may be empty, if
/// there was no correction), while the boolean will be true when the
/// string represents a keyword.
UnqualifiedTyposCorrectedMap UnqualifiedTyposCorrected;
/// \brief Worker object for performing CFG-based warnings.
sema::AnalysisBasedWarnings AnalysisWarnings;
/// \brief An entity for which implicit template instantiation is required.
///
/// The source location associated with the declaration is the first place in
/// the source code where the declaration was "used". It is not necessarily
/// the point of instantiation (which will be either before or after the
/// namespace-scope declaration that triggered this implicit instantiation),
/// However, it is the location that diagnostics should generally refer to,
/// because users will need to know what code triggered the instantiation.
typedef std::pair<ValueDecl *, SourceLocation> PendingImplicitInstantiation;
/// \brief The queue of implicit template instantiations that are required
/// but have not yet been performed.
std::deque<PendingImplicitInstantiation> PendingInstantiations;
/// \brief The queue of implicit template instantiations that are required
/// and must be performed within the current local scope.
///
/// This queue is only used for member functions of local classes in
/// templates, which must be instantiated in the same scope as their
/// enclosing function, so that they can reference function-local
/// types, static variables, enumerators, etc.
std::deque<PendingImplicitInstantiation> PendingLocalImplicitInstantiations;
void PerformPendingInstantiations(bool LocalOnly = false);
TypeSourceInfo *SubstType(TypeSourceInfo *T,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc, DeclarationName Entity);
QualType SubstType(QualType T,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc, DeclarationName Entity);
TypeSourceInfo *SubstType(TypeLoc TL,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc, DeclarationName Entity);
TypeSourceInfo *SubstFunctionDeclType(TypeSourceInfo *T,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc,
DeclarationName Entity,
CXXRecordDecl *ThisContext,
unsigned ThisTypeQuals);
ParmVarDecl *SubstParmVarDecl(ParmVarDecl *D,
const MultiLevelTemplateArgumentList &TemplateArgs,
int indexAdjustment,
Optional<unsigned> NumExpansions,
bool ExpectParameterPack);
bool SubstParmTypes(SourceLocation Loc,
ParmVarDecl **Params, unsigned NumParams,
const MultiLevelTemplateArgumentList &TemplateArgs,
SmallVectorImpl<QualType> &ParamTypes,
SmallVectorImpl<ParmVarDecl *> *OutParams = 0);
ExprResult SubstExpr(Expr *E,
const MultiLevelTemplateArgumentList &TemplateArgs);
/// \brief Substitute the given template arguments into a list of
/// expressions, expanding pack expansions if required.
///
/// \param Exprs The list of expressions to substitute into.
///
/// \param NumExprs The number of expressions in \p Exprs.
///
/// \param IsCall Whether this is some form of call, in which case
/// default arguments will be dropped.
///
/// \param TemplateArgs The set of template arguments to substitute.
///
/// \param Outputs Will receive all of the substituted arguments.
///
/// \returns true if an error occurred, false otherwise.
bool SubstExprs(Expr **Exprs, unsigned NumExprs, bool IsCall,
const MultiLevelTemplateArgumentList &TemplateArgs,
SmallVectorImpl<Expr *> &Outputs);
StmtResult SubstStmt(Stmt *S,
const MultiLevelTemplateArgumentList &TemplateArgs);
Decl *SubstDecl(Decl *D, DeclContext *Owner,
const MultiLevelTemplateArgumentList &TemplateArgs);
ExprResult SubstInitializer(Expr *E,
const MultiLevelTemplateArgumentList &TemplateArgs,
bool CXXDirectInit);
bool
SubstBaseSpecifiers(CXXRecordDecl *Instantiation,
CXXRecordDecl *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs);
bool
InstantiateClass(SourceLocation PointOfInstantiation,
CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateSpecializationKind TSK,
bool Complain = true);
bool InstantiateEnum(SourceLocation PointOfInstantiation,
EnumDecl *Instantiation, EnumDecl *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateSpecializationKind TSK);
/// \brief An attribute whose instantiation is deferred; consumed by
/// InstantiateAttrs via LateInstantiatedAttrVec.
struct LateInstantiatedAttribute {
  const Attr *TmplAttr;            // Attribute in the template pattern.
  LocalInstantiationScope *Scope;  // Scope to instantiate it in.
  Decl *NewDecl;                   // Declaration the result attaches to.

  LateInstantiatedAttribute(const Attr *PatternAttr,
                            LocalInstantiationScope *InstScope,
                            Decl *Instantiation)
      : TmplAttr(PatternAttr), Scope(InstScope), NewDecl(Instantiation) {}
};
typedef SmallVector<LateInstantiatedAttribute, 16> LateInstantiatedAttrVec;
void InstantiateAttrs(const MultiLevelTemplateArgumentList &TemplateArgs,
const Decl *Pattern, Decl *Inst,
LateInstantiatedAttrVec *LateAttrs = 0,
LocalInstantiationScope *OuterMostScope = 0);
bool
InstantiateClassTemplateSpecialization(SourceLocation PointOfInstantiation,
ClassTemplateSpecializationDecl *ClassTemplateSpec,
TemplateSpecializationKind TSK,
bool Complain = true);
void InstantiateClassMembers(SourceLocation PointOfInstantiation,
CXXRecordDecl *Instantiation,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateSpecializationKind TSK);
void InstantiateClassTemplateSpecializationMembers(
SourceLocation PointOfInstantiation,
ClassTemplateSpecializationDecl *ClassTemplateSpec,
TemplateSpecializationKind TSK);
NestedNameSpecifierLoc
SubstNestedNameSpecifierLoc(NestedNameSpecifierLoc NNS,
const MultiLevelTemplateArgumentList &TemplateArgs);
DeclarationNameInfo
SubstDeclarationNameInfo(const DeclarationNameInfo &NameInfo,
const MultiLevelTemplateArgumentList &TemplateArgs);
TemplateName
SubstTemplateName(NestedNameSpecifierLoc QualifierLoc, TemplateName Name,
SourceLocation Loc,
const MultiLevelTemplateArgumentList &TemplateArgs);
bool Subst(const TemplateArgumentLoc *Args, unsigned NumArgs,
TemplateArgumentListInfo &Result,
const MultiLevelTemplateArgumentList &TemplateArgs);
void InstantiateExceptionSpec(SourceLocation PointOfInstantiation,
FunctionDecl *Function);
void InstantiateFunctionDefinition(SourceLocation PointOfInstantiation,
FunctionDecl *Function,
bool Recursive = false,
bool DefinitionRequired = false);
void InstantiateStaticDataMemberDefinition(
SourceLocation PointOfInstantiation,
VarDecl *Var,
bool Recursive = false,
bool DefinitionRequired = false);
void InstantiateMemInitializers(CXXConstructorDecl *New,
const CXXConstructorDecl *Tmpl,
const MultiLevelTemplateArgumentList &TemplateArgs);
NamedDecl *FindInstantiatedDecl(SourceLocation Loc, NamedDecl *D,
const MultiLevelTemplateArgumentList &TemplateArgs);
DeclContext *FindInstantiatedContext(SourceLocation Loc, DeclContext *DC,
const MultiLevelTemplateArgumentList &TemplateArgs);
// Objective-C declarations.
/// \brief The kind of Objective-C container the parser is currently in,
/// if any. OCK_None (-1) means "not in an Objective-C container".
enum ObjCContainerKind {
OCK_None = -1,
OCK_Interface = 0,
OCK_Protocol,
OCK_Category,
OCK_ClassExtension,
OCK_Implementation,
OCK_CategoryImplementation
};
ObjCContainerKind getObjCContainerKind() const;
Decl *ActOnStartClassInterface(SourceLocation AtInterfaceLoc,
IdentifierInfo *ClassName,
SourceLocation ClassLoc,
IdentifierInfo *SuperName,
SourceLocation SuperLoc,
Decl * const *ProtoRefs,
unsigned NumProtoRefs,
const SourceLocation *ProtoLocs,
SourceLocation EndProtoLoc,
AttributeList *AttrList);
Decl *ActOnCompatibilityAlias(
SourceLocation AtCompatibilityAliasLoc,
IdentifierInfo *AliasName, SourceLocation AliasLocation,
IdentifierInfo *ClassName, SourceLocation ClassLocation);
bool CheckForwardProtocolDeclarationForCircularDependency(
IdentifierInfo *PName,
SourceLocation &PLoc, SourceLocation PrevLoc,
const ObjCList<ObjCProtocolDecl> &PList);
Decl *ActOnStartProtocolInterface(
SourceLocation AtProtoInterfaceLoc,
IdentifierInfo *ProtocolName, SourceLocation ProtocolLoc,
Decl * const *ProtoRefNames, unsigned NumProtoRefs,
const SourceLocation *ProtoLocs,
SourceLocation EndProtoLoc,
AttributeList *AttrList);
Decl *ActOnStartCategoryInterface(SourceLocation AtInterfaceLoc,
IdentifierInfo *ClassName,
SourceLocation ClassLoc,
IdentifierInfo *CategoryName,
SourceLocation CategoryLoc,
Decl * const *ProtoRefs,
unsigned NumProtoRefs,
const SourceLocation *ProtoLocs,
SourceLocation EndProtoLoc);
Decl *ActOnStartClassImplementation(
SourceLocation AtClassImplLoc,
IdentifierInfo *ClassName, SourceLocation ClassLoc,
IdentifierInfo *SuperClassname,
SourceLocation SuperClassLoc);
Decl *ActOnStartCategoryImplementation(SourceLocation AtCatImplLoc,
IdentifierInfo *ClassName,
SourceLocation ClassLoc,
IdentifierInfo *CatName,
SourceLocation CatLoc);
DeclGroupPtrTy ActOnFinishObjCImplementation(Decl *ObjCImpDecl,
ArrayRef<Decl *> Decls);
DeclGroupPtrTy ActOnForwardClassDeclaration(SourceLocation Loc,
IdentifierInfo **IdentList,
SourceLocation *IdentLocs,
unsigned NumElts);
/// \brief Act on an Objective-C forward protocol declaration: a list of
/// protocol name/location pairs in \p IdentList with no protocol body.
DeclGroupPtrTy ActOnForwardProtocolDeclaration(SourceLocation AtProtocolLoc,
const IdentifierLocPair *IdentList,
unsigned NumElts,
AttributeList *attrList);
void FindProtocolDeclaration(bool WarnOnDeclarations,
const IdentifierLocPair *ProtocolId,
unsigned NumProtocols,
SmallVectorImpl<Decl *> &Protocols);
/// Ensure attributes are consistent with type.
/// \param [in, out] Attributes The attributes to check; they will
/// be modified to be consistent with \p PropertyTy.
void CheckObjCPropertyAttributes(Decl *PropertyPtrTy,
SourceLocation Loc,
unsigned &Attributes,
bool propertyInPrimaryClass);
/// Process the specified property declaration and create decls for the
/// setters and getters as needed.
/// \param property The property declaration being processed
/// \param CD The semantic container for the property
/// \param redeclaredProperty Declaration for property if redeclared
/// in class extension.
/// \param lexicalDC Container for redeclaredProperty.
void ProcessPropertyDecl(ObjCPropertyDecl *property,
ObjCContainerDecl *CD,
ObjCPropertyDecl *redeclaredProperty = 0,
ObjCContainerDecl *lexicalDC = 0);
void DiagnosePropertyMismatch(ObjCPropertyDecl *Property,
ObjCPropertyDecl *SuperProperty,
const IdentifierInfo *Name);
void DiagnoseClassExtensionDupMethods(ObjCCategoryDecl *CAT,
ObjCInterfaceDecl *ID);
void MatchOneProtocolPropertiesInClass(Decl *CDecl,
ObjCProtocolDecl *PDecl);
Decl *ActOnAtEnd(Scope *S, SourceRange AtEnd,
Decl **allMethods = 0, unsigned allNum = 0,
Decl **allProperties = 0, unsigned pNum = 0,
DeclGroupPtrTy *allTUVars = 0, unsigned tuvNum = 0);
Decl *ActOnProperty(Scope *S, SourceLocation AtLoc,
SourceLocation LParenLoc,
FieldDeclarator &FD, ObjCDeclSpec &ODS,
Selector GetterSel, Selector SetterSel,
bool *OverridingProperty,
tok::ObjCKeywordKind MethodImplKind,
DeclContext *lexicalDC = 0);
Decl *ActOnPropertyImplDecl(Scope *S,
SourceLocation AtLoc,
SourceLocation PropertyLoc,
bool ImplKind,
IdentifierInfo *PropertyId,
IdentifierInfo *PropertyIvar,
SourceLocation PropertyIvarLoc);
/// \brief Classifies Objective-C methods that have special semantics
/// (alloc/new/copy/init families). NOTE(review): family meanings are
/// inferred from the enumerator names — confirm against the users.
enum ObjCSpecialMethodKind {
OSMK_None,
OSMK_Alloc,
OSMK_New,
OSMK_Copy,
OSMK_RetainingInit,
OSMK_NonRetainingInit
};
/// \brief Parser-provided description of one argument of an Objective-C
/// method declaration; passed as an array to ActOnMethodDeclaration.
struct ObjCArgInfo {
IdentifierInfo *Name;    // Argument name.
SourceLocation NameLoc;  // Location of the argument name.
// The Type is null if no type was specified, and the DeclSpec is invalid
// in this case.
ParsedType Type;
ObjCDeclSpec DeclSpec;
/// ArgAttrs - Attribute list for this argument.
AttributeList *ArgAttrs;
};
Decl *ActOnMethodDeclaration(
Scope *S,
SourceLocation BeginLoc, // location of the + or -.
SourceLocation EndLoc, // location of the ; or {.
tok::TokenKind MethodType,
ObjCDeclSpec &ReturnQT, ParsedType ReturnType,
ArrayRef<SourceLocation> SelectorLocs, Selector Sel,
// optional arguments. The number of types/arguments is obtained
// from the Sel.getNumArgs().
ObjCArgInfo *ArgInfo,
DeclaratorChunk::ParamInfo *CParamInfo, unsigned CNumArgs, // c-style args
AttributeList *AttrList, tok::ObjCKeywordKind MethodImplKind,
bool isVariadic, bool MethodDefinition);
ObjCMethodDecl *LookupMethodInQualifiedType(Selector Sel,
const ObjCObjectPointerType *OPT,
bool IsInstance);
ObjCMethodDecl *LookupMethodInObjectType(Selector Sel, QualType Ty,
bool IsInstance);
bool CheckARCMethodDecl(ObjCMethodDecl *method);
bool inferObjCARCLifetime(ValueDecl *decl);
ExprResult
HandleExprPropertyRefExpr(const ObjCObjectPointerType *OPT,
Expr *BaseExpr,
SourceLocation OpLoc,
DeclarationName MemberName,
SourceLocation MemberLoc,
SourceLocation SuperLoc, QualType SuperType,
bool Super);
ExprResult
ActOnClassPropertyRefExpr(IdentifierInfo &receiverName,
IdentifierInfo &propertyName,
SourceLocation receiverNameLoc,
SourceLocation propertyNameLoc);
ObjCMethodDecl *tryCaptureObjCSelf(SourceLocation Loc);
/// \brief Describes the kind of message expression indicated by a message
/// send that starts with an identifier.
enum ObjCMessageKind {
/// \brief The message is sent to 'super'.
ObjCSuperMessage,
/// \brief The message is an instance message.
ObjCInstanceMessage,
/// \brief The message is a class message, and the identifier is a type
/// name.
ObjCClassMessage
};
ObjCMessageKind getObjCMessageKind(Scope *S,
IdentifierInfo *Name,
SourceLocation NameLoc,
bool IsSuper,
bool HasTrailingDot,
ParsedType &ReceiverType);
ExprResult ActOnSuperMessage(Scope *S, SourceLocation SuperLoc,
Selector Sel,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args);
ExprResult BuildClassMessage(TypeSourceInfo *ReceiverTypeInfo,
QualType ReceiverType,
SourceLocation SuperLoc,
Selector Sel,
ObjCMethodDecl *Method,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args,
bool isImplicit = false);
ExprResult BuildClassMessageImplicit(QualType ReceiverType,
bool isSuperReceiver,
SourceLocation Loc,
Selector Sel,
ObjCMethodDecl *Method,
MultiExprArg Args);
ExprResult ActOnClassMessage(Scope *S,
ParsedType Receiver,
Selector Sel,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args);
ExprResult BuildInstanceMessage(Expr *Receiver,
QualType ReceiverType,
SourceLocation SuperLoc,
Selector Sel,
ObjCMethodDecl *Method,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args,
bool isImplicit = false);
ExprResult BuildInstanceMessageImplicit(Expr *Receiver,
QualType ReceiverType,
SourceLocation Loc,
Selector Sel,
ObjCMethodDecl *Method,
MultiExprArg Args);
ExprResult ActOnInstanceMessage(Scope *S,
Expr *Receiver,
Selector Sel,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args);
ExprResult BuildObjCBridgedCast(SourceLocation LParenLoc,
ObjCBridgeCastKind Kind,
SourceLocation BridgeKeywordLoc,
TypeSourceInfo *TSInfo,
Expr *SubExpr);
ExprResult ActOnObjCBridgedCast(Scope *S,
SourceLocation LParenLoc,
ObjCBridgeCastKind Kind,
SourceLocation BridgeKeywordLoc,
ParsedType Type,
SourceLocation RParenLoc,
Expr *SubExpr);
bool checkInitMethod(ObjCMethodDecl *method, QualType receiverTypeIfCall);
/// \brief Check whether the given new method is a valid override of the
/// given overridden method, and set any properties that should be inherited.
void CheckObjCMethodOverride(ObjCMethodDecl *NewMethod,
const ObjCMethodDecl *Overridden);
/// \brief Describes the compatibility of a result type with its method.
enum ResultTypeCompatibilityKind {
RTC_Compatible,   // The result type is compatible.
RTC_Incompatible, // The result type conflicts.
RTC_Unknown       // Compatibility could not be determined.
};
void CheckObjCMethodOverrides(ObjCMethodDecl *ObjCMethod,
ObjCInterfaceDecl *CurrentClass,
ResultTypeCompatibilityKind RTC);
enum PragmaOptionsAlignKind {
POAK_Native, // #pragma options align=native
POAK_Natural, // #pragma options align=natural
POAK_Packed, // #pragma options align=packed
POAK_Power, // #pragma options align=power
POAK_Mac68k, // #pragma options align=mac68k
POAK_Reset // #pragma options align=reset
};
/// ActOnPragmaOptionsAlign - Called on well formed \#pragma options align.
void ActOnPragmaOptionsAlign(PragmaOptionsAlignKind Kind,
SourceLocation PragmaLoc);
enum PragmaPackKind {
PPK_Default, // #pragma pack([n])
PPK_Show, // #pragma pack(show), only supported by MSVC.
PPK_Push, // #pragma pack(push, [identifier], [n])
PPK_Pop // #pragma pack(pop, [identifier], [n])
};
enum PragmaMSStructKind {
PMSST_OFF, // #pragma ms_struct off
PMSST_ON // #pragma ms_struct on
};
enum PragmaMSCommentKind {
PCK_Unknown,
PCK_Linker, // #pragma comment(linker, ...)
PCK_Lib, // #pragma comment(lib, ...)
PCK_Compiler, // #pragma comment(compiler, ...)
PCK_ExeStr, // #pragma comment(exestr, ...)
PCK_User // #pragma comment(user, ...)
};
/// ActOnPragmaPack - Called on well formed \#pragma pack(...).
void ActOnPragmaPack(PragmaPackKind Kind,
IdentifierInfo *Name,
Expr *Alignment,
SourceLocation PragmaLoc,
SourceLocation LParenLoc,
SourceLocation RParenLoc);
/// ActOnPragmaMSStruct - Called on well formed \#pragma ms_struct [on|off].
void ActOnPragmaMSStruct(PragmaMSStructKind Kind);
/// ActOnPragmaMSComment - Called on well formed \#pragma comment(kind, "arg").
void ActOnPragmaMSComment(PragmaMSCommentKind Kind, StringRef Arg);
/// ActOnPragmaDetectMismatch - Call on well-formed \#pragma detect_mismatch
void ActOnPragmaDetectMismatch(StringRef Name, StringRef Value);
/// ActOnPragmaUnused - Called on well-formed '\#pragma unused'.
void ActOnPragmaUnused(const Token &Identifier,
Scope *curScope,
SourceLocation PragmaLoc);
/// ActOnPragmaVisibility - Called on well formed \#pragma GCC visibility... .
void ActOnPragmaVisibility(const IdentifierInfo* VisType,
SourceLocation PragmaLoc);
NamedDecl *DeclClonePragmaWeak(NamedDecl *ND, IdentifierInfo *II,
SourceLocation Loc);
void DeclApplyPragmaWeak(Scope *S, NamedDecl *ND, WeakInfo &W);
/// ActOnPragmaWeakID - Called on well formed \#pragma weak ident.
void ActOnPragmaWeakID(IdentifierInfo* WeakName,
SourceLocation PragmaLoc,
SourceLocation WeakNameLoc);
/// ActOnPragmaRedefineExtname - Called on well formed
/// \#pragma redefine_extname oldname newname.
void ActOnPragmaRedefineExtname(IdentifierInfo* WeakName,
IdentifierInfo* AliasName,
SourceLocation PragmaLoc,
SourceLocation WeakNameLoc,
SourceLocation AliasNameLoc);
/// ActOnPragmaWeakAlias - Called on well formed \#pragma weak ident = ident.
void ActOnPragmaWeakAlias(IdentifierInfo* WeakName,
IdentifierInfo* AliasName,
SourceLocation PragmaLoc,
SourceLocation WeakNameLoc,
SourceLocation AliasNameLoc);
/// ActOnPragmaFPContract - Called on well formed
/// \#pragma {STDC,OPENCL} FP_CONTRACT
void ActOnPragmaFPContract(tok::OnOffSwitch OOS);
/// AddAlignmentAttributesForRecord - Adds any needed alignment attributes to
/// a the record decl, to handle '\#pragma pack' and '\#pragma options align'.
void AddAlignmentAttributesForRecord(RecordDecl *RD);
/// AddMsStructLayoutForRecord - Adds ms_struct layout attribute to record.
void AddMsStructLayoutForRecord(RecordDecl *RD);
/// FreePackedContext - Deallocate and null out PackContext.
void FreePackedContext();
/// PushNamespaceVisibilityAttr - Note that we've entered a
/// namespace with a visibility attribute.
void PushNamespaceVisibilityAttr(const VisibilityAttr *Attr,
SourceLocation Loc);
/// AddPushedVisibilityAttribute - If '\#pragma GCC visibility' was used,
/// add an appropriate visibility attribute.
void AddPushedVisibilityAttribute(Decl *RD);
/// PopPragmaVisibility - Pop the top element of the visibility stack; used
/// for '\#pragma GCC visibility' and visibility attributes on namespaces.
void PopPragmaVisibility(bool IsNamespaceEnd, SourceLocation EndLoc);
/// FreeVisContext - Deallocate and null out VisContext.
void FreeVisContext();
/// AddCFAuditedAttribute - Check whether we're currently within
/// '\#pragma clang arc_cf_code_audited' and, if so, consider adding
/// the appropriate attribute.
void AddCFAuditedAttribute(Decl *D);
/// AddAlignedAttr - Adds an aligned attribute to a particular declaration.
void AddAlignedAttr(SourceRange AttrRange, Decl *D, Expr *E,
unsigned SpellingListIndex, bool IsPackExpansion);
void AddAlignedAttr(SourceRange AttrRange, Decl *D, TypeSourceInfo *T,
unsigned SpellingListIndex, bool IsPackExpansion);
// OpenMP directives and clauses.
/// \brief Called on correct id-expression from the '#pragma omp
/// threadprivate'.
ExprResult ActOnOpenMPIdExpression(Scope *CurScope,
CXXScopeSpec &ScopeSpec,
const DeclarationNameInfo &Id);
/// \brief Called on well-formed '#pragma omp threadprivate'.
DeclGroupPtrTy ActOnOpenMPThreadprivateDirective(
SourceLocation Loc,
ArrayRef<Expr *> VarList);
// \brief Builds a new OpenMPThreadPrivateDecl and checks its correctness.
OMPThreadPrivateDecl *CheckOMPThreadPrivateDecl(
SourceLocation Loc,
ArrayRef<Expr *> VarList);
/// \brief The kind of conversion being performed.
enum CheckedConversionKind {
/// \brief An implicit conversion.
CCK_ImplicitConversion,
/// \brief A C-style cast.
CCK_CStyleCast,
/// \brief A functional-style cast.
CCK_FunctionalCast,
/// \brief A cast other than a C-style cast.
CCK_OtherCast
};
/// ImpCastExprToType - If Expr is not of type 'Type', insert an implicit
/// cast. If there is already an implicit cast, merge into the existing one.
/// If isLvalue, the result of the cast is an lvalue.
ExprResult ImpCastExprToType(Expr *E, QualType Type, CastKind CK,
ExprValueKind VK = VK_RValue,
const CXXCastPath *BasePath = 0,
CheckedConversionKind CCK
= CCK_ImplicitConversion);
/// ScalarTypeToBooleanCastKind - Returns the cast kind corresponding
/// to the conversion from scalar type ScalarTy to the Boolean type.
static CastKind ScalarTypeToBooleanCastKind(QualType ScalarTy);
/// IgnoredValueConversions - Given that an expression's result is
/// syntactically ignored, perform any conversions that are
/// required.
ExprResult IgnoredValueConversions(Expr *E);
// UsualUnaryConversions - promotes integers (C99 6.3.1.1p2) and converts
// functions and arrays to their respective pointers (C99 6.3.2.1).
ExprResult UsualUnaryConversions(Expr *E);
// DefaultFunctionArrayConversion - converts functions and arrays
// to their respective pointers (C99 6.3.2.1).
ExprResult DefaultFunctionArrayConversion(Expr *E);
// DefaultFunctionArrayLvalueConversion - converts functions and
// arrays to their respective pointers and performs the
// lvalue-to-rvalue conversion.
ExprResult DefaultFunctionArrayLvalueConversion(Expr *E);
// DefaultLvalueConversion - performs lvalue-to-rvalue conversion on
// the operand. This is DefaultFunctionArrayLvalueConversion,
// except that it assumes the operand isn't of function or array
// type.
ExprResult DefaultLvalueConversion(Expr *E);
// DefaultArgumentPromotion (C99 6.5.2.2p6). Used for function calls that
// do not have a prototype. Integer promotions are performed on each
// argument, and arguments that have type float are promoted to double.
ExprResult DefaultArgumentPromotion(Expr *E);
// Used for emitting the right warning by DefaultVariadicArgumentPromotion.
/// \brief The kind of call a variadic argument is being passed through.
enum VariadicCallType {
VariadicFunction,     // Variadic function call.
VariadicBlock,        // Variadic block call.
VariadicMethod,       // Variadic Objective-C method send.
VariadicConstructor,  // Variadic constructor call.
VariadicDoesNotApply  // The call is not variadic.
};
VariadicCallType getVariadicCallType(FunctionDecl *FDecl,
const FunctionProtoType *Proto,
Expr *Fn);
// Used for determining in which context a type is allowed to be passed to a
// vararg function.
enum VarArgKind {
VAK_Valid,        // Valid vararg type.
VAK_ValidInCXX11, // Valid as a vararg only in C++11 mode.
VAK_Invalid       // Never a valid vararg type.
};
// Determines which VarArgKind fits an expression.
VarArgKind isValidVarArgType(const QualType &Ty);
/// GatherArgumentsForCall - Collects argument expressions into \p AllArgs
/// for the various forms of call prototypes.
bool GatherArgumentsForCall(SourceLocation CallLoc,
FunctionDecl *FDecl,
const FunctionProtoType *Proto,
unsigned FirstProtoArg,
ArrayRef<Expr *> Args,
SmallVector<Expr *, 8> &AllArgs,
VariadicCallType CallType = VariadicDoesNotApply,
bool AllowExplicit = false,
bool IsListInitialization = false);
// DefaultVariadicArgumentPromotion - Like DefaultArgumentPromotion, but
// will create a runtime trap if the resulting type is not a POD type.
ExprResult DefaultVariadicArgumentPromotion(Expr *E, VariadicCallType CT,
FunctionDecl *FDecl);
/// Checks to see if the given expression is a valid argument to a variadic
/// function, issuing a diagnostic and returning NULL if not.
bool variadicArgumentPODCheck(const Expr *E, VariadicCallType CT);
// UsualArithmeticConversions - performs the UsualUnaryConversions on its
// operands and then handles various conversions that are common to binary
// operators (C99 6.3.1.8). If both operands aren't arithmetic, this
// routine returns the first non-arithmetic type found. The client is
// responsible for emitting appropriate error diagnostics.
QualType UsualArithmeticConversions(ExprResult &LHS, ExprResult &RHS,
bool IsCompAssign = false);
/// AssignConvertType - All of the 'assignment' semantic checks return this
/// enum to indicate whether the assignment was allowed. These checks are
/// done for simple assignments, as well as initialization, return from
/// function, argument passing, etc. The query is phrased in terms of a
/// source and destination type.
enum AssignConvertType {
/// Compatible - the types are compatible according to the standard.
Compatible,
/// PointerToInt - The assignment converts a pointer to an int, which we
/// accept as an extension.
PointerToInt,
/// IntToPointer - The assignment converts an int to a pointer, which we
/// accept as an extension.
IntToPointer,
/// FunctionVoidPointer - The assignment is between a function pointer and
/// void*, which the standard doesn't allow, but we accept as an extension.
FunctionVoidPointer,
/// IncompatiblePointer - The assignment is between two pointer types that
/// are not compatible, but we accept them as an extension.
IncompatiblePointer,
/// IncompatiblePointerSign - The assignment is between two pointer types
/// which point to integers which have a different sign, but are otherwise
/// identical. This is a subset of the above, but broken out because it's by
/// far the most common case of incompatible pointers.
IncompatiblePointerSign,
/// CompatiblePointerDiscardsQualifiers - The assignment discards
/// c/v/r qualifiers, which we accept as an extension.
CompatiblePointerDiscardsQualifiers,
/// IncompatiblePointerDiscardsQualifiers - The assignment
/// discards qualifiers that we don't permit to be discarded,
/// like address spaces.
IncompatiblePointerDiscardsQualifiers,
/// IncompatibleNestedPointerQualifiers - The assignment is between two
/// nested pointer types, and the qualifiers other than the first two
/// levels differ e.g. char ** -> const char **, but we accept them as an
/// extension.
IncompatibleNestedPointerQualifiers,
/// IncompatibleVectors - The assignment is between two vector types that
/// have the same size, which we accept as an extension.
IncompatibleVectors,
/// IntToBlockPointer - The assignment converts an int to a block
/// pointer. We disallow this.
IntToBlockPointer,
/// IncompatibleBlockPointer - The assignment is between two block
/// pointer types that are not compatible.
IncompatibleBlockPointer,
/// IncompatibleObjCQualifiedId - The assignment is between a qualified
/// id type and something else (that is incompatible with it). For example,
/// "id <XXX>" = "Foo *", where "Foo *" doesn't implement the XXX protocol.
IncompatibleObjCQualifiedId,
/// IncompatibleObjCWeakRef - Assigning a weak-unavailable object to an
/// object with __weak qualifier.
IncompatibleObjCWeakRef,
/// Incompatible - We reject this conversion outright, it is invalid to
/// represent it in the AST.
Incompatible
};
/// DiagnoseAssignmentResult - Emit a diagnostic, if required, for the
/// assignment conversion type specified by ConvTy. This returns true if the
/// conversion was invalid or false if the conversion was accepted.
bool DiagnoseAssignmentResult(AssignConvertType ConvTy,
SourceLocation Loc,
QualType DstType, QualType SrcType,
Expr *SrcExpr, AssignmentAction Action,
bool *Complained = 0);
/// DiagnoseAssignmentEnum - Warn if assignment to enum is a constant
/// integer not in the range of enum values.
void DiagnoseAssignmentEnum(QualType DstType, QualType SrcType,
Expr *SrcExpr);
/// CheckAssignmentConstraints - Perform type checking for assignment,
/// argument passing, variable initialization, and function return values.
/// C99 6.5.16.
AssignConvertType CheckAssignmentConstraints(SourceLocation Loc,
QualType LHSType,
QualType RHSType);
/// Check assignment constraints and prepare for a conversion of the
/// RHS to the LHS type.
AssignConvertType CheckAssignmentConstraints(QualType LHSType,
ExprResult &RHS,
CastKind &Kind);
// CheckSingleAssignmentConstraints - Currently used by
// CheckAssignmentOperands, and ActOnReturnStmt. Prior to type checking,
// this routine performs the default function/array conversions.
AssignConvertType CheckSingleAssignmentConstraints(QualType LHSType,
ExprResult &RHS,
bool Diagnose = true);
// \brief If the lhs type is a transparent union, check whether we
// can initialize the transparent union with the given expression.
AssignConvertType CheckTransparentUnionArgumentConstraints(QualType ArgType,
ExprResult &RHS);
bool IsStringLiteralToNonConstPointerConversion(Expr *From, QualType ToType);
bool CheckExceptionSpecCompatibility(Expr *From, QualType ToType);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
AssignmentAction Action,
bool AllowExplicit = false);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
AssignmentAction Action,
bool AllowExplicit,
ImplicitConversionSequence& ICS);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
const ImplicitConversionSequence& ICS,
AssignmentAction Action,
CheckedConversionKind CCK
= CCK_ImplicitConversion);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
const StandardConversionSequence& SCS,
AssignmentAction Action,
CheckedConversionKind CCK);
/// the following "Check" methods will return a valid/converted QualType
/// or a null QualType (indicating an error diagnostic was issued).
/// type checking binary operators (subroutines of CreateBuiltinBinOp).
QualType InvalidOperands(SourceLocation Loc, ExprResult &LHS,
ExprResult &RHS);
QualType CheckPointerToMemberOperands( // C++ 5.5
ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK,
SourceLocation OpLoc, bool isIndirect);
QualType CheckMultiplyDivideOperands( // C99 6.5.5
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign,
bool IsDivide);
QualType CheckRemainderOperands( // C99 6.5.5
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
bool IsCompAssign = false);
QualType CheckAdditionOperands( // C99 6.5.6
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, unsigned Opc,
QualType* CompLHSTy = 0);
QualType CheckSubtractionOperands( // C99 6.5.6
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
QualType* CompLHSTy = 0);
QualType CheckShiftOperands( // C99 6.5.7
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, unsigned Opc,
bool IsCompAssign = false);
QualType CheckCompareOperands( // C99 6.5.8/9
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, unsigned OpaqueOpc,
bool isRelational);
QualType CheckBitwiseOperands( // C99 6.5.[10...12]
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
bool IsCompAssign = false);
QualType CheckLogicalOperands( // C99 6.5.[13,14]
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, unsigned Opc);
// CheckAssignmentOperands is used for both simple and compound assignment.
// For simple assignment, pass both expressions and a null converted type.
// For compound assignment, pass both expressions and the converted type.
QualType CheckAssignmentOperands( // C99 6.5.16.[1,2]
Expr *LHSExpr, ExprResult &RHS, SourceLocation Loc, QualType CompoundType);
ExprResult checkPseudoObjectIncDec(Scope *S, SourceLocation OpLoc,
UnaryOperatorKind Opcode, Expr *Op);
ExprResult checkPseudoObjectAssignment(Scope *S, SourceLocation OpLoc,
BinaryOperatorKind Opcode,
Expr *LHS, Expr *RHS);
ExprResult checkPseudoObjectRValue(Expr *E);
Expr *recreateSyntacticForm(PseudoObjectExpr *E);
QualType CheckConditionalOperands( // C99 6.5.15
ExprResult &Cond, ExprResult &LHS, ExprResult &RHS,
ExprValueKind &VK, ExprObjectKind &OK, SourceLocation QuestionLoc);
QualType CXXCheckConditionalOperands( // C++ 5.16
ExprResult &cond, ExprResult &lhs, ExprResult &rhs,
ExprValueKind &VK, ExprObjectKind &OK, SourceLocation questionLoc);
QualType FindCompositePointerType(SourceLocation Loc, Expr *&E1, Expr *&E2,
bool *NonStandardCompositeType = 0);
/// \brief Convenience overload: computes the composite pointer type for two
/// expressions held in ExprResults, unwrapping them for the Expr*& overload
/// and re-wrapping the (possibly rewritten) expressions on the way out.
QualType FindCompositePointerType(SourceLocation Loc,
ExprResult &E1, ExprResult &E2,
bool *NonStandardCompositeType = 0) {
// Release ownership so the pointer-based overload may rewrite the exprs.
Expr *LHSExpr = E1.take();
Expr *RHSExpr = E2.take();
QualType Result =
FindCompositePointerType(Loc, LHSExpr, RHSExpr, NonStandardCompositeType);
// Hand ownership of the (possibly updated) expressions back to the results.
E1 = Owned(LHSExpr);
E2 = Owned(RHSExpr);
return Result;
}
QualType FindCompositeObjCPointerType(ExprResult &LHS, ExprResult &RHS,
SourceLocation QuestionLoc);
bool DiagnoseConditionalForNull(Expr *LHSExpr, Expr *RHSExpr,
SourceLocation QuestionLoc);
/// type checking for vector binary operators.
QualType CheckVectorOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc, bool IsCompAssign);
QualType GetSignedVectorType(QualType V);
QualType CheckVectorCompareOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc, bool isRelational);
QualType CheckVectorLogicalOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc);
/// type checking declaration initializers (C99 6.7.8)
bool CheckForConstantInitializer(Expr *e, QualType t);
// type checking C++ declaration initializers (C++ [dcl.init]).
/// ReferenceCompareResult - Expresses the result of comparing two
/// types (cv1 T1 and cv2 T2) to determine their compatibility for the
/// purposes of initialization by reference (C++ [dcl.init.ref]p4).
enum ReferenceCompareResult {
/// Ref_Incompatible - The two types are incompatible, so direct
/// reference binding is not possible.
Ref_Incompatible = 0,
/// Ref_Related - The two types are reference-related, which means
/// that their unqualified forms (T1 and T2) are either the same
/// or T1 is a base class of T2.
Ref_Related,
/// Ref_Compatible_With_Added_Qualification - The two types are
/// reference-compatible with added qualification, meaning that
/// they are reference-compatible and the qualifiers on T1 (cv1)
/// are greater than the qualifiers on T2 (cv2).
Ref_Compatible_With_Added_Qualification,
/// Ref_Compatible - The two types are reference-compatible and
/// have equivalent qualifiers (cv1 == cv2).
Ref_Compatible
};
ReferenceCompareResult CompareReferenceRelationship(SourceLocation Loc,
QualType T1, QualType T2,
bool &DerivedToBase,
bool &ObjCConversion,
bool &ObjCLifetimeConversion);
ExprResult checkUnknownAnyCast(SourceRange TypeRange, QualType CastType,
Expr *CastExpr, CastKind &CastKind,
ExprValueKind &VK, CXXCastPath &Path);
/// \brief Force an expression with unknown-type to an expression of the
/// given type.
ExprResult forceUnknownAnyToType(Expr *E, QualType ToType);
/// \brief Type-check an expression that's being passed to an
/// __unknown_anytype parameter.
ExprResult checkUnknownAnyArg(SourceLocation callLoc,
Expr *result, QualType ¶mType);
// CheckVectorCast - check type constraints for vectors.
// Since vectors are an extension, there is no C standard reference for this.
// We allow casting between vectors and integer datatypes of the same size.
// returns true if the cast is invalid
bool CheckVectorCast(SourceRange R, QualType VectorTy, QualType Ty,
CastKind &Kind);
// CheckExtVectorCast - check type constraints for extended vectors.
// Since vectors are an extension, there is no C standard reference for this.
// We allow casting between vectors and integer datatypes of the same size,
// or vectors and the element type of that vector.
// returns the cast expr
ExprResult CheckExtVectorCast(SourceRange R, QualType DestTy, Expr *CastExpr,
CastKind &Kind);
ExprResult BuildCXXFunctionalCastExpr(TypeSourceInfo *TInfo,
SourceLocation LParenLoc,
Expr *CastExpr,
SourceLocation RParenLoc);
enum ARCConversionResult { ACR_okay, ACR_unbridged };
/// \brief Checks for invalid conversions and casts between
/// retainable pointers and other pointer kinds.
ARCConversionResult CheckObjCARCConversion(SourceRange castRange,
QualType castType, Expr *&op,
CheckedConversionKind CCK);
Expr *stripARCUnbridgedCast(Expr *e);
void diagnoseARCUnbridgedCast(Expr *e);
bool CheckObjCARCUnavailableWeakConversion(QualType castType,
QualType ExprType);
/// checkRetainCycles - Check whether an Objective-C message send
/// might create an obvious retain cycle.
void checkRetainCycles(ObjCMessageExpr *msg);
void checkRetainCycles(Expr *receiver, Expr *argument);
void checkRetainCycles(VarDecl *Var, Expr *Init);
/// checkUnsafeAssigns - Check whether +1 expr is being assigned
/// to weak/__unsafe_unretained type.
bool checkUnsafeAssigns(SourceLocation Loc, QualType LHS, Expr *RHS);
/// checkUnsafeExprAssigns - Check whether +1 expr is being assigned
/// to weak/__unsafe_unretained expression.
void checkUnsafeExprAssigns(SourceLocation Loc, Expr *LHS, Expr *RHS);
/// CheckMessageArgumentTypes - Check types in an Obj-C message send.
/// \param Method - May be null.
/// \param [out] ReturnType - The return type of the send.
/// \return true iff there were any incompatible types.
bool CheckMessageArgumentTypes(QualType ReceiverType,
MultiExprArg Args, Selector Sel,
ArrayRef<SourceLocation> SelectorLocs,
ObjCMethodDecl *Method, bool isClassMessage,
bool isSuperMessage,
SourceLocation lbrac, SourceLocation rbrac,
QualType &ReturnType, ExprValueKind &VK);
/// \brief Determine the result of a message send expression based on
/// the type of the receiver, the method expected to receive the message,
/// and the form of the message send.
QualType getMessageSendResultType(QualType ReceiverType,
ObjCMethodDecl *Method,
bool isClassMessage, bool isSuperMessage);
/// \brief If the given expression involves a message send to a method
/// with a related result type, emit a note describing what happened.
void EmitRelatedResultTypeNote(const Expr *E);
/// \brief Given that we had incompatible pointer types in a return
/// statement, check whether we're in a method with a related result
/// type, and if so, emit a note describing what happened.
void EmitRelatedResultTypeNoteForReturn(QualType destType);
/// CheckBooleanCondition - Diagnose problems involving the use of
/// the given expression as a boolean condition (e.g. in an if
/// statement). Also performs the standard function and array
/// decays, possibly changing the input variable.
///
/// \param Loc - A location associated with the condition, e.g. the
/// 'if' keyword.
/// \return true iff there were any errors
ExprResult CheckBooleanCondition(Expr *E, SourceLocation Loc);
ExprResult ActOnBooleanCondition(Scope *S, SourceLocation Loc,
Expr *SubExpr);
/// DiagnoseAssignmentAsCondition - Given that an expression is
/// being used as a boolean condition, warn if it's an assignment.
void DiagnoseAssignmentAsCondition(Expr *E);
/// \brief Redundant parentheses over an equality comparison can indicate
/// that the user intended an assignment used as condition.
void DiagnoseEqualityWithExtraParens(ParenExpr *ParenE);
/// CheckCXXBooleanCondition - Returns true if conversion to bool is invalid.
ExprResult CheckCXXBooleanCondition(Expr *CondExpr);
/// ConvertIntegerToTypeWarnOnOverflow - Convert the specified APInt to have
/// the specified width and sign. If an overflow occurs, detect it and emit
/// the specified diagnostic.
void ConvertIntegerToTypeWarnOnOverflow(llvm::APSInt &OldVal,
unsigned NewWidth, bool NewSign,
SourceLocation Loc, unsigned DiagID);
/// Checks that the Objective-C declaration is declared in the global scope.
/// Emits an error and marks the declaration as invalid if it's not declared
/// in the global scope.
bool CheckObjCDeclScope(Decl *D);
/// \brief Abstract base class used for diagnosing integer constant
/// expression violations.
class VerifyICEDiagnoser {
public:
// When true, callers should suppress ICE diagnostics rather than emit them.
bool Suppress;
VerifyICEDiagnoser(bool Suppress = false) : Suppress(Suppress) { }
// Diagnose that the expression at Loc (covering SR) is not an integer
// constant expression. Pure virtual: every diagnoser must provide this.
virtual void diagnoseNotICE(Sema &S, SourceLocation Loc, SourceRange SR) =0;
// Diagnose that the value was obtained by constant folding rather than
// being a strict ICE. Defined out of line; overriding is optional.
virtual void diagnoseFold(Sema &S, SourceLocation Loc, SourceRange SR);
virtual ~VerifyICEDiagnoser() { }
};
/// VerifyIntegerConstantExpression - Verifies that an expression is an ICE,
/// and reports the appropriate diagnostics. Returns false on success.
/// Can optionally return the value of the expression.
ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
VerifyICEDiagnoser &Diagnoser,
bool AllowFold = true);
ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
unsigned DiagID,
bool AllowFold = true);
ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result=0);
/// VerifyBitField - verifies that a bit field expression is an ICE and has
/// the correct width, and that the field type is valid.
/// Returns false on success.
/// Can optionally return whether the bit-field is of width 0
ExprResult VerifyBitField(SourceLocation FieldLoc, IdentifierInfo *FieldName,
QualType FieldTy, Expr *BitWidth,
bool *ZeroWidth = 0);
enum CUDAFunctionTarget {
CFT_Device,
CFT_Global,
CFT_Host,
CFT_HostDevice
};
CUDAFunctionTarget IdentifyCUDATarget(const FunctionDecl *D);
bool CheckCUDATarget(CUDAFunctionTarget CallerTarget,
CUDAFunctionTarget CalleeTarget);
bool CheckCUDATarget(const FunctionDecl *Caller, const FunctionDecl *Callee) {
return CheckCUDATarget(IdentifyCUDATarget(Caller),
IdentifyCUDATarget(Callee));
}
/// \name Code completion
//@{
/// \brief Describes the context in which code completion occurs.
enum ParserCompletionContext {
/// \brief Code completion occurs at top-level or namespace context.
PCC_Namespace,
/// \brief Code completion occurs within a class, struct, or union.
PCC_Class,
/// \brief Code completion occurs within an Objective-C interface, protocol,
/// or category.
PCC_ObjCInterface,
/// \brief Code completion occurs within an Objective-C implementation or
/// category implementation
PCC_ObjCImplementation,
/// \brief Code completion occurs within the list of instance variables
/// in an Objective-C interface, protocol, category, or implementation.
PCC_ObjCInstanceVariableList,
/// \brief Code completion occurs following one or more template
/// headers.
PCC_Template,
/// \brief Code completion occurs following one or more template
/// headers within a class.
PCC_MemberTemplate,
/// \brief Code completion occurs within an expression.
PCC_Expression,
/// \brief Code completion occurs within a statement, which may
/// also be an expression or a declaration.
PCC_Statement,
/// \brief Code completion occurs at the beginning of the
/// initialization statement (or expression) in a for loop.
PCC_ForInit,
/// \brief Code completion occurs within the condition of an if,
/// while, switch, or for statement.
PCC_Condition,
/// \brief Code completion occurs within the body of a function on a
/// recovery path, where we do not have a specific handle on our position
/// in the grammar.
PCC_RecoveryInFunction,
/// \brief Code completion occurs where only a type is permitted.
PCC_Type,
/// \brief Code completion occurs in a parenthesized expression, which
/// might also be a type cast.
PCC_ParenthesizedExpression,
/// \brief Code completion occurs within a sequence of declaration
/// specifiers within a function, method, or block.
PCC_LocalDeclarationSpecifiers
};
void CodeCompleteModuleImport(SourceLocation ImportLoc, ModuleIdPath Path);
void CodeCompleteOrdinaryName(Scope *S,
ParserCompletionContext CompletionContext);
void CodeCompleteDeclSpec(Scope *S, DeclSpec &DS,
bool AllowNonIdentifiers,
bool AllowNestedNameSpecifiers);
struct CodeCompleteExpressionData;
void CodeCompleteExpression(Scope *S,
const CodeCompleteExpressionData &Data);
void CodeCompleteMemberReferenceExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
bool IsArrow);
void CodeCompletePostfixExpression(Scope *S, ExprResult LHS);
void CodeCompleteTag(Scope *S, unsigned TagSpec);
void CodeCompleteTypeQualifiers(DeclSpec &DS);
void CodeCompleteCase(Scope *S);
void CodeCompleteCall(Scope *S, Expr *Fn, ArrayRef<Expr *> Args);
void CodeCompleteInitializer(Scope *S, Decl *D);
void CodeCompleteReturn(Scope *S);
void CodeCompleteAfterIf(Scope *S);
void CodeCompleteAssignmentRHS(Scope *S, Expr *LHS);
void CodeCompleteQualifiedId(Scope *S, CXXScopeSpec &SS,
bool EnteringContext);
void CodeCompleteUsing(Scope *S);
void CodeCompleteUsingDirective(Scope *S);
void CodeCompleteNamespaceDecl(Scope *S);
void CodeCompleteNamespaceAliasDecl(Scope *S);
void CodeCompleteOperatorName(Scope *S);
void CodeCompleteConstructorInitializer(Decl *Constructor,
CXXCtorInitializer** Initializers,
unsigned NumInitializers);
void CodeCompleteLambdaIntroducer(Scope *S, LambdaIntroducer &Intro,
bool AfterAmpersand);
void CodeCompleteObjCAtDirective(Scope *S);
void CodeCompleteObjCAtVisibility(Scope *S);
void CodeCompleteObjCAtStatement(Scope *S);
void CodeCompleteObjCAtExpression(Scope *S);
void CodeCompleteObjCPropertyFlags(Scope *S, ObjCDeclSpec &ODS);
void CodeCompleteObjCPropertyGetter(Scope *S);
void CodeCompleteObjCPropertySetter(Scope *S);
void CodeCompleteObjCPassingType(Scope *S, ObjCDeclSpec &DS,
bool IsParameter);
void CodeCompleteObjCMessageReceiver(Scope *S);
void CodeCompleteObjCSuperMessage(Scope *S, SourceLocation SuperLoc,
ArrayRef<IdentifierInfo *> SelIdents,
bool AtArgumentExpression);
void CodeCompleteObjCClassMessage(Scope *S, ParsedType Receiver,
ArrayRef<IdentifierInfo *> SelIdents,
bool AtArgumentExpression,
bool IsSuper = false);
void CodeCompleteObjCInstanceMessage(Scope *S, Expr *Receiver,
ArrayRef<IdentifierInfo *> SelIdents,
bool AtArgumentExpression,
ObjCInterfaceDecl *Super = 0);
void CodeCompleteObjCForCollection(Scope *S,
DeclGroupPtrTy IterationVar);
void CodeCompleteObjCSelector(Scope *S,
ArrayRef<IdentifierInfo *> SelIdents);
void CodeCompleteObjCProtocolReferences(IdentifierLocPair *Protocols,
unsigned NumProtocols);
void CodeCompleteObjCProtocolDecl(Scope *S);
void CodeCompleteObjCInterfaceDecl(Scope *S);
void CodeCompleteObjCSuperclass(Scope *S,
IdentifierInfo *ClassName,
SourceLocation ClassNameLoc);
void CodeCompleteObjCImplementationDecl(Scope *S);
void CodeCompleteObjCInterfaceCategory(Scope *S,
IdentifierInfo *ClassName,
SourceLocation ClassNameLoc);
void CodeCompleteObjCImplementationCategory(Scope *S,
IdentifierInfo *ClassName,
SourceLocation ClassNameLoc);
void CodeCompleteObjCPropertyDefinition(Scope *S);
void CodeCompleteObjCPropertySynthesizeIvar(Scope *S,
IdentifierInfo *PropertyName);
void CodeCompleteObjCMethodDecl(Scope *S,
bool IsInstanceMethod,
ParsedType ReturnType);
void CodeCompleteObjCMethodDeclSelector(Scope *S,
bool IsInstanceMethod,
bool AtParameterName,
ParsedType ReturnType,
ArrayRef<IdentifierInfo *> SelIdents);
void CodeCompletePreprocessorDirective(bool InConditional);
void CodeCompleteInPreprocessorConditionalExclusion(Scope *S);
void CodeCompletePreprocessorMacroName(bool IsDefinition);
void CodeCompletePreprocessorExpression();
void CodeCompletePreprocessorMacroArgument(Scope *S,
IdentifierInfo *Macro,
MacroInfo *MacroInfo,
unsigned Argument);
void CodeCompleteNaturalLanguage();
void GatherGlobalCodeCompletions(CodeCompletionAllocator &Allocator,
CodeCompletionTUInfo &CCTUInfo,
SmallVectorImpl<CodeCompletionResult> &Results);
//@}
//===--------------------------------------------------------------------===//
// Extra semantic analysis beyond the C type system
public:
SourceLocation getLocationOfStringLiteralByte(const StringLiteral *SL,
unsigned ByteNo) const;
private:
void CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr,
const ArraySubscriptExpr *ASE=0,
bool AllowOnePastEnd=true, bool IndexNegated=false);
void CheckArrayAccess(const Expr *E);
// Used to grab the relevant information from a FormatAttr and a
// FunctionDeclaration.
struct FormatStringInfo {
unsigned FormatIdx;
unsigned FirstDataArg;
bool HasVAListArg;
};
bool getFormatStringInfo(const FormatAttr *Format, bool IsCXXMember,
FormatStringInfo *FSI);
bool CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall,
const FunctionProtoType *Proto);
bool CheckObjCMethodCall(ObjCMethodDecl *Method, SourceLocation loc,
ArrayRef<const Expr *> Args);
bool CheckPointerCall(NamedDecl *NDecl, CallExpr *TheCall,
const FunctionProtoType *Proto);
bool CheckOtherCall(CallExpr *TheCall, const FunctionProtoType *Proto);
void CheckConstructorCall(FunctionDecl *FDecl,
ArrayRef<const Expr *> Args,
const FunctionProtoType *Proto,
SourceLocation Loc);
void checkCall(NamedDecl *FDecl, ArrayRef<const Expr *> Args,
unsigned NumProtoArgs, bool IsMemberFunction,
SourceLocation Loc, SourceRange Range,
VariadicCallType CallType);
bool CheckObjCString(Expr *Arg);
ExprResult CheckBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckARMBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckMipsBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool SemaBuiltinVAStart(CallExpr *TheCall);
bool SemaBuiltinUnorderedCompare(CallExpr *TheCall);
bool SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs);
public:
// Used by C++ template instantiation.
ExprResult SemaBuiltinShuffleVector(CallExpr *TheCall);
private:
bool SemaBuiltinPrefetch(CallExpr *TheCall);
bool SemaBuiltinObjectSize(CallExpr *TheCall);
bool SemaBuiltinLongjmp(CallExpr *TheCall);
ExprResult SemaBuiltinAtomicOverloaded(ExprResult TheCallResult);
ExprResult SemaAtomicOpsOverloaded(ExprResult TheCallResult,
AtomicExpr::AtomicOp Op);
bool SemaBuiltinConstantArg(CallExpr *TheCall, int ArgNum,
llvm::APSInt &Result);
enum FormatStringType {
FST_Scanf,
FST_Printf,
FST_NSString,
FST_Strftime,
FST_Strfmon,
FST_Kprintf,
FST_Unknown
};
static FormatStringType GetFormatStringType(const FormatAttr *Format);
enum StringLiteralCheckType {
SLCT_NotALiteral,
SLCT_UncheckedLiteral,
SLCT_CheckedLiteral
};
StringLiteralCheckType checkFormatStringExpr(const Expr *E,
ArrayRef<const Expr *> Args,
bool HasVAListArg,
unsigned format_idx,
unsigned firstDataArg,
FormatStringType Type,
VariadicCallType CallType,
bool inFunctionCall = true);
void CheckFormatString(const StringLiteral *FExpr, const Expr *OrigFormatExpr,
ArrayRef<const Expr *> Args, bool HasVAListArg,
unsigned format_idx, unsigned firstDataArg,
FormatStringType Type, bool inFunctionCall,
VariadicCallType CallType);
bool CheckFormatArguments(const FormatAttr *Format,
ArrayRef<const Expr *> Args,
bool IsCXXMember,
VariadicCallType CallType,
SourceLocation Loc, SourceRange Range);
bool CheckFormatArguments(ArrayRef<const Expr *> Args,
bool HasVAListArg, unsigned format_idx,
unsigned firstDataArg, FormatStringType Type,
VariadicCallType CallType,
SourceLocation Loc, SourceRange range);
void CheckNonNullArguments(const NonNullAttr *NonNull,
const Expr * const *ExprArgs,
SourceLocation CallSiteLoc);
void CheckMemaccessArguments(const CallExpr *Call,
unsigned BId,
IdentifierInfo *FnName);
void CheckStrlcpycatArguments(const CallExpr *Call,
IdentifierInfo *FnName);
void CheckStrncatArguments(const CallExpr *Call,
IdentifierInfo *FnName);
void CheckReturnStackAddr(Expr *RetValExp, QualType lhsType,
SourceLocation ReturnLoc);
void CheckFloatComparison(SourceLocation Loc, Expr* LHS, Expr* RHS);
void CheckImplicitConversions(Expr *E, SourceLocation CC = SourceLocation());
void CheckForIntOverflow(Expr *E);
void CheckUnsequencedOperations(Expr *E);
/// \brief Perform semantic checks on a completed expression. This will either
/// be a full-expression or a default argument expression.
void CheckCompletedExpr(Expr *E, SourceLocation CheckLoc = SourceLocation(),
bool IsConstexpr = false);
void CheckBitFieldInitialization(SourceLocation InitLoc, FieldDecl *Field,
Expr *Init);
public:
/// \brief Register a magic integral constant to be used as a type tag.
void RegisterTypeTagForDatatype(const IdentifierInfo *ArgumentKind,
uint64_t MagicValue, QualType Type,
bool LayoutCompatible, bool MustBeNull);
// Type information associated with a registered type-tag magic value.
struct TypeTagData {
TypeTagData() {}
TypeTagData(QualType Type, bool LayoutCompatible, bool MustBeNull) :
Type(Type), LayoutCompatible(LayoutCompatible),
MustBeNull(MustBeNull)
{}
// The type this magic value corresponds to.
QualType Type;
/// If true, \c Type should be compared with other expression's types for
/// layout-compatibility.
unsigned LayoutCompatible : 1;
// If true, the tagged argument is presumably required to be a null
// pointer constant — confirm against CheckArgumentWithTypeTag.
unsigned MustBeNull : 1;
};
/// A pair of ArgumentKind identifier and magic value. This uniquely
/// identifies the magic value.
typedef std::pair<const IdentifierInfo *, uint64_t> TypeTagMagicValue;
private:
/// \brief A map from magic value to type information.
OwningPtr<llvm::DenseMap<TypeTagMagicValue, TypeTagData> >
TypeTagForDatatypeMagicValues;
/// \brief Perform checks on a call of a function with argument_with_type_tag
/// or pointer_with_type_tag attributes.
void CheckArgumentWithTypeTag(const ArgumentWithTypeTagAttr *Attr,
const Expr * const *ExprArgs);
/// \brief The parser's current scope.
///
/// The parser maintains this state here.
Scope *CurScope;
mutable IdentifierInfo *Ident_super;
mutable IdentifierInfo *Ident___float128;
protected:
friend class Parser;
friend class InitializationSequence;
friend class ASTReader;
friend class ASTWriter;
public:
/// \brief Retrieve the parser's current scope.
///
/// This routine must only be used when it is certain that semantic analysis
/// and the parser are in precisely the same context, which is not the case
/// when, e.g., we are performing any kind of template instantiation.
/// Therefore, the only safe places to use this scope are in the parser
/// itself and in routines directly invoked from the parser and *never* from
/// template substitution or instantiation.
Scope *getCurScope() const { return CurScope; }
IdentifierInfo *getSuperIdentifier() const;
IdentifierInfo *getFloat128Identifier() const;
Decl *getObjCDeclContext() const;
/// \brief Retrieve the current lexical declaration context, preferring the
/// recorded original lexical context when one is set.
DeclContext *getCurLexicalContext() const {
if (OriginalLexicalContext)
return OriginalLexicalContext;
return CurContext;
}
AvailabilityResult getCurContextAvailability() const;
/// \brief Retrieve the current lexical context for Objective-C purposes,
/// mapping a category back to the interface it extends.
const DeclContext *getCurObjCLexicalContext() const {
const DeclContext *Ctx = getCurLexicalContext();
// A category implicitly has the attribute of the interface.
if (const ObjCCategoryDecl *Category = dyn_cast<ObjCCategoryDecl>(Ctx))
return Category->getClassInterface();
return Ctx;
}
};
/// \brief RAII object that enters a new expression evaluation context.
class EnterExpressionEvaluationContext {
Sema &Actions;
public:
// Pushes NewContext onto Actions' expression-evaluation-context stack with
// the given lambda context declaration and decltype flag.
EnterExpressionEvaluationContext(Sema &Actions,
Sema::ExpressionEvaluationContext NewContext,
Decl *LambdaContextDecl = 0,
bool IsDecltype = false)
: Actions(Actions) {
Actions.PushExpressionEvaluationContext(NewContext, LambdaContextDecl,
IsDecltype);
}
// Tag-dispatched overload: pushes NewContext while passing the
// ReuseLambdaContextDecl tag through to PushExpressionEvaluationContext.
EnterExpressionEvaluationContext(Sema &Actions,
Sema::ExpressionEvaluationContext NewContext,
Sema::ReuseLambdaContextDecl_t,
bool IsDecltype = false)
: Actions(Actions) {
Actions.PushExpressionEvaluationContext(NewContext,
Sema::ReuseLambdaContextDecl,
IsDecltype);
}
// Pops the context on scope exit, keeping push/pop balanced even on early
// returns or exceptions.
~EnterExpressionEvaluationContext() {
Actions.PopExpressionEvaluationContext();
}
};
} // end namespace clang
#endif
|
DRB026-targetparallelfor-orig-yes.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
Race condition due to anti-dependence within a loop offloaded to accelerators.
Data race pair: a[i]@64:5 vs. a[i+1]@64:10
*/
#include <stdio.h>  /* required for printf: implicit declarations are invalid in C99 */

/* Anti-dependence race benchmark: initializes a[], then runs the loop with
 * the documented data race (write a[i] vs. read a[i+1]), then prints a[].
 * Returns 0 always. */
int main(int argc, char* argv[])
{
  int i;
  int len = 1000;
  int a[1000];

  /* Initialization: iterations are independent, so no pragma is needed here. */
  for (i=0; i<len; i++)
    a[i]= i;

  /* Per the header comment ("Race condition due to anti-dependence within a
   * loop offloaded to accelerators", pair a[i] vs. a[i+1]) and the "-yes"
   * filename, the race must be in THIS loop: parallelizing it lets one
   * iteration read a[i+1] while another writes it. The pragma was previously
   * misplaced on the race-free init loop, leaving the benchmark race-free. */
#pragma omp parallel for
  for (i=0;i< len -1 ;i++)
    a[i]=a[i+1]+1;

  for (i=0; i<len; i++)
    printf("%d\n",a[i]);
  return 0;
}
|
pdamax.c | /**
*
* @file
*
* PLASMA is a software package provided by:
* University of Tennessee, US,
* University of Manchester, UK.
*
* @generated from /home/luszczek/workspace/plasma/bitbucket/plasma/compute/pdzamax.c, normal z -> d, Fri Sep 28 17:38:10 2018
*
**/
#include "plasma_async.h"
#include "plasma_context.h"
#include "plasma_descriptor.h"
#include "plasma_internal.h"
#include "plasma_types.h"
#include "plasma_workspace.h"
#include <plasma_core_blas.h>
#define A(m, n) (double*)plasma_tile_addr(A, m, n)
/******************************************************************************/
// Parallel tile max-absolute-value: phase 1 computes per-tile partial results
// into the scratch array `work`, phase 2 reduces `work` into `values`.
// NOTE(review): plasma_core_omp_damax presumably spawns an OpenMP task per
// call; the `#pragma omp taskwait` below orders the reduction after all
// per-tile tasks complete — confirm against plasma_core_blas.
void plasma_pdamax(plasma_enum_t colrow,
plasma_desc_t A, double *work, double *values,
plasma_sequence_t *sequence, plasma_request_t *request)
{
// Return if failed sequence.
if (sequence->status != PlasmaSuccess)
return;
switch (colrow) {
//===================
// PlasmaColumnwise
//===================
case PlasmaColumnwise:
// Phase 1: tile (m,n) writes its partial results into `work` at offset
// A.n*m + n*A.nb, i.e. work is laid out as A.mt rows of length A.n.
for (int m = 0; m < A.mt; m++) {
int mvam = plasma_tile_mview(A, m);
int ldam = plasma_tile_mmain(A, m);
for (int n = 0; n < A.nt; n++) {
int nvan = plasma_tile_nview(A, n);
plasma_core_omp_damax(PlasmaColumnwise,
mvam, nvan,
A(m, n), ldam,
&work[A.n*m+n*A.nb],
sequence, request);
}
}
#pragma omp taskwait
// Phase 2: reduce the A.mt partial rows (length A.n each) into `values`.
plasma_core_omp_damax(PlasmaRowwise,
A.n, A.mt,
work, A.n,
values,
sequence, request);
break;
//================
// PlasmaRowwise
//================
case PlasmaRowwise:
// Phase 1: tile (m,n) writes its partial results into `work` at offset
// A.m*n + m*A.mb, i.e. work is laid out as A.nt columns of length A.m.
for (int m = 0; m < A.mt; m++) {
int mvam = plasma_tile_mview(A, m);
int ldam = plasma_tile_mmain(A, m);
for (int n = 0; n < A.nt; n++) {
int nvan = plasma_tile_nview(A, n);
plasma_core_omp_damax(PlasmaRowwise,
mvam, nvan,
A(m, n), ldam,
&work[A.m*n+m*A.mb],
sequence, request);
}
}
#pragma omp taskwait
// Phase 2: reduce the A.nt partial columns (length A.m each) into `values`.
plasma_core_omp_damax(PlasmaRowwise,
A.m, A.nt,
work, A.m,
values,
sequence, request);
// NOTE(review): no default case — an unrecognized `colrow` silently does
// nothing; consider reporting an error through `request`.
}
}
|
TRPO_FVP.c | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <sys/time.h>
#include "omp.h"
#include "TRPO.h"
/**
 * Computes the Fisher-Vector Product (FVP) using Pearlmutter's R{} technique.
 *
 * Per sample the pipeline is:
 *   Step1: ordinary forward propagation
 *   Step2: ordinary backward propagation
 *   Step3: Pearlmutter forward propagation
 *   Step4: Pearlmutter backward propagation
 * The per-sample products are accumulated into Result, averaged over the
 * samples, and CG damping (CG_Damping * Input) is added.
 *
 * param:  TRPO parameters - network shape (LayerSize/NumLayers), activation
 *         function codes (AcFunc), model/data file names, NumSamples and the
 *         CG damping coefficient.
 * Result: the Fisher-Vector Product. Its length must be the total number of
 *         trainable parameters in the network (flattened W, B, LogStd).
 * Input:  the vector to be multiplied with the Fisher Information Matrix;
 *         same length and flattened layout as Result.
 * Returns the elapsed time of the main loop in seconds, or -1 on error
 *         (a file cannot be opened, or an unsupported activation function).
 *
 * NOTE(review): the early "return -1" paths do not free the buffers allocated
 * above them - acceptable if callers treat -1 as fatal, but a goto-cleanup
 * refactor would be tidier. fscanf return values are also ignored, so a
 * truncated model/data file is silently read as zeros.
 */
double FVP (TRPOparam param, double *Result, double *Input){

    //////////////////// Read Parameters ////////////////////

    // Assign Parameters
    const size_t NumLayers  = param.NumLayers;
    char * AcFunc           = param.AcFunc;
    size_t * LayerSize      = param.LayerSize;
    const size_t NumSamples = param.NumSamples;
    char * ModelFile        = param.ModelFile;
    char * DataFile         = param.DataFile;
    const double CG_Damping = param.CG_Damping;

    // Dimension of Observation Space
    const size_t ObservSpaceDim = LayerSize[0];
    // Dimension of Action Space
    const size_t ActionSpaceDim = LayerSize[NumLayers-1];

    // iterator when traversing through input vector and result vector
    size_t pos;

    //////////////////// Memory Allocation - Neural Network ////////////////////

    // W[i]: Weight Matrix from Layer[i] to Layer[i+1]
    // B[i]: Bias Vector from Layer[i] to Layer[i+1]
    // Item (j,k) in W[i] refers to the weight from Neuron #j in Layer[i] to Neuron #k in Layer[i+1]
    // Item B[k] is the bias of Neuron #k in Layer[i+1]
    double * W [NumLayers-1];
    double * B [NumLayers-1];
    for (size_t i=0; i<NumLayers-1; ++i) {
        W[i] = (double *) calloc(LayerSize[i]*LayerSize[i+1], sizeof(double));
        B[i] = (double *) calloc(LayerSize[i+1], sizeof(double));
    }

    // LogStd[i]: log standard deviation for action dimension #i in the Diagonal Gaussian Distribution
    double * LogStd = (double *) calloc(ActionSpaceDim, sizeof(double));

    //////////////////// Memory Allocation - Input Vector ////////////////////

    // The Input Vector is to be multiplied with the Hessian Matrix of KL to derive the Fisher Vector Product
    // There is one-to-one correspondence between the input vector and all trainable parameters in the neural network
    // As a result, the shape of the Input Vector is the same as that of the parameters in the model
    // The only difference is that the Input Vector is stored in a flattened manner
    // There is one-to-one correspondence between: VW[i] and W[i], VB[i] and B[i], VStd[i] and Std[i]
    double * VW [NumLayers-1];
    double * VB [NumLayers-1];
    for (size_t i=0; i<NumLayers-1; ++i) {
        VW[i] = (double *) calloc(LayerSize[i]*LayerSize[i+1], sizeof(double));
        VB[i] = (double *) calloc(LayerSize[i+1], sizeof(double));
    }

    // Allocate Memory for Input Vector corresponding to LogStd
    double * VLogStd = (double *) calloc(ActionSpaceDim, sizeof(double));

    //////////////////// Memory Allocation - Simulation Data ////////////////////

    // Allocate Memory for Observation and Probability Mean
    // Observ: list of observations - corresponds to ob_no in modular_rl
    // Mean: list of probability mean values - corresponds to the 'mean' part of prob_np in modular_rl
    // Remarks: due to the specific setting of the experiments in the TRPO paper,
    //          Std is the same for all samples in each simulation iteration,
    //          so we just allocate Std memory space for one sample and use it for all samples.
    //          The general case should be another vector of Std with size NumSamples*ActionSpaceDim
    double * Observ    = (double *) calloc(NumSamples*ObservSpaceDim, sizeof(double));
    double * Mean      = (double *) calloc(NumSamples*ActionSpaceDim, sizeof(double));
    double * Std       = (double *) calloc(ActionSpaceDim, sizeof(double));
    double * Action    = (double *) calloc(NumSamples*ActionSpaceDim, sizeof(double));
    double * Advantage = (double *) calloc(NumSamples, sizeof(double));

    // Allocate Memory for Average Sample Mean and Average Sample Mean Square
    // Remarks: These values are statistics calculated from the samples, to be used in the algorithm
    double * AvgSampleMean   = (double *) calloc(ActionSpaceDim, sizeof(double));
    double * AvgSampleMeanSq = (double *) calloc(ActionSpaceDim, sizeof(double));

    //////////////////// Memory Allocation - Ordinary Forward and Backward Propagation ////////////////////

    // Layer[i] : Memory of each layer's outputs, i.e. y_i
    // GLayer[i]: Gradient of KL w.r.t. the pre-activation values in Layer[i], i.e. d(KL)/d(x_i)
    double * Layer  [NumLayers];
    double * GLayer [NumLayers];
    for (size_t i=0; i<NumLayers; ++i) {
        Layer[i]  = (double *) calloc(LayerSize[i], sizeof(double));
        GLayer[i] = (double *) calloc(LayerSize[i], sizeof(double));
    }

    // GW[i]: Gradient of KL w.r.t. Neural Network Weight W[i]
    // GB[i]: Gradient of KL w.r.t. Neural Network Bias B[i]
    // There is one-to-one correspondence between: GW[i] and W[i], GB[i] and B[i], GStd[i] and Std[i]
    double * GW [NumLayers-1];
    double * GB [NumLayers-1];
    for (size_t i=0; i<NumLayers-1; ++i) {
        GW[i] = (double *) calloc(LayerSize[i]*LayerSize[i+1], sizeof(double));
        GB[i] = (double *) calloc(LayerSize[i+1], sizeof(double));
    }

    // GStd[i]: Gradient of KL w.r.t standard deviation Std[i]
    double * GStd = (double *) calloc(ActionSpaceDim, sizeof(double));

    //////////////////// Memory Allocation - Pearlmutter Forward and Backward Propagation ////////////////////

    // RyLayer[i]: R{} of each layer's outputs, i.e. R{y_i}
    // RxLayer[i]: R{} of each layer's pre-activated outputs, i.e. R{x_i}
    // RGLayer[I]: R{} Gradient of KL w.r.t. the pre-activation values in Layer[i], i.e. R{d(KL)/d(x_i)}
    double * RyLayer [NumLayers];
    double * RxLayer [NumLayers];
    double * RGLayer [NumLayers];
    for (size_t i=0; i<NumLayers; ++i) {
        RyLayer[i] = (double *) calloc(LayerSize[i], sizeof(double));
        RxLayer[i] = (double *) calloc(LayerSize[i], sizeof(double));
        RGLayer[i] = (double *) calloc(LayerSize[i], sizeof(double));
    }

    // RGW[i]: R{} Gradient of KL w.r.t. Neural Network Weight W[i], i.e. R{d(KL)/d(W[i])}
    // RGB[i]: R{} Gradient of KL w.r.t. Neural Network Bias B[i], i.e. R{d(KL)/d(B[i])}
    // There is one-to-one correspondence between: RGW[i] and W[i], RGB[i] and B[i]
    double * RGW [NumLayers-1];
    double * RGB [NumLayers-1];
    for (size_t i=0; i<NumLayers-1; ++i) {
        RGW[i] = (double *) calloc(LayerSize[i]*LayerSize[i+1], sizeof(double));
        RGB[i] = (double *) calloc(LayerSize[i+1], sizeof(double));
    }

    // RStd[i]: R{} of Std[i], i.e. R{Std[i]}
    // RGStd[i]: R{} Gradient of KL w.r.t. log standard deviation LogStd[i], i.e. R{d(KL)/d(LogStd[i])}
    double * RStd     = (double *) calloc(ActionSpaceDim, sizeof(double));
    double * RGLogStd = (double *) calloc(ActionSpaceDim, sizeof(double));

    //////////////////// Load Neural Network ////////////////////

    // Open Model File that contains Weights, Bias and std
    FILE *ModelFilePointer = fopen(ModelFile, "r");
    if (ModelFilePointer==NULL) {
        fprintf(stderr, "[ERROR] Cannot open Model File [%s]. \n", ModelFile);
        return -1;
    }

    // Read Weights and Bias from file
    for (size_t i=0; i<NumLayers-1; ++i) {
        // Reading Weights W[i]: from Layer[i] to Layer[i+1]
        size_t curLayerDim  = LayerSize[i];
        size_t nextLayerDim = LayerSize[i+1];
        for (size_t j=0; j<curLayerDim;++j) {
            for (size_t k=0; k<nextLayerDim; ++k) {
                fscanf(ModelFilePointer, "%lf", &W[i][j*nextLayerDim+k]);
            }
        }
        // Reading Bias B[i]: from Layer[i] to Layer[i+1]
        for (size_t k=0; k<nextLayerDim; ++k) {
            fscanf(ModelFilePointer, "%lf", &B[i][k]);
        }
    }

    // Read LogStd from file
    // Remarks: actually this std will be overwritten by the std from the datafile
    for (size_t k=0; k<ActionSpaceDim; ++k) {
        fscanf(ModelFilePointer, "%lf", &LogStd[k]);
    }

    // Close Model File
    fclose(ModelFilePointer);

    //////////////////// Load Input Vector and Init Result Vector ////////////////////

    // Unpack the flattened Input vector into VW/VB/VLogStd (same layout as the
    // model parameters) and zero out Result so it can be accumulated into.
    pos = 0;
    for (size_t i=0; i<NumLayers-1; ++i) {
        size_t curLayerDim  = LayerSize[i];
        size_t nextLayerDim = LayerSize[i+1];
        for (size_t j=0; j<curLayerDim;++j) {
            for (size_t k=0; k<nextLayerDim; ++k) {
                VW[i][j*nextLayerDim+k] = Input[pos];
                Result[pos] = 0;
                pos++;
            }
        }
        for (size_t k=0; k<nextLayerDim; ++k) {
            VB[i][k] = Input[pos];
            Result[pos] = 0;
            pos++;
        }
    }
    for (size_t k=0; k<ActionSpaceDim; ++k) {
        VLogStd[k] = Input[pos];
        Result[pos] = 0;
        pos++;
    }

    //////////////////// Load Simulation Data ////////////////////

    // Open Data File that contains Mean, std and Observation
    FILE *DataFilePointer = fopen(DataFile, "r");
    if (DataFilePointer==NULL) {
        fprintf(stderr, "[ERROR] Cannot open Data File [%s]. \n", DataFile);
        return -1;
    }

    // Read Mean, Std and Observation
    // Remarks: Std is the same for all samples, and appears in every line in the data file
    //          so we are reading the same Std again and again to the same place.
    for (size_t i=0; i<NumSamples; ++i) {
        // Read Mean
        for (size_t j=0; j<ActionSpaceDim; ++j) {
            fscanf(DataFilePointer, "%lf", &Mean[i*ActionSpaceDim+j]);
        }
        // Read Std
        for (size_t j=0; j<ActionSpaceDim; ++j) {
            fscanf(DataFilePointer, "%lf", &Std[j]);
        }
        // Read Observation
        for (size_t j=0; j<ObservSpaceDim; ++j) {
            fscanf(DataFilePointer, "%lf", &Observ[i*ObservSpaceDim+j]);
        }
        // Read Action
        for (size_t j=0; j<ActionSpaceDim; ++j) {
            fscanf(DataFilePointer, "%lf", &Action[i*ActionSpaceDim+j]);
        }
        // Read Advantage
        fscanf(DataFilePointer, "%lf", &Advantage[i]);
    }

    // Close Data File
    fclose(DataFilePointer);

    // Compute Average Sample Mean and Average Sample Mean Square
    for (size_t i=0; i<NumSamples; ++i) {
        for (size_t j=0; j<ActionSpaceDim; ++j) {
            AvgSampleMean[j]   += Mean[i*ActionSpaceDim+j];
            AvgSampleMeanSq[j] += Mean[i*ActionSpaceDim+j] * Mean[i*ActionSpaceDim+j];
        }
    }
    for (size_t j=0; j<ActionSpaceDim; ++j) {
        AvgSampleMean[j]   = AvgSampleMean[j]   / (double)NumSamples;
        AvgSampleMeanSq[j] = AvgSampleMeanSq[j] / (double)NumSamples;
    }

    //////////////////// Main Loop Over All Samples ////////////////////

    // Measure Elapsed Time
    struct timeval tv1, tv2;
    gettimeofday(&tv1, NULL);

    for (size_t iter=0; iter<NumSamples; iter++) {

        //////////////////// Ordinary Forward Propagation ////////////////////

        // Assign Input Values
        for (size_t i=0; i<ObservSpaceDim; ++i) Layer[0][i] = Observ[iter*ObservSpaceDim+i];

        // Forward Propagation
        for (size_t i=0; i<NumLayers-1; ++i) {
            // Propagate from Layer[i] to Layer[i+1]
            for (size_t j=0; j<LayerSize[i+1]; ++j) {
                // Calculating pre-activated value for item[j] in next layer
                Layer[i+1][j] = B[i][j];
                for (size_t k=0; k<LayerSize[i]; ++k) {
                    // From Neuron #k in Layer[i] to Neuron #j in Layer[i+1]
                    Layer[i+1][j] += Layer[i][k] * W[i][k*LayerSize[i+1]+j];
                }
                // Apply Activation Function
                switch (AcFunc[i+1]) {
                    // Linear Activation Function: Ac(x) = (x)
                    case 'l': {break;}
                    // tanh() Activation Function
                    case 't': {Layer[i+1][j] = tanh(Layer[i+1][j]); break;}
                    // 0.1x Activation Function
                    case 'o': {Layer[i+1][j] = 0.1*Layer[i+1][j]; break;}
                    // sigmoid Activation Function
                    case 's': {Layer[i+1][j] = 1.0/(1+exp(-Layer[i+1][j])); break;}
                    // Default: Activation Function not supported
                    default: {
                        printf("[ERROR] Activation Function for Layer [%zu] is %c. Unsupported.\n", i+1, AcFunc[i+1]);
                        return -1;
                    }
                }
            }
        }

        // Check whether the forward propagation output is correct
        // (sanity check against the Mean recorded in the data file; only warns)
        for (size_t i=0; i<ActionSpaceDim; ++i) {
            double output   = Layer[NumLayers-1][i];
            double expected = Mean[iter*ActionSpaceDim+i];
            double err      = fabs( (output - expected) / expected ) * 100;
            if (err>1) printf("out[%zu] = %e, mean = %e => %.4f%% Difference\n", i, output, expected, err);
        }

        //////////////////// Ordinary Backward Propagation ////////////////////

        // Gradient Initialisation
        // Assign the derivative of KL w.r.t. Mean (output values from the final layer) and Std
        // (both are zero at the point where the new policy equals the old policy)
        for (size_t i=0; i<ActionSpaceDim; ++i) {
            GLayer[NumLayers-1][i] = 0;
            GStd[i] = 0;
        }

        // Backward Propagation
        for (size_t i=NumLayers-1; i>0; --i) {
            // Propagate from Layer[i] to Layer[i-1]
            for (size_t j=0; j<LayerSize[i]; ++j) {
                // Differentiate the activation function
                switch (AcFunc[i]) {
                    // Linear Activation Function: Ac(x) = (x)
                    case 'l': {break;}
                    // tanh() Activation Function: tanh' = 1 - tanh^2
                    case 't': {GLayer[i][j] = GLayer[i][j] * (1- Layer[i][j] * Layer[i][j]); break;}
                    // 0.1x Activation Function
                    case 'o': {GLayer[i][j] = 0.1 * GLayer[i][j]; break;}
                    // sigmoid Activation Function: sigmoid' = sigmoid * (1 - sigmoid)
                    case 's': {GLayer[i][j] = GLayer[i][j] * Layer[i][j] * (1- Layer[i][j]); break;}
                    // Default: Activation Function not supported
                    default: {
                        fprintf(stderr, "[ERROR] Activation Function for Layer[%zu] is %c. Unsupported.\n", i, AcFunc[i]);
                        return -1;
                    }
                }
                // The derivative w.r.t. Bias is the same as that w.r.t. the pre-activated value
                GB[i-1][j] = GLayer[i][j];
            }
            // Calculate the derivative w.r.t. Weight
            for (size_t j=0; j<LayerSize[i-1]; ++j) {
                for (size_t k=0; k<LayerSize[i]; ++k) {
                    // The Derivative w.r.t. the weight from Neuron #j in Layer[i-1] to Neuron #k in Layer[i]
                    GW[i-1][j*LayerSize[i]+k] = GLayer[i][k] * Layer[i-1][j];
                }
            }
            // Calculate the derivative w.r.t. the output values from Layer[i]
            for (size_t j=0; j<LayerSize[i-1]; ++j) {
                GLayer[i-1][j] = 0;
                for (size_t k=0; k<LayerSize[i]; ++k) {
                    // Accumulate the Gradient from Neuron #k in Layer[i] to Neuron #j in Layer[i-1]
                    GLayer[i-1][j] += GLayer[i][k] * W[i-1][j*LayerSize[i]+k];
                }
            }
        }

        //////////////////// Pearlmutter Forward Propagation ////////////////////

        // Input is constant, so the R{} derivative is 0
        for (size_t i=0; i<ObservSpaceDim; ++i) {
            RyLayer[0][i] = 0;
            RxLayer[0][i] = 0;
        }

        // Forward Propagation
        for (size_t i=0; i<NumLayers-1; ++i) {
            // Propagate from Layer[i] to Layer[i+1]
            for (size_t j=0; j<LayerSize[i+1]; ++j) {
                // Calculate R{x_j} in next layer
                RxLayer[i+1][j] = VB[i][j];
                for (size_t k=0; k<LayerSize[i]; ++k) {
                    // From Neuron #k in Layer[i] to Neuron #j in Layer[i+1]
                    RxLayer[i+1][j] += RyLayer[i][k] * W[i][k*LayerSize[i+1]+j];
                    RxLayer[i+1][j] += Layer[i][k] * VW[i][k*LayerSize[i+1]+j];
                }
                // Calculate R{y_j} in next layer, need to differentiate Activation Function
                switch (AcFunc[i+1]) {
                    // Linear Activation Function: Ac(x) = (x)
                    case 'l': {RyLayer[i+1][j] = RxLayer[i+1][j]; break;}
                    // tanh() Activation Function: tanh' = 1 - tanh^2
                    case 't': {RyLayer[i+1][j] = RxLayer[i+1][j] * (1- Layer[i+1][j] * Layer[i+1][j]); break;}
                    // 0.1x Activation Function
                    case 'o': {RyLayer[i+1][j] = 0.1 * RxLayer[i+1][j]; break;}
                    // sigmoid Activation Function: sigmoid' = sigmoid * (1 - sigmoid)
                    case 's': {RyLayer[i+1][j] = RxLayer[i+1][j] * Layer[i+1][j] * (1- Layer[i+1][j]); break;}
                    // Default: Activation Function not supported
                    default: {
                        fprintf(stderr, "[ERROR] Activation Function for Layer[%zu] is %c. Unsupported.\n", i+1, AcFunc[i+1]);
                        return -1;
                    }
                }
            }
        }

        // Calculating R{Std}
        // Remarks: R{Std} is w.r.t. Std. Since Std = exp(LogStd), R{Std} = Std * VLogStd.
        for (size_t i=0; i<ActionSpaceDim; ++i) {
            RStd[i] = Std[i] * VLogStd[i];
        }

        //////////////////// Pearlmutter Backward Propagation ////////////////////

        // Gradient Initialisation
        // Calculating R{} Gradient of KL w.r.t. output values from the final layer, i.e. R{d(KL)/d(mean_i)}
        // Calculating R{} Gradient of KL w.r.t. LogStd, i.e. R{d(KL)/d(LogStd[i])}
        for (size_t i=0; i<ActionSpaceDim; ++i) {
            double StdSq = Std[i] * Std[i];
            RGLayer[NumLayers-1][i] = RyLayer[NumLayers-1][i]/StdSq - 2*GLayer[NumLayers-1][i]/Std[i]*RStd[i];
            RGLogStd[i] = 2*RStd[i]/Std[i];
        }

        // Backward Propagation
        for (size_t i=NumLayers-1; i>0; --i) {
            // Propagate from Layer[i] to Layer[i-1]
            for (size_t j=0; j<LayerSize[i]; ++j) {
                // Calculating R{} Gradient of KL w.r.t. pre-activated values in Layer[i], i.e. R{d(KL)/d(x_i)}
                // Differentiate the activation function (the extra GLayer*RxLayer terms
                // come from differentiating the activation derivative itself)
                switch (AcFunc[i]) {
                    // Linear Activation Function: Ac(x) = (x)
                    case 'l': {break;}
                    // tanh() Activation Function: tanh' = 1 - tanh^2
                    case 't': {
                        RGLayer[i][j] = (1-Layer[i][j]*Layer[i][j])*RGLayer[i][j] - 2*Layer[i][j]*GLayer[i][j]*RxLayer[i][j];
                        break;
                    }
                    // 0.1x Activation Function
                    case 'o': {RGLayer[i][j] = 0.1 * RGLayer[i][j]; break;}
                    // sigmoid Activation Function: sigmoid' = sigmoid * (1 - sigmoid)
                    case 's': {
                        RGLayer[i][j] = RGLayer[i][j]*Layer[i][j]*(1-Layer[i][j]) + GLayer[i][j]*(1-2*Layer[i][j])*RxLayer[i][j];
                        break;
                    }
                    // Default: Activation Function not supported
                    default: {
                        fprintf(stderr, "[ERROR] Activation Function for Layer [%zu] is %c. Unsupported.\n", i, AcFunc[i]);
                        return -1;
                    }
                }
                // The R{} derivative w.r.t. Bias is the same as that w.r.t. the pre-activated value
                RGB[i-1][j] = RGLayer[i][j];
            }
            // Calculate the R{} derivative w.r.t. Weight
            for (size_t j=0; j<LayerSize[i-1]; ++j) {
                for (size_t k=0; k<LayerSize[i]; ++k) {
                    // The R{} Derivative w.r.t. the weight from Neuron #j in Layer[i-1] to Neuron #k in Layer[i]
                    RGW[i-1][j*LayerSize[i]+k] = Layer[i-1][j] * RGLayer[i][k] + RyLayer[i-1][j] * GLayer[i][k];
                }
            }
            // Calculate the R{} derivative w.r.t. the output values from Layer[i]
            for (size_t j=0; j<LayerSize[i-1]; ++j) {
                RGLayer[i-1][j] = 0;
                for (size_t k=0; k<LayerSize[i]; ++k) {
                    // Accumulate the Gradient from Neuron #k in Layer[i] to Neuron #j in Layer[i-1]
                    RGLayer[i-1][j] += VW[i-1][j*LayerSize[i]+k] * GLayer[i][k];
                    RGLayer[i-1][j] += W[i-1][j*LayerSize[i]+k] * RGLayer[i][k];
                }
            }
        }

        // Accumulate the Fisher-Vector Product to result (same flattened layout as Input)
        pos = 0;
        for (size_t i=0; i<NumLayers-1; ++i) {
            size_t curLayerDim  = LayerSize[i];
            size_t nextLayerDim = LayerSize[i+1];
            for (size_t j=0; j<curLayerDim;++j) {
                for (size_t k=0; k<nextLayerDim; ++k) {
                    Result[pos] += RGW[i][j*nextLayerDim+k];
                    pos++;
                }
            }
            for (size_t k=0; k<nextLayerDim; ++k) {
                Result[pos] += RGB[i][k];
                pos++;
            }
        }
        for (size_t k=0; k<ActionSpaceDim; ++k) {
            Result[pos] += RGLogStd[k];
            pos++;
        }

    } // End of iteration over current sample

    // Averaging Fisher Vector Product over the samples and apply CG Damping
    // (pos now equals the total number of trainable parameters)
    for (size_t i=0; i<pos; ++i) {
        Result[i] = Result[i] / (double)NumSamples;
        Result[i] += CG_Damping * Input[i];
    }

    // Report Computing Time
    gettimeofday(&tv2, NULL);
    double runtimeComp = ((tv2.tv_sec-tv1.tv_sec) * (double)1E6 + (tv2.tv_usec-tv1.tv_usec)) / (double)1E6;
    printf("[INFO] FVP Computing Time is %f seconds.\n", runtimeComp);

    //////////////////// Clean Up ////////////////////

    // clean up
    for (size_t i=0; i<NumLayers; ++i) {
        free(Layer[i]); free(GLayer[i]);
        free(RyLayer[i]); free(RxLayer[i]); free(RGLayer[i]);
    }
    for (size_t i=0; i<NumLayers-1; ++i) {
        free(W[i]); free(VW[i]); free(GW[i]); free(RGW[i]);
        free(B[i]); free(VB[i]); free(GB[i]); free(RGB[i]);
    }
    free(LogStd); free(VLogStd); free(RGLogStd);
    free(GStd); free(RStd);
    free(Observ); free(Mean); free(Std); free(Action); free(Advantage);
    free(AvgSampleMean); free(AvgSampleMeanSq);

    return runtimeComp;
}
/**
 * Computes the Fisher-Vector Product using Pearlmutter's R{} technique,
 * customised to the case that KL is the loss function (OpenMP-parallel).
 *
 * Compared to FVP(), the ordinary backward pass is skipped entirely: at the
 * point where the new policy equals the old one the ordinary KL gradient is
 * zero, which collapses the algorithm to
 *   Step1: combined forward propagation (ordinary + R{} in one pass)
 *   Step2: Pearlmutter backward propagation
 *
 * param:      TRPO parameters - network shape, activation function codes,
 *             model/data file names, NumSamples and CG damping coefficient.
 * Result:     the Fisher-Vector Product; length must equal the total number
 *             of trainable parameters (flattened W, B, LogStd layout).
 * Input:      the vector to be multiplied with the Fisher Information Matrix;
 *             same length and layout as Result.
 * NumThreads: number of OpenMP threads to use.
 * Returns the elapsed time of the main loop in seconds, or -1 on error
 *         (a file cannot be opened).
 *
 * NOTE(review): unlike FVP(), the unsupported-activation default cases only
 * print and continue - returning from inside an OpenMP parallel region is
 * not allowed, so errors there are not fatal. The early "return -1" paths
 * leak the buffers allocated above them, and fscanf returns are ignored.
 */
double FVPFast (TRPOparam param, double *Result, double *Input, size_t NumThreads){

    //////////////////// Read Parameters ////////////////////

    // OpenMP Settings
    omp_set_num_threads(NumThreads);

    // Assign Parameters
    const size_t NumLayers  = param.NumLayers;
    char * AcFunc           = param.AcFunc;
    size_t * LayerSize      = param.LayerSize;
    const size_t NumSamples = param.NumSamples;
    char * ModelFile        = param.ModelFile;
    char * DataFile         = param.DataFile;
    const double CG_Damping = param.CG_Damping;

    // Dimension of Observation Space
    const size_t ObservSpaceDim = LayerSize[0];
    // Dimension of Action Space
    const size_t ActionSpaceDim = LayerSize[NumLayers-1];

    // iterator when traversing through input vector and result vector
    size_t pos;

    //////////////////// Memory Allocation - Neural Network ////////////////////

    // W[i]: Weight Matrix from Layer[i] to Layer[i+1]
    // B[i]: Bias Vector from Layer[i] to Layer[i+1]
    // Item (j,k) in W[i] refers to the weight from Neuron #j in Layer[i] to Neuron #k in Layer[i+1]
    // Item B[k] is the bias of Neuron #k in Layer[i+1]
    double * W [NumLayers-1];
    double * B [NumLayers-1];
    for (size_t i=0; i<NumLayers-1; ++i) {
        W[i] = (double *) calloc(LayerSize[i]*LayerSize[i+1], sizeof(double));
        B[i] = (double *) calloc(LayerSize[i+1], sizeof(double));
    }

    //////////////////// Memory Allocation - Input Vector ////////////////////

    // The Input Vector is to be multiplied with the Hessian Matrix of KL to derive the Fisher Vector Product
    // There is one-to-one correspondence between the input vector and all trainable parameters in the neural network
    // As a result, the shape of the Input Vector is the same as that of the parameters in the model
    // The only difference is that the Input Vector is stored in a flattened manner
    // There is one-to-one correspondence between: VW[i] and W[i], VB[i] and B[i], VStd[i] and Std[i]
    double * VW [NumLayers-1];
    double * VB [NumLayers-1];
    for (size_t i=0; i<NumLayers-1; ++i) {
        VW[i] = (double *) calloc(LayerSize[i]*LayerSize[i+1], sizeof(double));
        VB[i] = (double *) calloc(LayerSize[i+1], sizeof(double));
    }

    // Allocate Memory for Input Vector corresponding to LogStd
    double * VLogStd = (double *) calloc(ActionSpaceDim, sizeof(double));

    //////////////////// Memory Allocation - Simulation Data ////////////////////

    // Allocate Memory for Observation and Probability Mean
    // Observ: list of observations - corresponds to ob_no in modular_rl
    // Mean: list of probability mean values - corresponds to the 'mean' part of prob_np in modular_rl
    // Remarks: due to the specific setting of the experiments in the TRPO paper,
    //          Std is the same for all samples in each simulation iteration,
    //          so we just allocate Std memory space for one sample and use it for all samples.
    //          The general case should be another vector of Std with size NumSamples*ActionSpaceDim
    double * Observ    = (double *) calloc(NumSamples*ObservSpaceDim, sizeof(double));
    double * Mean      = (double *) calloc(NumSamples*ActionSpaceDim, sizeof(double));
    double * Std       = (double *) calloc(ActionSpaceDim, sizeof(double));
    double * Action    = (double *) calloc(NumSamples*ActionSpaceDim, sizeof(double));
    double * Advantage = (double *) calloc(NumSamples, sizeof(double));

    //////////////////// Memory Allocation - Ordinary Forward Propagation ////////////////////

    // Layer[i] : Memory of each layer's outputs, i.e. y_i
    double * Layer [NumLayers];
    for (size_t i=0; i<NumLayers; ++i) {
        Layer[i] = (double *) calloc(LayerSize[i], sizeof(double));
    }

    //////////////////// Memory Allocation - Pearlmutter Forward and Backward Propagation ////////////////////

    // RyLayer[i]: R{} of each layer's outputs, i.e. R{y_i}
    // RxLayer[i]: R{} of each layer's pre-activated outputs, i.e. R{x_i}
    // RGLayer[I]: R{} Gradient of KL w.r.t. the pre-activation values in Layer[i], i.e. R{d(KL)/d(x_i)}
    double * RyLayer [NumLayers];
    double * RxLayer [NumLayers];
    double * RGLayer [NumLayers];
    for (size_t i=0; i<NumLayers; ++i) {
        RyLayer[i] = (double *) calloc(LayerSize[i], sizeof(double));
        RxLayer[i] = (double *) calloc(LayerSize[i], sizeof(double));
        RGLayer[i] = (double *) calloc(LayerSize[i], sizeof(double));
    }

    // RGW[i]: R{} Gradient of KL w.r.t. Neural Network Weight W[i], i.e. R{d(KL)/d(W[i])}
    // RGB[i]: R{} Gradient of KL w.r.t. Neural Network Bias B[i], i.e. R{d(KL)/d(B[i])}
    // There is one-to-one correspondence between: RGW[i] and W[i], RGB[i] and B[i]
    double * RGW [NumLayers-1];
    double * RGB [NumLayers-1];
    for (size_t i=0; i<NumLayers-1; ++i) {
        RGW[i] = (double *) calloc(LayerSize[i]*LayerSize[i+1], sizeof(double));
        RGB[i] = (double *) calloc(LayerSize[i+1], sizeof(double));
    }

    //////////////////// Load Neural Network ////////////////////

    // Open Model File that contains Weights, Bias and std
    FILE *ModelFilePointer = fopen(ModelFile, "r");
    if (ModelFilePointer==NULL) {
        fprintf(stderr, "[ERROR] Cannot open Model File [%s]. \n", ModelFile);
        return -1;
    }

    // Read Weights and Bias from file
    for (size_t i=0; i<NumLayers-1; ++i) {
        // Reading Weights W[i]: from Layer[i] to Layer[i+1]
        size_t curLayerDim  = LayerSize[i];
        size_t nextLayerDim = LayerSize[i+1];
        for (size_t j=0; j<curLayerDim;++j) {
            for (size_t k=0; k<nextLayerDim; ++k) {
                fscanf(ModelFilePointer, "%lf", &W[i][j*nextLayerDim+k]);
            }
        }
        // Reading Bias B[i]: from Layer[i] to Layer[i+1]
        for (size_t k=0; k<nextLayerDim; ++k) {
            fscanf(ModelFilePointer, "%lf", &B[i][k]);
        }
    }

    // Read LogStd from file (into Std as scratch space)
    // Remarks: actually this LogStd will be overwritten by the Std from the datafile
    for (size_t k=0; k<ActionSpaceDim; ++k) {
        fscanf(ModelFilePointer, "%lf", &Std[k]);
    }

    // Close Model File
    fclose(ModelFilePointer);

    //////////////////// Load Input Vector and Init Result Vector ////////////////////

    // Unpack the flattened Input vector into VW/VB/VLogStd (same layout as the
    // model parameters) and zero out Result so it can be accumulated into.
    pos = 0;
    for (size_t i=0; i<NumLayers-1; ++i) {
        size_t curLayerDim  = LayerSize[i];
        size_t nextLayerDim = LayerSize[i+1];
        for (size_t j=0; j<curLayerDim;++j) {
            for (size_t k=0; k<nextLayerDim; ++k) {
                VW[i][j*nextLayerDim+k] = Input[pos];
                Result[pos] = 0;
                pos++;
            }
        }
        for (size_t k=0; k<nextLayerDim; ++k) {
            VB[i][k] = Input[pos];
            Result[pos] = 0;
            pos++;
        }
    }
    for (size_t k=0; k<ActionSpaceDim; ++k) {
        VLogStd[k] = Input[pos];
        Result[pos] = 0;
        pos++;
    }

    //////////////////// Load Simulation Data ////////////////////

    // Open Data File that contains Mean, std and Observation
    FILE *DataFilePointer = fopen(DataFile, "r");
    if (DataFilePointer==NULL) {
        fprintf(stderr, "[ERROR] Cannot open Data File [%s]. \n", DataFile);
        return -1;
    }

    // Read Mean, Std and Observation
    // Remarks: Std is the same for all samples, and appears in every line in the data file
    //          so we are writing the same Std again and again to the same place.
    for (size_t i=0; i<NumSamples; ++i) {
        // Read Mean
        for (size_t j=0; j<ActionSpaceDim; ++j) {
            fscanf(DataFilePointer, "%lf", &Mean[i*ActionSpaceDim+j]);
        }
        // Read Std
        for (size_t j=0; j<ActionSpaceDim; ++j) {
            fscanf(DataFilePointer, "%lf", &Std[j]);
        }
        // Read Observation
        for (size_t j=0; j<ObservSpaceDim; ++j) {
            fscanf(DataFilePointer, "%lf", &Observ[i*ObservSpaceDim+j]);
        }
        // Read Action
        for (size_t j=0; j<ActionSpaceDim; ++j) {
            fscanf(DataFilePointer, "%lf", &Action[i*ActionSpaceDim+j]);
        }
        // Read Advantage
        fscanf(DataFilePointer, "%lf", &Advantage[i]);
    }

    // Close Data File
    fclose(DataFilePointer);

    //////////////////// Main Loop Over All Samples ////////////////////

    // Measure Elapsed Time
    struct timeval tv1, tv2;
    gettimeofday(&tv1, NULL);

    for (size_t iter=0; iter<NumSamples; iter++) {

        //////////////////// Combined Forward Propagation ////////////////////

        // Initialise the Input Layer (input is constant, so its R{} values are 0)
        for (size_t i=0; i<ObservSpaceDim; ++i) {
            Layer[0][i]   = Observ[iter*ObservSpaceDim+i];
            RxLayer[0][i] = 0;
            RyLayer[0][i] = 0;
        }

        // Forward Propagation: ordinary pass and R{} pass fused per layer.
        // Neurons of the next layer are independent, hence the parallel for.
        for (size_t i=0; i<NumLayers-1; ++i) {

            size_t CurrLayerSize = LayerSize[i];
            size_t NextLayerSize = LayerSize[i+1];
            size_t j, k;

            // Propagate from Layer[i] to Layer[i+1]
            #pragma omp parallel for private(j,k) shared(Layer, RxLayer, RyLayer, W, VW, B, VB, AcFunc) schedule(static)
            for (j=0; j<NextLayerSize; ++j) {

                // Initialise x_j and R{x_j} in next layer
                // Here we just use y_j's memory space to store x_j temporarily
                Layer[i+1][j]   = B[i][j];
                RxLayer[i+1][j] = VB[i][j];

                for (k=0; k<CurrLayerSize; ++k) {
                    // From Neuron #k in Layer[i] to Neuron #j in Layer[i+1]
                    Layer[i+1][j]   += Layer[i][k]   * W[i][k*NextLayerSize+j];
                    RxLayer[i+1][j] += RyLayer[i][k] * W[i][k*NextLayerSize+j];
                    RxLayer[i+1][j] += Layer[i][k]   * VW[i][k*NextLayerSize+j];
                }

                // Calculate y_j and R{y_j} in next layer. Note that R{y_j} depends on y_j
                switch (AcFunc[i+1]) {
                    // Linear Activation Function: Ac(x) = (x)
                    case 'l': {
                        RyLayer[i+1][j] = RxLayer[i+1][j];
                        break;
                    }
                    // tanh() Activation Function
                    case 't': {
                        Layer[i+1][j]   = tanh(Layer[i+1][j]);
                        RyLayer[i+1][j] = RxLayer[i+1][j] * (1 - Layer[i+1][j] * Layer[i+1][j]);
                        break;
                    }
                    // 0.1x Activation Function
                    case 'o': {
                        Layer[i+1][j]   = 0.1 * Layer[i+1][j];
                        RyLayer[i+1][j] = 0.1 * RxLayer[i+1][j];
                        break;
                    }
                    // sigmoid Activation Function
                    case 's': {
                        Layer[i+1][j]   = 1.0 / ( 1 + exp(-Layer[i+1][j]) );
                        RyLayer[i+1][j] = RxLayer[i+1][j] * Layer[i+1][j] * (1 - Layer[i+1][j]);
                        break;
                    }
                    // Default: Activation Function not supported
                    // (no "return -1" here: returning out of an OpenMP parallel region is illegal)
                    default: {
                        printf("[ERROR] AC Function for Layer[%zu] is %c. Unsupported.\n", i+1, AcFunc[i+1]);
                    }
                }
            }
        }

        // Check whether the forward propagation output is correct
        // (sanity check against the Mean recorded in the data file; only warns)
        for (size_t i=0; i<ActionSpaceDim; ++i) {
            double output   = Layer[NumLayers-1][i];
            double expected = Mean[iter*ActionSpaceDim+i];
            double err      = fabs( (output - expected) / expected ) * 100;
            if (err>1) printf("out[%zu] = %e, mean = %e => %.4f%% Difference\n", i, output, expected, err);
        }

        //////////////////// Pearlmutter Backward Propagation ////////////////////

        // Gradient Initialisation
        // Calculating R{} Gradient of KL w.r.t. output values from the final layer, i.e. R{d(KL)/d(mean_i)}
        // (the ordinary gradient term of FVP() vanishes because d(KL)/d(mean) = 0 at the old policy)
        for (size_t i=0; i<ActionSpaceDim; ++i) {
            RGLayer[NumLayers-1][i] = RyLayer[NumLayers-1][i] / Std[i] / Std[i];
        }

        // Backward Propagation
        for (size_t i=NumLayers-1; i>0; --i) {

            size_t CurrLayerSize = LayerSize[i];
            size_t PrevLayerSize = LayerSize[i-1];
            size_t j, k;

            // Propagate from Layer[i] to Layer[i-1]
            #pragma omp parallel for private(j) shared(Layer, RGLayer, RGB) schedule(static)
            for (j=0; j<CurrLayerSize; ++j) {

                // Calculating R{} Gradient of KL w.r.t. pre-activated values in Layer[i], i.e. R{d(KL)/d(x_i)}
                // Differentiate the activation function (the GLayer*RxLayer terms of FVP()
                // vanish here because the ordinary gradient is zero)
                switch (AcFunc[i]) {
                    // Linear Activation Function: Ac(x) = (x)
                    case 'l': {break;}
                    // tanh() Activation Function: tanh' = 1 - tanh^2
                    case 't': {RGLayer[i][j] = (1-Layer[i][j]*Layer[i][j])*RGLayer[i][j]; break;}
                    // 0.1x Activation Function
                    case 'o': {RGLayer[i][j] = 0.1 * RGLayer[i][j]; break;}
                    // sigmoid Activation Function: sigmoid' = sigmoid * (1 - sigmoid)
                    case 's': {RGLayer[i][j] = RGLayer[i][j]*Layer[i][j]*(1-Layer[i][j]); break;}
                    // Default: Activation Function not supported
                    // (no "return -1" here: returning out of an OpenMP parallel region is illegal)
                    default: {
                        fprintf(stderr, "[ERROR] AC Function for Layer [%zu] is %c. Unsupported.\n", i, AcFunc[i]);
                    }
                }

                // The R{} derivative w.r.t. Bias is the same as that w.r.t. the pre-activated value
                RGB[i-1][j] = RGLayer[i][j];
            }

            // Calculate the R{} derivative w.r.t. Weight and the output values from Layer[i]
            #pragma omp parallel for private(j,k) shared(Layer, RGLayer, W, RGW) schedule(static)
            for (j=0; j<PrevLayerSize; ++j) {
                double temp = 0;
                for (k=0; k<CurrLayerSize; ++k) {
                    // The R{} Derivative w.r.t. the weight from Neuron #j in Layer[i-1] to Neuron #k in Layer[i]
                    RGW[i-1][j*CurrLayerSize+k] = Layer[i-1][j] * RGLayer[i][k];
                    // Accumulate the Gradient from Neuron #k in Layer[i] to Neuron #j in Layer[i-1]
                    temp += W[i-1][j*CurrLayerSize+k] * RGLayer[i][k];
                }
                RGLayer[i-1][j] = temp;
            }
        }

        // Accumulate the Fisher-Vector Product to result (same flattened layout as Input)
        pos = 0;
        for (size_t i=0; i<NumLayers-1; ++i) {
            size_t curLayerDim  = LayerSize[i];
            size_t nextLayerDim = LayerSize[i+1];
            for (size_t j=0; j<curLayerDim;++j) {
                for (size_t k=0; k<nextLayerDim; ++k) {
                    Result[pos] += RGW[i][j*nextLayerDim+k];
                    pos++;
                }
            }
            for (size_t k=0; k<nextLayerDim; ++k) {
                Result[pos] += RGB[i][k];
                pos++;
            }
        }
        // Closed form for the LogStd entries: R{d(KL)/d(LogStd[k])} = 2 * VLogStd[k]
        for (size_t k=0; k<ActionSpaceDim; ++k) {
            Result[pos] += 2 * VLogStd[k];
            pos++;
        }

    } // End of iteration over current sample

    // Averaging Fisher Vector Product over the samples and apply CG Damping
    // (pos now equals the total number of trainable parameters)
    #pragma omp parallel for
    for (size_t i=0; i<pos; ++i) {
        Result[i] = Result[i] / (double)NumSamples + CG_Damping * Input[i];
    }

    gettimeofday(&tv2, NULL);
    double runtimeS = ((tv2.tv_sec-tv1.tv_sec) * (double)1E6 + (tv2.tv_usec-tv1.tv_usec)) / (double)1E6;

    //////////////////// Clean Up ////////////////////

    // clean up
    for (size_t i=0; i<NumLayers; ++i) {
        free(Layer[i]); free(RxLayer[i]); free(RyLayer[i]); free(RGLayer[i]);
    }
    for (size_t i=0; i<NumLayers-1; ++i) {
        free(W[i]); free(VW[i]); free(RGW[i]);
        free(B[i]); free(VB[i]); free(RGB[i]);
    }
    free(Observ); free(Mean); free(Std); free(Action); free(Advantage); free(VLogStd);

    return runtimeS;
}
|
explicit_solver_strategy.h | //
// Authors:
// Miguel Angel Celigueta maceli@cimne.upc.edu
// Miquel Santasusana msantasusana@cimne.upc.edu
//
#if !defined(KRATOS_EXPLICIT_SOLVER_STRATEGY)
#define KRATOS_EXPLICIT_SOLVER_STRATEGY
// Project includes
#include "utilities/timer.h"
#include "custom_elements/Particle_Contact_Element.h"
#include "includes/variables.h"
#include "includes/deprecated_variables.h"
/* System includes */
#include <limits>
#include <iostream>
#include <iomanip>
#include <time.h>
/* External includes */
#ifdef _OPENMP
#include <omp.h>
#endif
#define CUSTOMTIMER 0 // ACTIVATES AND DISABLES ::TIMER:::::
#include "includes/define.h"
#include "utilities/openmp_utils.h"
#include "includes/model_part.h"
#include "solving_strategies/strategies/implicit_solving_strategy.h"
#include "solving_strategies/schemes/scheme.h"
#include "custom_strategies/schemes/dem_integration_scheme.h"
#include "custom_utilities/create_and_destroy.h"
#include "custom_utilities/dem_fem_utilities.h"
#include "custom_utilities/GeometryFunctions.h"
#include "custom_utilities/inlet.h"
#include "custom_elements/cluster3D.h"
#include "custom_elements/rigid_body_element.h"
////Cfeng
#include "custom_utilities/dem_fem_search.h"
#include "custom_utilities/discrete_particle_configure.h"
#include "custom_utilities/node_configure.h"
#include "custom_utilities/rigid_face_geometrical_object_configure.h"
#ifdef USING_CGAL
#include <CGAL/spatial_sort.h>
#endif
/* Timer defines */
#ifdef CUSTOMTIMER
#define KRATOS_TIMER_START(t) Timer::Start(t);
#define KRATOS_TIMER_STOP(t) Timer::Stop(t);
#else
#define KRATOS_TIMER_START(t)
#define KRATOS_TIMER_STOP(t)
#endif
namespace Kratos {
/// Plain bundle of the model parts an ExplicitSolverStrategy operates on.
/// The pointers are non-owning (lifetime is managed by the caller); they are
/// initialised to nullptr so an unset part is detectable instead of reading
/// an indeterminate pointer value.
class ExplicitSolverSettings {
public:
    KRATOS_CLASS_POINTER_DEFINITION(ExplicitSolverSettings);

    ExplicitSolverSettings() = default;
    ~ExplicitSolverSettings() = default;

    ModelPart* r_model_part       = nullptr; ///< main (spheres) model part
    ModelPart* contact_model_part = nullptr; ///< contact elements model part
    ModelPart* fem_model_part     = nullptr; ///< rigid-face (FEM wall) model part
    ModelPart* cluster_model_part = nullptr; ///< cluster elements model part
    ModelPart* inlet_model_part   = nullptr; ///< inlet (particle injection) model part
};
/// Explicit time-integration solver strategy for the DEM application.
///
/// Owns the neighbour-search utilities and drives the per-step pipeline
/// (initialize -> neighbour search -> force computation -> time integration
/// -> finalize) over the sphere, cluster, FEM-wall and inlet model parts.
/// All ModelPart pointers held here are non-owning.
class KRATOS_API(DEM_APPLICATION) ExplicitSolverStrategy {
public:

    // --- container and spatial-search type shorthands ---------------------
    typedef ModelPart::NodesContainerType NodesArrayType;
    typedef ModelPart::ElementsContainerType ElementsArrayType;
    typedef ElementsArrayType::iterator ElementsIterator;
    typedef ModelPart::ConditionsContainerType ConditionsArrayType;
    typedef ModelPart::NodesContainerType::ContainerType NodesContainerType;
    typedef ModelPart::ElementsContainerType::ContainerType ElementsContainerType;
    typedef ModelPart::ConditionsContainerType::ContainerType ConditionsContainerType;
    typedef SpatialSearch::ResultElementsContainerType ResultElementsContainerType;
    typedef SpatialSearch::VectorResultElementsContainerType VectorResultElementsContainerType;
    typedef SpatialSearch::RadiusArrayType RadiusArrayType;
    typedef SpatialSearch::DistanceType DistanceType;
    typedef SpatialSearch::VectorDistanceType VectorDistanceType;
    typedef SpatialSearch::ResultConditionsContainerType ResultConditionsContainerType;
    typedef SpatialSearch::VectorResultConditionsContainerType VectorResultConditionsContainerType;
    typedef PointerVectorSet<Properties, IndexedObject> PropertiesContainerType;
    typedef PropertiesContainerType::iterator PropertiesIterator;
    typedef DiscreteParticleConfigure<3> ElementConfigureType;
    typedef NodeConfigure<3> NodeConfigureType;
    typedef RigidFaceGeometricalObjectConfigure<3> RigidFaceGeometricalConfigureType;
    typedef Variable<double> ComponentOf3ComponentsVariableType;

    /// Pointer definition of ExplicitSolverStrategy
    KRATOS_CLASS_POINTER_DEFINITION(ExplicitSolverStrategy);

    /// Default constructor; members are left unset (use the full constructor).
    ExplicitSolverStrategy() {
    }

    /// Full constructor.
    /// @param settings              bundle of non-owning model-part pointers; every part must be set
    /// @param max_delta_time        upper bound for the integration time step
    /// @param n_step_search         neighbour search is performed every n_step_search steps
    /// @param safety_factor         safety factor applied to the critical time step estimate
    /// @param delta_option          continuum "delta displacement" option flag
    /// @param p_creator_destructor  utility that creates and destroys particles
    /// @param p_dem_fem_search      DEM-FEM (particle vs. wall) contact search utility
    /// @param pSpSearch             particle-particle spatial search utility
    /// @param strategy_parameters   Kratos Parameters holding the search flags
    ExplicitSolverStrategy(ExplicitSolverSettings& settings,
                           const double max_delta_time,
                           const int n_step_search,
                           const double safety_factor,
                           const int delta_option,
                           ParticleCreatorDestructor::Pointer p_creator_destructor,
                           DEM_FEM_Search::Pointer p_dem_fem_search,
                           SpatialSearch::Pointer pSpSearch,
                           Parameters strategy_parameters) {

        mParameters = strategy_parameters;
        mDeltaOption = delta_option;
        mpParticleCreatorDestructor = p_creator_destructor;
        mpDemFemSearch = p_dem_fem_search;
        mpSpSearch = pSpSearch;

        // Direct boolean assignments (the previous if/else pairs were redundant).
        mDoSearchNeighbourElements = mParameters["do_search_dem_neighbours"].GetBool();
        p_creator_destructor->SetDoSearchNeighbourElements(mDoSearchNeighbourElements);
        mDoSearchNeighbourFEMElements = mParameters["do_search_fem_neighbours"].GetBool();

        mMaxTimeStep = max_delta_time;
        mNStepSearch = n_step_search;
        mSafetyFactor = safety_factor;

        // Copy the non-owning pointers directly (the previous &(*(ptr)) form
        // dereferenced a possibly-null pointer before the null check) and
        // validate each one.
        mpDem_model_part = settings.r_model_part;
        KRATOS_ERROR_IF(mpDem_model_part == NULL) << "Undefined settings.r_model_part in ExplicitSolverStrategy constructor" << std::endl;
        mpContact_model_part = settings.contact_model_part;
        KRATOS_ERROR_IF(mpContact_model_part == NULL) << "Undefined settings.contact_model_part in ExplicitSolverStrategy constructor" << std::endl;
        mpFem_model_part = settings.fem_model_part;
        KRATOS_ERROR_IF(mpFem_model_part == NULL) << "Undefined settings.fem_model_part in ExplicitSolverStrategy constructor" << std::endl;
        mpCluster_model_part = settings.cluster_model_part;
        KRATOS_ERROR_IF(mpCluster_model_part == NULL) << "Undefined settings.cluster_model_part in ExplicitSolverStrategy constructor" << std::endl;
        mpInlet_model_part = settings.inlet_model_part;
        KRATOS_ERROR_IF(mpInlet_model_part == NULL) << "Undefined settings.inlet_model_part in ExplicitSolverStrategy constructor" << std::endl;

        mRemoveBallsInitiallyTouchingWallsOption = mParameters["RemoveBallsInitiallyTouchingWalls"].GetBool();
    }

    /// Destructor.
    virtual ~ExplicitSolverStrategy() {
        //Timer::SetOuputFile("TimesPartialRelease");
        //Timer::PrintTimingInformation();
    }

    // Comparators on particle centre coordinates, used by CGAL spatial sort.
    struct LessX {
        bool operator()(const SphericParticle* p, const SphericParticle* q) const {return p->GetGeometry()[0].Coordinates()[0] < q->GetGeometry()[0].Coordinates()[0];}
    };
    struct LessY {
        bool operator()(const SphericParticle* p, const SphericParticle* q) const {return p->GetGeometry()[0].Coordinates()[1] < q->GetGeometry()[0].Coordinates()[1];}
    };
    struct LessZ {
        bool operator()(const SphericParticle* p, const SphericParticle* q) const {return p->GetGeometry()[0].Coordinates()[2] < q->GetGeometry()[0].Coordinates()[2];}
    };
    /// Traits type satisfying CGAL::spatial_sort's requirements for particles.
    struct SpatialSortingTraits {
        typedef SphericParticle* Point_2;
        typedef LessX Less_x_2;
        typedef LessY Less_y_2;
        typedef LessZ Less_z_2;
        Less_x_2 less_x_2_object() const {return Less_x_2();}
        Less_y_2 less_y_2_object() const {return Less_y_2();}
        Less_z_2 less_z_2_object() const { return Less_z_2();}
    };

#ifdef USING_CGAL
    /// Reorder the particle list along a space-filling order to improve locality.
    void ReorderParticles() {
        SpatialSortingTraits sst;
        CGAL::spatial_sort(mListOfSphericParticles.begin(), mListOfSphericParticles.end(), sst);
    }
#endif

    /// Rebuild rCustomListOfParticles as raw T* views over pElements
    /// (dynamic_cast per element; entries that are not T become nullptr).
    template <class T>
    void RebuildListOfSphericParticles(ElementsArrayType& pElements, std::vector<T*>& rCustomListOfParticles){
        KRATOS_TRY
        rCustomListOfParticles.resize(pElements.size());
        #pragma omp parallel for
        for (int k = 0; k < (int)pElements.size(); k++){
            ElementsArrayType::iterator particle_pointer_it = pElements.ptr_begin() + k;
            T* spheric_particle = dynamic_cast<T*>(&(*particle_pointer_it));
            rCustomListOfParticles[k] = spheric_particle;
        }
        return;
        KRATOS_CATCH("")
    }

    /// Rebuild the cached list of local spheres from the DEM model part.
    void RebuildListOfDiscontinuumSphericParticles() {
        RebuildListOfSphericParticles<SphericParticle>(GetModelPart().GetCommunicator().LocalMesh().Elements(), mListOfSphericParticles);
    }

    // --- per-step pipeline and helpers (implemented in the .cpp) ----------
    void RebuildPropertiesProxyPointers(std::vector<SphericParticle*>& rCustomListOfSphericParticles);
    void SendProcessInfoToClustersModelPart();
    void UpdateMaxIdOfCreatorDestructor();
    void RepairPointersToNormalProperties(std::vector<SphericParticle*>& rCustomListOfSphericParticles);
    virtual void Initialize();
    virtual void AttachSpheresToStickyWalls();
    virtual void DisplayThreadInfo();
    double CalculateMaxInletTimeStep();
    virtual void InitializeClusters();
    virtual void GetClustersForce();
    virtual void GetRigidBodyElementsForce();
    virtual double SolveSolutionStep();
    void SearchDEMOperations(ModelPart& r_model_part, bool has_mpi = true);
    void SearchFEMOperations(ModelPart& r_model_part, bool has_mpi = true) ;
    virtual void ForceOperations(ModelPart& r_model_part);
    void GetForce();
    void FastGetForce();
    virtual void PerformTimeIntegrationOfMotion(int StepFlag = 0);
    void InitializeSolutionStep();
    virtual void BoundingBoxUtility(bool is_time_to_mark_and_remove = true);
    virtual void FinalizeSolutionStep();
    void InitializeElements();
    void InitializeDEMElements();
    void InitializeFEMElements();
    //void InitializeRigidBodyElements();
    void InitializeFEMWallsAsRigidBodyElements(ModelPart::SubModelPartsContainerType::iterator& sub_model_part);
    void MarkToDeleteAllSpheresInitiallyIndentedWithFEM(ModelPart& rSpheresModelPart);
    void ComputeNodalArea();
    void ComputeNormalPressureVectorField();
    virtual void CalculateConditionsRHSAndAdd();
    void ClearFEMForces();
    void CalculateNodalPressuresAndStressesOnWalls();
    void SetFlagAndVariableToNodes(const Kratos::Flags& r_flag_name, ComponentOf3ComponentsVariableType& r_variable_to_set, const double value, NodesArrayType& r_nodes_array);
    void SetVariableToNodes(ComponentOf3ComponentsVariableType& r_variable_to_set, const double value, NodesArrayType& r_nodes_array);
    void ResetPrescribedMotionFlagsRespectingImposedDofs();
    void ApplyPrescribedBoundaryConditions();
    void ApplyInitialConditions();
    virtual void SetSearchRadiiOnAllParticles(ModelPart& r_model_part, const double added_search_distance = 0.0, const double amplification = 1.0);
    void SetNormalRadiiOnAllParticles(ModelPart& r_model_part);
    virtual void SetSearchRadiiWithFemOnAllParticles(ModelPart& r_model_part, const double added_search_distance = 0.0, const double amplification = 1.0);
    virtual void SearchNeighbours();
    virtual void ComputeNewNeighboursHistoricalData();
    virtual void CreateContactElements();
    void InitializeContactElements();
    // void ContactInitializeSolutionStep();
    void PrepareContactElementsForPrinting();
    virtual void ComputeNewRigidFaceNeighboursHistoricalData();
    virtual void SearchRigidFaceNeighbours();
    void CheckHierarchyWithCurrentNeighbours();
    /* This should work only with one iteration, but it with mpi does not */
    void CalculateInitialMaxIndentations(const ProcessInfo& r_process_info);
    void PrepareContactModelPart(ModelPart& r_model_part, ModelPart& mcontacts_model_part);
    void PrepareElementsForPrinting();
    void SynchronizeHistoricalVariables(ModelPart& r_model_part);
    void SynchronizeRHS(ModelPart& r_model_part);
    void CleanEnergies();
    void Check_MPI(bool& has_mpi);

    // --- accessors --------------------------------------------------------
    ModelPart& GetModelPart() { return (*mpDem_model_part);}
    ModelPart& GetFemModelPart() { return (*mpFem_model_part);}
    ModelPart& GetContactModelPart() { return (*mpContact_model_part);}
    ModelPart& GetClusterModelPart() { return (*mpCluster_model_part);}
    ModelPart& GetInletModelPart() { return (*mpInlet_model_part);}
    ModelPart& GetRigidBodyModelPart() { return (*mpRigidBody_model_part);}
    VectorResultElementsContainerType& GetResults() { return (mResults);}
    VectorDistanceType& GetResultsDistances() { return (mResultsDistances);}
    RadiusArrayType& GetArrayOfAmplifiedRadii() { return (mArrayOfAmplifiedRadii);}
    int& GetNStepSearch() { return (mNStepSearch);}
    int& GetSearchControl() { return mSearchControl;}
    int& GetNumberOfThreads() { return (mNumberOfThreads);}
    double& GetMaxTimeStep() { return (mMaxTimeStep);}
    double& GetSafetyFactor() { return (mSafetyFactor);}
    int& GetDeltaOption() { return (mDeltaOption);}
    ParticleCreatorDestructor::Pointer& GetParticleCreatorDestructor() { return (mpParticleCreatorDestructor);}
    SpatialSearch::Pointer& GetSpSearch() { return (mpSpSearch);}
    VectorResultConditionsContainerType& GetRigidFaceResults() { return (mRigidFaceResults);}
    VectorDistanceType& GetRigidFaceResultsDistances() { return (mRigidFaceResultsDistances);}
    DEM_FEM_Search::Pointer& GetDemFemSearch() { return (mpDemFemSearch);}
    virtual ElementsArrayType& GetElements(ModelPart& r_model_part) { return r_model_part.GetCommunicator().LocalMesh().Elements();}
    virtual ElementsArrayType& GetAllElements(ModelPart& r_model_part) {
        return r_model_part.Elements();
    }

protected:

    Parameters mParameters;
    bool mRemoveBallsInitiallyTouchingWallsOption;
    VectorResultElementsContainerType mResults;
    VectorDistanceType mResultsDistances;
    RadiusArrayType mArrayOfAmplifiedRadii;
    int mNStepSearch;
    int mSearchControl;
    int mNumberOfThreads;
    double mMaxTimeStep;
    double mSafetyFactor;
    int mDeltaOption;
    ParticleCreatorDestructor::Pointer mpParticleCreatorDestructor;
    DEM_FEM_Search::Pointer mpDemFemSearch;
    SpatialSearch::Pointer mpSpSearch;
    bool mDoSearchNeighbourElements;
    bool mDoSearchNeighbourFEMElements;
    VectorResultConditionsContainerType mRigidFaceResults;
    VectorDistanceType mRigidFaceResultsDistances;
    // Non-owning model part pointers (set and validated in the constructor).
    ModelPart *mpFem_model_part;
    ModelPart *mpDem_model_part;
    ModelPart *mpInlet_model_part;
    ModelPart *mpContact_model_part;
    ModelPart *mpCluster_model_part;
    ModelPart *mpRigidBody_model_part;
    std::vector<SphericParticle*> mListOfSphericParticles;
    std::vector<SphericParticle*> mListOfGhostSphericParticles;
}; // Class ExplicitSolverStrategy
} // namespace Kratos.
#endif // KRATOS_EXPLICIT_SOLVER_STRATEGY defined
|
data.c | /*
* Copyright (c) 2017, Marcel Heing-Becker, University of Hamburg
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <fcntl.h>
#include <sys/stat.h>
#include "raw_wrapper.h"
#include "sha256.h"
/* In-memory replay "tape": a single buffer consumed sequentially by
 * rw_tape_read_bytes(). */
int8_t *inline_buffer = NULL; /* heap buffer holding the recorded bytes */
size_t buffer_position = 0; /* current read offset into inline_buffer */
size_t buffer_size = 0; /* number of valid bytes in inline_buffer */
/* Optional transformation applied to every chunk read from the tape. */
void (*replay_filter)(void*, size_t) = NULL;
/*
 * Create the per-rank capture directory "out/<rank>" (creating "out" itself
 * first if missing) and write that path into path_buffer.
 *
 * rank:        rank used as the subdirectory name.
 * path_buffer: caller-provided buffer; receives the NUL-terminated path
 *              "out/<rank>" (must hold at least 5 + digits(rank) bytes).
 *
 * Returns the length of the written path, or -1 if a mkdir() failed.
 */
int32_t rw_create_capture_rank_dir(int rank, int8_t *path_buffer) {
    struct stat stat_buf;
    /* int8_t is signed char; the POSIX/stdio APIs want plain char*. */
    char *path = (char *) path_buffer;
    static const char out_dir[] = "out";

    /* Ensure the top-level output directory exists. */
    if (0 != stat(out_dir, &stat_buf)) {
        if (0 != mkdir(out_dir, 0755)) {
            return -1;
        }
    }
    /* Build "out/<rank>" in one step instead of piecewise memcpy calls. */
    sprintf(path, "%s/%d", out_dir, rank);
    /* Ensure the rank-specific subdirectory exists. */
    if (0 != stat(path, &stat_buf)) {
        if (0 != mkdir(path, 0755)) {
            return -1;
        }
    }
    return (int32_t) strlen(path);
}
/*
 * Derive a stable, printable identifier for an MPI request by hashing the
 * request's bytes with SHA-256 and hex-encoding the digest.
 *
 * request:    request whose in-memory representation is hashed.
 * out_buffer: receives 2 * SHA256_BYTES hex characters plus a trailing NUL
 *             (must hold at least 2 * SHA256_BYTES + 1 bytes).
 */
void rw_mpi_request_to_unique_string(MPI_Request *request, int8_t *out_buffer) {
    int8_t digest[32] = {0};
    sha256((void*) request, sizeof(MPI_Request), digest);
    for (size_t i = 0; i < SHA256_BYTES / sizeof(uint32_t); ++i) {
        size_t offset = sizeof(uint32_t) * i;
        uint32_t word;
        /* memcpy instead of a type-punning cast: the old
         * *(uint32_t*)&digest[offset] violated strict aliasing and could be
         * misaligned. */
        memcpy(&word, &digest[offset], sizeof(word));
        sprintf((char *) &out_buffer[offset * 2], "%08x", word);
    }
}
/* Run the registered replay filter, if any, over a chunk handed to the caller. */
void rw_replay_filter_apply (void *buf, size_t size) {
    if (NULL == replay_filter) {
        return; /* no filter registered: chunk is passed through unchanged */
    }
    replay_filter(buf, size);
}
/* Release the tape buffer. Safe to call repeatedly: the pointer is reset to
 * NULL after freeing. */
void rw_tape_free () {
    if (NULL == inline_buffer) {
        return;
    }
    free(inline_buffer);
    inline_buffer = NULL;
}
/*
 * Copy exactly total_size bytes from the tape into buf, apply the replay
 * filter, and advance the read cursor. All-or-nothing: returns total_size on
 * success, or 0 if the tape is unset or has fewer than total_size bytes left.
 */
size_t rw_tape_read_bytes(void *buf, size_t total_size) {
if (NULL != inline_buffer && total_size <= buffer_size - buffer_position) {
memcpy(buf, inline_buffer + buffer_position, total_size);
rw_replay_filter_apply(buf, total_size);
/* NOTE(review): only this store is atomic; the read of buffer_position in
 * the condition above and the memcpy are unsynchronized, so concurrent
 * readers would still race -- presumably only one thread reads the tape.
 * Confirm the intended threading model. */
#pragma omp atomic write
buffer_position = buffer_position + total_size;
return total_size;
} else {
return 0;
}
}
|
app.c | /**
* Christina Giannoula
* cgiannoula: christina.giann@gmail.com
*/
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <string.h>
#include <dpu.h>
#include <dpu_log.h>
#include <unistd.h>
#include <getopt.h>
#include <assert.h>
#include <math.h>
#include <omp.h>
#include "../support/common.h"
#include "../support/matrix.h"
#include "../support/params.h"
#include "../support/partition.h"
#include "../support/timer.h"
#include "../support/utils.h"
// Define the DPU Binary path as DPU_BINARY here.
#ifndef DPU_BINARY
#define DPU_BINARY "./bin/spmv_dpu"
#endif
#define DPU_CAPACITY (64 << 20) // A DPU's capacity is 64 MB
/*
* Main Structures:
* 1. Matrices
* 2. Input vector
* 3. Output vector
* 4. Help structures for data partitioning
*/
static struct DBCSRMatrix* A; /* blocked matrix actually distributed to the DPUs */
static struct DCSRMatrix* B; /* intermediate partitioned CSR form */
static struct COOMatrix* C; /* matrix as read from the input file */
static val_dt* x; /* input vector */
static val_dt* y; /* output vector (per-DPU partials, merged on the host) */
static struct partition_info_t *part_info; /* tasklet load-balancing info */
/**
 * @brief Specific information for each DPU
 */
struct dpu_info_t {
uint32_t block_rows_per_dpu; /* block rows handled by this DPU (num_block_rows + 1 sentinel) */
uint32_t prev_block_rows_dpu; /* block rows handled by preceding DPUs */
uint32_t blocks; /* non-zero blocks assigned to this DPU */
uint32_t blocks_pad; /* blocks rounded up to an even count for 8-byte transfers */
uint32_t merge; /* NOTE(review): never assigned in this file -- confirm whether it is still used */
};
struct dpu_info_t *dpu_info;
/**
 * @brief Compute how many DPUs serve each vertical partition.
 * @param n total number of available DPUs
 * @param horz_partitions output: number of horizontal partitions (n / vert_partitions)
 * @param vert_partitions number of vertical partitions
 */
void find_partitions(uint32_t n, uint32_t *horz_partitions, uint32_t vert_partitions) {
    /* Evenly split the DPUs: each vertical partition is served by
     * n / vert_partitions DPUs (integer division; remainder DPUs unused). */
    *horz_partitions = n / vert_partitions;
}
/**
 * @brief Initialize the input vector with a repeating 1,2,3,4 pattern.
 * @param vec pointer to the input vector
 * @param size number of elements in the vector
 */
void init_vector(val_dt* vec, uint32_t size) {
    /* (k & 3) == k % 4 for unsigned k, so values cycle through 1,2,3,4. */
    for (uint32_t k = 0; k < size; ++k) {
        vec[k] = (val_dt) ((k & 3) + 1);
    }
}
/**
 * @brief Reference SpMV on the host CPU: y += A * x over the blocked DBCSR
 *        matrix, walking partitions in row-major order. Used to verify the
 *        DPU results (see CHECK_CORR in main).
 * @param y output vector, accumulated into (caller must zero-initialize)
 * @param A blocked matrix, split into horz_partitions x vert_partitions tiles
 * @param x input vector
 */
static void spmv_host(val_dt* y, struct DBCSRMatrix *A, val_dt* x) {
uint64_t total_blocks = 0;
for (uint32_t r = 0; r < A->horz_partitions; r++) {
for (uint32_t c = 0; c < A->vert_partitions; c++) {
/* Each partition stores its own (num_block_rows + 1)-entry row-pointer array. */
uint32_t ptr_offset = (r * A->vert_partitions + c) * (A->num_block_rows + 1);
for(uint64_t n=0; n < A->num_block_rows; n++) {
for(uint64_t i=A->browptr[ptr_offset + n]; i<A->browptr[ptr_offset + n+1]; i++){
uint64_t j = A->bcolind[total_blocks + i];
/* Dense (row_block_size x col_block_size) block multiply-accumulate. */
for(uint64_t blr=0; blr < A->row_block_size; blr++){
val_dt acc = 0;
for(uint64_t blc=0; blc < A->col_block_size; blc++) {
acc += A->bval[(total_blocks + i) * A->col_block_size * A->row_block_size + blr * A->col_block_size + blc] * x[c * A->tile_width + j * A->col_block_size + blc];
}
y[r * A->tile_height + n * A->row_block_size + blr] += acc;
}
}
}
/* browptr/bcolind indices are partition-local: keep a running block offset. */
total_blocks += A->blocks_per_partition[r * A->vert_partitions + c];
}
}
}
/**
 * @brief main of the host application: read the matrix, partition it into a
 *        blocked DBCSR layout, distribute it across the DPUs, run the SpMV
 *        kernel, merge the per-DPU partial results on the host, and
 *        optionally verify against a host-side SpMV (CHECK_CORR).
 */
int main(int argc, char **argv) {
struct Params p = input_params(argc, argv);
struct dpu_set_t dpu_set, dpu;
uint32_t nr_of_dpus;
// Allocate DPUs and load binary
DPU_ASSERT(dpu_alloc(NR_DPUS, NULL, &dpu_set));
DPU_ASSERT(dpu_load(dpu_set, DPU_BINARY, NULL));
DPU_ASSERT(dpu_get_nr_dpus(dpu_set, &nr_of_dpus));
printf("[INFO] Allocated %d DPU(s)\n", nr_of_dpus);
printf("[INFO] Allocated %d TASKLET(s) per DPU\n", NR_TASKLETS);
unsigned int i;
// Initialize input data: COO (from file) -> partitioned DCSR -> blocked DBCSR
C = readCOOMatrix(p.fileName);
sortCOOMatrix(C);
uint32_t horz_partitions = 0;
uint32_t vert_partitions = p.vert_partitions;
find_partitions(nr_of_dpus, &horz_partitions, p.vert_partitions);
printf("[INFO] %dx%d Matrix Partitioning\n\n", horz_partitions, vert_partitions);
B = coo2dcsr(C, horz_partitions, vert_partitions);
freeCOOMatrix(C);
A = dcsr2dbcsr(B, p.row_blsize, p.col_blsize);
countNNZperBlockDBCSRMatrix(A);
freeDCSRMatrix(B);
// Initialize partition data
part_info = partition_init(nr_of_dpus, NR_TASKLETS);
// Initialize help data - Padding needed (sizes rounded up to 8-byte multiples)
uint32_t ncols_pad = A->vert_partitions * A->tile_width + A->col_block_size;
uint32_t tile_width_pad = A->num_block_cols * A->col_block_size;
uint32_t nrows_pad = A->horz_partitions * A->tile_height + A->row_block_size;
if (ncols_pad % (8 / byte_dt) != 0)
ncols_pad = ncols_pad + ((8 / byte_dt) - (ncols_pad % (8 / byte_dt)));
if (tile_width_pad % (8 / byte_dt) != 0)
tile_width_pad = tile_width_pad + ((8 / byte_dt) - (tile_width_pad % (8 / byte_dt)));
#if INT8
if (tile_width_pad % 2 != 0)
tile_width_pad++;
#endif
if (nrows_pad % (8 / byte_dt) != 0)
nrows_pad = nrows_pad + ((8 / byte_dt) - (nrows_pad % (8 / byte_dt)));
// Allocate input vector
x = (val_dt *) malloc(ncols_pad * sizeof(val_dt));
// Initialize input vector with arbitrary data
init_vector(x, ncols_pad);
// Initialize help data
dpu_info = (struct dpu_info_t *) malloc(nr_of_dpus * sizeof(struct dpu_info_t));
dpu_arguments_t *input_args = (dpu_arguments_t *) malloc(nr_of_dpus * sizeof(dpu_arguments_t));
// Max limits for parallel transfers (all DPUs share one common transfer size)
uint64_t max_block_rows_per_dpu = 0;
uint64_t max_blocks_per_dpu = 0;
// Timer for measurements
Timer timer;
i = 0;
uint32_t total_blocks = 0;
DPU_FOREACH(dpu_set, dpu, i) {
// Find padding for block rows and non-zero elements needed for CPU-DPU transfers
uint64_t block_rows_per_dpu = A->num_block_rows+1;
uint64_t prev_block_rows_dpu = 0;
if (block_rows_per_dpu > max_block_rows_per_dpu)
max_block_rows_per_dpu = block_rows_per_dpu;
unsigned int blocks, blocks_pad;
blocks = A->blocks_per_partition[i];
// Round up to an even block count (MRAM transfers need 8-byte alignment)
if (blocks % 2 != 0)
blocks_pad = blocks + 1;
else
blocks_pad = blocks;
if (blocks_pad > max_blocks_per_dpu)
max_blocks_per_dpu = blocks_pad;
// Keep information per DPU
dpu_info[i].block_rows_per_dpu = block_rows_per_dpu;
dpu_info[i].prev_block_rows_dpu = prev_block_rows_dpu;
dpu_info[i].blocks = blocks;
dpu_info[i].blocks_pad = blocks_pad;
// Find input arguments per DPU
input_args[i].block_rows = block_rows_per_dpu;
input_args[i].tcols = tile_width_pad;
input_args[i].row_block_size = A->row_block_size;
input_args[i].col_block_size = A->col_block_size;
//input_args[i].blocks = blocks;
#if BLNC_TSKLT_BLOCK
// Load-balance blocks across tasklets
partition_tsklt_by_block(A, part_info, i, NR_TASKLETS, nr_of_dpus, total_blocks);
#else
// Load-balance nnzs across tasklets
partition_tsklt_by_nnz(A, part_info, i, NR_TASKLETS, nr_of_dpus, total_blocks);
#endif
uint32_t t;
for (t = 0; t < NR_TASKLETS; t++) {
// Find input arguments per DPU
input_args[i].start_block_row[t] = part_info->brow_split_tasklet[i * (NR_TASKLETS+2) + t];
input_args[i].end_block_row[t] = part_info->brow_split_tasklet[i * (NR_TASKLETS+2) + (t+1)];
}
total_blocks += A->blocks_per_partition[i];
}
// Initialization for parallel transfers
if (max_block_rows_per_dpu % 2 != 0)
max_block_rows_per_dpu++;
if (max_blocks_per_dpu % 2 != 0)
max_blocks_per_dpu++;
// Re-allocations for padding needed
A->browptr = (uint32_t *) realloc(A->browptr, (max_block_rows_per_dpu * nr_of_dpus * sizeof(uint32_t)));
A->bcolind = (uint32_t *) realloc(A->bcolind, (max_blocks_per_dpu * nr_of_dpus * sizeof(uint32_t)));
A->bval = (val_dt *) realloc(A->bval, (max_blocks_per_dpu * A->row_block_size * A->col_block_size * nr_of_dpus * sizeof(val_dt)));
y = (val_dt *) calloc((uint64_t) ((uint64_t) nr_of_dpus * (uint64_t) max_block_rows_per_dpu * A->row_block_size), sizeof(val_dt));
// Count total number of bytes to be transfered in MRAM of DPU
unsigned long int total_bytes;
total_bytes = ((max_block_rows_per_dpu) * sizeof(uint32_t)) + (max_blocks_per_dpu * sizeof(uint32_t)) + (max_blocks_per_dpu * A->row_block_size * A->col_block_size * sizeof(val_dt)) + (tile_width_pad * sizeof(val_dt)) + (max_block_rows_per_dpu * A->row_block_size * sizeof(val_dt));
assert(total_bytes <= DPU_CAPACITY && "Bytes needed exceeded MRAM size");
// Copy input arguments to DPUs
i = 0;
DPU_FOREACH(dpu_set, dpu, i) {
input_args[i].max_block_rows = max_block_rows_per_dpu;
input_args[i].max_blocks = max_blocks_per_dpu;
DPU_ASSERT(dpu_prepare_xfer(dpu, input_args + i));
}
DPU_ASSERT(dpu_push_xfer(dpu_set, DPU_XFER_TO_DPU, "DPU_INPUT_ARGUMENTS", 0, sizeof(dpu_arguments_t), DPU_XFER_DEFAULT));
// MRAM heap layout per DPU, by increasing offset (derived from the offsets
// used in the dpu_push_xfer calls below):
//   [y output][x tile][browptr][bcolind][bval]
// Copy input matrix to DPUs
startTimer(&timer, 0);
// Copy Browptr
i = 0;
DPU_FOREACH(dpu_set, dpu, i) {
DPU_ASSERT(dpu_prepare_xfer(dpu, A->browptr + i * (A->num_block_rows + 1)));
}
DPU_ASSERT(dpu_push_xfer(dpu_set, DPU_XFER_TO_DPU, DPU_MRAM_HEAP_POINTER_NAME, (max_block_rows_per_dpu * A->row_block_size * sizeof(val_dt) + tile_width_pad * sizeof(val_dt)), max_block_rows_per_dpu * sizeof(uint32_t), DPU_XFER_DEFAULT));
// Copy Bcolind
i = 0;
total_blocks = 0;
DPU_FOREACH(dpu_set, dpu, i) {
DPU_ASSERT(dpu_prepare_xfer(dpu, A->bcolind + total_blocks));
total_blocks += A->blocks_per_partition[i];
}
DPU_ASSERT(dpu_push_xfer(dpu_set, DPU_XFER_TO_DPU, DPU_MRAM_HEAP_POINTER_NAME, max_block_rows_per_dpu * A->row_block_size * sizeof(val_dt) + tile_width_pad * sizeof(val_dt) + max_block_rows_per_dpu * sizeof(uint32_t), max_blocks_per_dpu * sizeof(uint32_t), DPU_XFER_DEFAULT));
// Copy Bvalues
i = 0;
total_blocks = 0;
DPU_FOREACH(dpu_set, dpu, i) {
DPU_ASSERT(dpu_prepare_xfer(dpu, A->bval + ((uint64_t) total_blocks * A->row_block_size * A->col_block_size)));
total_blocks += A->blocks_per_partition[i];
}
DPU_ASSERT(dpu_push_xfer(dpu_set, DPU_XFER_TO_DPU, DPU_MRAM_HEAP_POINTER_NAME, max_block_rows_per_dpu * A->row_block_size * sizeof(val_dt) + tile_width_pad * sizeof(val_dt) + max_block_rows_per_dpu * sizeof(uint32_t) + max_blocks_per_dpu * sizeof(uint32_t), max_blocks_per_dpu * A->row_block_size * A->col_block_size * sizeof(val_dt), DPU_XFER_DEFAULT));
stopTimer(&timer, 0);
// Copy input vector to DPUs (each DPU gets the x tile of its vertical partition)
startTimer(&timer, 1);
i = 0;
DPU_FOREACH(dpu_set, dpu, i) {
uint32_t tile_vert_indx = i % A->vert_partitions;
DPU_ASSERT(dpu_prepare_xfer(dpu, x + tile_vert_indx * A->tile_width));
}
DPU_ASSERT(dpu_push_xfer(dpu_set, DPU_XFER_TO_DPU, DPU_MRAM_HEAP_POINTER_NAME, max_block_rows_per_dpu * A->row_block_size * sizeof(val_dt), tile_width_pad * sizeof(val_dt), DPU_XFER_DEFAULT));
stopTimer(&timer, 1);
// Run kernel on DPUs
startTimer(&timer, 2);
DPU_ASSERT(dpu_launch(dpu_set, DPU_SYNCHRONOUS));
stopTimer(&timer, 2);
#if LOG
// Display DPU Log (default: disabled)
DPU_FOREACH(dpu_set, dpu) {
DPU_ASSERT(dpulog_read_for_dpu(dpu.dpu, stdout));
}
#endif
// Retrieve results for output vector from DPUs (y partials live at MRAM offset 0)
startTimer(&timer, 3);
i = 0;
DPU_FOREACH(dpu_set, dpu, i) {
DPU_ASSERT(dpu_prepare_xfer(dpu, y + (i * max_block_rows_per_dpu * A->row_block_size)));
}
DPU_ASSERT(dpu_push_xfer(dpu_set, DPU_XFER_FROM_DPU, DPU_MRAM_HEAP_POINTER_NAME, 0, max_block_rows_per_dpu * A->row_block_size * sizeof(val_dt), DPU_XFER_DEFAULT));
stopTimer(&timer, 3);
// Merge partial results to the host CPU: for each horizontal partition, add
// the partials of vertical partitions c = 1.. into the c = 0 slot of y.
startTimer(&timer, 4);
uint32_t r, c, t;
#pragma omp parallel for num_threads(p.nthreads) shared(A, y, max_block_rows_per_dpu) private(r,c,t) collapse(2)
for (r = 0; r < A->horz_partitions; r++) {
for (t = 0; t < A->tile_height; t++) {
for (c = 1; c < A->vert_partitions; c++) {
y[r * A->vert_partitions * max_block_rows_per_dpu * A->row_block_size + t] += y[r * A->vert_partitions * max_block_rows_per_dpu * A->row_block_size + c * max_block_rows_per_dpu * A->row_block_size + t];
}
}
}
stopTimer(&timer, 4);
// Print timing results
printf("\n");
printf("Load Matrix ");
printTimer(&timer, 0);
printf("Load Input Vector ");
printTimer(&timer, 1);
printf("Kernel ");
printTimer(&timer, 2);
printf("Retrieve Output Vector ");
printTimer(&timer, 3);
printf("Merge Partial Results ");
printTimer(&timer, 4);
printf("\n\n");
#if CHECK_CORR
// Check output against the sequential host SpMV
val_dt *y_host = (val_dt *) calloc(nrows_pad, sizeof(val_dt));
spmv_host(y_host, A, x);
bool status = true;
i = 0;
for (uint32_t r = 0; r < A->horz_partitions; r++) {
for (uint32_t t = 0; t < A->tile_height; t++) {
if((r * A->tile_height + t < A->nrows) && y_host[i] != y[r * A->vert_partitions * max_block_rows_per_dpu * A->row_block_size + t]) {
status = false;
}
i++;
}
}
if (status) {
printf("[" ANSI_COLOR_GREEN "OK" ANSI_COLOR_RESET "] Outputs are equal\n");
} else {
printf("[" ANSI_COLOR_RED "ERROR" ANSI_COLOR_RESET "] Outputs differ!\n");
}
free(y_host);
#endif
// Deallocation
freeDBCSRMatrix(A);
free(x);
free(y);
partition_free(part_info);
DPU_ASSERT(dpu_free(dpu_set));
return 0;
}
|
CnTriMeshRenderer.c | /*
* $Id: CnTriMeshRenderer.c,v 1.17 2010-03-27 18:58:25 dbrown Exp $
*/
/************************************************************************
* *
* Copyright (C) 1992 *
* University Corporation for Atmospheric Research *
* All Rights Reserved *
* *
************************************************************************/
/*
* File:
*
* Author: David I. Brown
* National Center for Atmospheric Research
* PO 3000, Boulder, Colorado
*
* Date: Thu Apr 23 12:01:04 MDT 1998
*
* Description:
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <ctype.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#include <ncarg/hlu/hluutil.h>
#include <ncarg/hlu/CnTriMeshRendererP.h>
#include <ncarg/hlu/WorkstationI.h>
#include <ncarg/hlu/MapTransObj.h>
#include <ncarg/hlu/IrregularTransObjP.h>
#include <ncarg/hlu/TriMeshTransObj.h>
#include <ncarg/hlu/WorkspaceP.h>
#include <ncarg/hlu/color.h>
#ifdef BuildTRIANGLE
#define REAL double
#include <ncarg/hlu/triangle.h>
#endif
#define Oset(field) \
NhlOffset(NhlCnTriMeshRendererLayerRec,cntrimeshrenderer.field)
/* Resource table: a single private resource selecting when the triangular
 * mesh is (re)generated; defaults to building a new mesh. */
static NhlResource resources[] = {
{NhlNtriMeshUpdateMode,NhlCtriMeshUpdateMode,NhlTInteger,
sizeof(int),Oset(update_mode),NhlTImmediate,
_NhlUSET((NhlPointer) TRIMESH_NEWMESH),_NhlRES_PRIVATE,NULL}
};
static NhlErrorTypes CnTriMeshRendererInitialize(
#if NhlNeedProto
NhlClass class,
NhlLayer req,
NhlLayer new,
_NhlArgList args,
int num_args
#endif
);
static NhlErrorTypes CnTriMeshRendererDestroy(
#if NhlNeedProto
NhlLayer /* inst */
#endif
);
static NhlErrorTypes CnTriMeshRender(
#if NhlNeedProto
NhlLayer instance,
NhlContourPlotLayer cnl,
NhlDrawOrder order,
NhlString entry_name
#endif
);
extern int (_NHLCALLF(ctdrpl,CTDRPL))(
#if NhlNeedProto
float *xcs,
float *ycs,
int *ncs,
int *iai,
int *iag,
int *nai
#endif
);
extern int (_NHLCALLF(hluctfill,HLUCTFILL))(
#if NhlNeedProto
float *xcs,
float *ycs,
int *ncs,
int *iai,
int *iag,
int *nai
#endif
);
extern void (_NHLCALLF(hluctscae,HLUCTSCAE))(
#if NhlNeedProto
int *icra,
int *ica1,
int *icam,
int *ican,
float *xcpf,
float *ycpf,
float *xcqf,
float *ycqf,
int *ind1,
int *ind2,
int *icaf,
int *iaid
#endif
);
extern void (_NHLCALLF(hluctchcl,HLUCTCHCL))(
#if NhlNeedProto
int *iflg
#endif
);
extern void (_NHLCALLF(hluctchhl,HLUCTCHHL))(
#if NhlNeedProto
int *iflg
#endif
);
extern void (_NHLCALLF(hluctchll,HLUCTCHLL))(
#if NhlNeedProto
int *iflg
#endif
);
extern void (_NHLCALLF(hluctmxyz,HLUCTMXYZ))(
#if NhlNeedProto
int *imap,
float *xinp,
float *yinp,
float *zinp,
float *xotp,
float *yotp
#endif
);
static int (_NHLCALLF(rtmi,RTMI))(
#if NhlNeedProto
int *idim,
int *jdim,
int *iini,
int *jini,
int *iino,
int *jino
#endif
);
static void load_hluct_routines(
#if NhlNeedProto
NhlBoolean flag
#endif
);
extern void (_NHLCALLF(trmrgr,TRMRGR))(
int *idim,
int *jdim,
float *rlon,
float *rlat,
float *rdat,
int *iscr,
float *missing_val,
float *rpnt,
int *mpnt,
int *npnt,
int *lopn,
int *iedg,
int *medg,
int *edg,
int *loen,
int *itri,
int *mtri,
int *ntri,
int *lotn
);
extern void (_NHLCALLF(hlucttmtl,HLUCTTMTL))(
int *kbuf,
float *tbuf,
int *mbuf,
int *nbuf,
int *ippp,
int *mnop,
int *nppp,
int *ippe,
int *mnoe,
int *nppe,
float *rpnt,
int *mpnt,
int *npnt,
int *lopn,
int *iedg,
int *medg,
int *nedg,
int *loen,
int *itri,
int *mtri,
int *ntri,
int *lotn
);
extern void (_NHLCALLF(ctscae,CTSCAE))(
int *icra,
int *ica1,
int *icam,
int *ican,
float *xcpf,
float *ycpf,
float *xcqf,
float *ycqf,
int *ind1,
int *ind2,
int *icaf,
int *iaid
);
extern void (_NHLCALLF(ctchcl,CTCHCL))(
int *iflg
);
extern void (_NHLCALLF(dprset,DPRSET))(
void
);
extern void (_NHLCALLF(ctchhl,CTCHHL))(
int *iflg
);
extern void (_NHLCALLF(ctchll,CTCHLL))(
int *iflg
);
extern void (_NHLCALLF(ctmxyz,CTMXYZ))(
int *imap,
float *xinp,
float *yinp,
float *zinp,
float *xotp,
float *yotp
);
static NhlErrorTypes CnTriMeshWriteCellData
#if NhlNeedProto
(
float *rpnt,
int *iedg,
int *itri,
int icam,
int ican,
float xcpf,
float ycpf,
float xcqf,
float ycqf,
char *entry_name
#endif
);
static NhlIsoLine *CnTriMeshGetIsoLines(
#if NhlNeedProto
NhlLayer instance,
NhlContourPlotLayer cnl,
int n_levels,
float *levels,
NhlString entry_name
#endif
);
static void FreeTriBlockContents (
TriBlock *tri_block,
int *count
);
/* Class record wiring the renderer into the HLU object system; only the
 * initialize, destroy, render and get_isolines entry points are non-NULL. */
NhlCnTriMeshRendererClassRec NhlcnTriMeshRendererClassRec = {
{
/* class_name */ "cnTriMeshRendererClass",
/* nrm_class */ NrmNULLQUARK,
/* layer_size */ sizeof(NhlCnTriMeshRendererLayerRec),
/* class_inited */ False,
/* superclass */ (NhlClass)&NhlobjClassRec,
/* cvt_table */ NULL,
/* layer_resources */ resources,
/* num_resources */ NhlNumber(resources),
/* all_resources */ NULL,
/* callbacks */ NULL,
/* num_callbacks */ 0,
/* class_callbacks */ NULL,
/* num_class_callbacks */ 0,
/* class_part_initialize */ NULL,
/* class_initialize */ NULL,
/* layer_initialize */ CnTriMeshRendererInitialize,
/* layer_set_values */ NULL,
/* layer_set_values_hook */ NULL,
/* layer_get_values */ NULL,
/* layer_reparent */ NULL,
/* layer_destroy */ CnTriMeshRendererDestroy,
},
{
/* render */ CnTriMeshRender,
/* get_isolines */ CnTriMeshGetIsoLines
},
{
/* foo */ 0
}
};
/* Public class handle exported to users of the renderer. */
NhlClass NhlcnTriMeshRendererClass = (NhlClass)&NhlcnTriMeshRendererClassRec;
/* Value kinds a low-level Conpackt (CT) parameter may take. */
typedef enum {
cnInt,
cnFloat,
cnString
} _cnParamType;
/* Name/type pair describing one three-letter CT parameter code. */
typedef struct _cnCt_Params {
NhlString name;
_cnParamType type;
} cnCt_Params;
/* Table of CT parameters this renderer touches (three-letter Conpackt codes). */
static cnCt_Params Ct_Params[] = {
{"HCL", cnFloat},
{"HCS", cnFloat},
{"HCF", cnInt},
{"HLX", cnInt},
{"HLY", cnInt},
{"IWM", cnInt},
{"PC1", cnFloat},
{"PC2", cnFloat},
{"PC3", cnFloat},
{"PC4", cnFloat},
{"PC5", cnFloat},
{"PC6", cnFloat},
{"PIC", cnInt},
{"PIE", cnInt},
{"PW1", cnFloat},
{"PW2", cnFloat},
{"PW3", cnFloat},
{"PW4", cnFloat},
{"RC1", cnFloat},
{"RC2", cnFloat},
{"RC3", cnFloat},
{"RWC", cnInt},
{"RWG", cnInt},
{"RWM", cnInt},
};
static float LowLabelFactor = 1.0;
#define NhlDASHBUFSIZE 128
/* File-scope state; presumably set during a render so the Fortran callback
 * shims can reach the current plot objects -- confirm against the render code. */
static NhlContourPlotLayer Cnl = NULL;
static NhlContourPlotLayerPart *Cnp = NULL;
static NhlCnTriMeshRendererLayerPart *Tmp = NULL;
/* Record lengths (floats/ints per node) in the CT point/edge/triangle arrays. */
static int Lopn = 4;
static int Loen = 5;
static int Lotn = 4;
static TriBlock *Tbp;
/*
 * Function:	rtmi_
 *
 * Description: Index-mapping callback handed to the Conpackt mesh
 *		generator (c_cttmrg).  Maps an input grid index pair to
 *		an output index pair; currently the identity mapping.
 *
 * In Args:	idim, jdim	grid dimensions (unused)
 *		iini, jini	input indices
 *
 * Out Args:	iino, jino	output indices
 *
 * Return Values: always 0
 *
 * Side Effects: NONE
 */
/*ARGSUSED*/
static int (_NHLCALLF(rtmi,RTMI))
#if NhlNeedProto
(
	int *idim,
	int *jdim,
	int *iini,
	int *jini,
	int *iino,
	int *jino
)
#else
(idim,jdim,iini,jini,iino,jino)
	int *idim;
	int *jdim;
	int *iini;
	int *jini;
	int *iino;
	int *jino;
#endif
{
	/* assume the simplest situation: pass indices through unchanged */
	*iino = *iini;
	*jino = *jini;

	return 0;
}
/*
 * Function:	BuildTriangularMesh
 *
 * Description: Builds the conpackt point/edge/triangle arrays for a
 *		regular (rectangular) grid and stores them in the first
 *		TriBlock.  When the coordinates are given as cell bounds
 *		they are interpolated to cell centers first, since the
 *		triangular mesh algorithm expects coordinates at the
 *		grid points.
 *
 * In Args:	tmp		renderer layer part (receives the mesh)
 *		cnl		ContourPlot layer supplying the field data
 *		entry_name	caller name for error messages
 *
 * Return Values: NhlFATAL on allocation failure, else NhlNOERROR.
 */
static NhlErrorTypes BuildTriangularMesh
#if NhlNeedProto
(
	NhlCnTriMeshRendererLayerPart *tmp,
	NhlContourPlotLayer     cnl,
	NhlString entry_name
)
#else
(tmp,cnl,entry_name)
	NhlCnTriMeshRendererLayerPart *tmp;
	NhlContourPlotLayer     cnl;
	NhlString entry_name;
#endif
{
	NhlContourPlotLayerPart	  *cnp = &cnl->contourplot;
	int *iscr;
	float missing_val;
	int coords_alloced = 0;
	int idim = cnp->sfp->fast_len;
	int jdim = cnp->sfp->slow_len;
	int idm1 = cnp->sfp->fast_len - 1;
	int jdm1 = cnp->sfp->slow_len - 1;
	/* maximum possible point / edge / triangle counts for the grid */
	int mnop = idim * jdim;
	int mnoe = 3 *idm1 * jdm1 + idm1 + jdm1;
	int mtri = 2 * idm1 * jdm1 * Lotn;
	/* conpackt array sizes: Lopn/Loen/Lotn slots per node */
	int mpnt = mnop * Lopn;
	int medg = mnoe * Loen;
	float *rpnt;
	int *iedg, *itri;
	int npnt,nedg,ntri;
	float *rlat,*rlon,*rdat;
	int i,j;
	TriBlock *tbp;

	FreeTriBlockContents(tmp->tri_block,&(tmp->nblocks));

	iscr = NhlMalloc(4 * idim * jdim * sizeof(int));
	rpnt = NhlMalloc(mpnt * sizeof(float));
	iedg = NhlMalloc(medg * sizeof(int));
	itri = NhlMalloc(mtri * sizeof(int));
	if (! iscr || ! rpnt || ! iedg || ! itri) {
		/* release whatever did get allocated before bailing out
		   (previously these buffers leaked on this path) */
		if (iscr) NhlFree(iscr);
		if (rpnt) NhlFree(rpnt);
		if (iedg) NhlFree(iedg);
		if (itri) NhlFree(itri);
		NHLPERROR((NhlFATAL,ENOMEM,NULL));
		return NhlFATAL;
	}
	missing_val = cnp->sfp->missing_value_set ?
		cnp->sfp->missing_value : -FLT_MAX;

	if (cnp->sfp->x_arr && cnp->sfp->x_arr->num_dimensions == 2) {
		/*
		 * since the triangular mesh algorithm does not handle
		 * coordinates not defined at the grid points we
		 * must interpolate bounding coordinates to the
		 * cell centers as best we can.
		 */
		if (! (cnp->sfp->xc_is_bounds || cnp->sfp->yc_is_bounds)) {
			/* coordinates already at grid points: use directly */
			rlat = (float*)cnp->sfp->y_arr->data;
			rlon = (float*)cnp->sfp->x_arr->data;
		}
		else if (cnp->sfp->xc_is_bounds && cnp->sfp->yc_is_bounds) {
			/* both axes are bounds: average the 4 cell corners */
			int jd, jbd, jbdp;
			float *tlat, *tlon;
			int cidim = idim + 1;
			tlat = (float*)cnp->sfp->y_arr->data;
			tlon = (float*)cnp->sfp->x_arr->data;
			coords_alloced = 1;
			rlat = NhlMalloc(idim * jdim * sizeof(float));
			rlon = NhlMalloc(idim * jdim * sizeof(float));
			for (j = 0; j < jdim; j++) {
				jd = j * idim;
				jbd = j * cidim;
				jbdp = (j+1) * cidim;
				for (i = 0; i < idim; i++) {
					*(rlon+jd+i) =
						(*(tlon+jbd+i) + *(tlon+jbd+i+1) +
						 *(tlon+jbdp+i) + *(tlon+jbdp+i+1))
						/ 4.0;
					*(rlat+jd+i) =
						(*(tlat+jbd+i) + *(tlat+jbd+i+1) +
						 *(tlat+jbdp+i) + *(tlat+jbdp+i+1))
						/ 4.0;
				}
			}
		}
		else if (cnp->sfp->xc_is_bounds) {
			/* only x is bounds: average the 2 x-corners */
			int jd, jbd;
			float *tlat, *tlon;
			int cidim = idim + 1;
			tlat = (float*)cnp->sfp->y_arr->data;
			tlon = (float*)cnp->sfp->x_arr->data;
			coords_alloced = 1;
			rlat = NhlMalloc(idim * jdim * sizeof(float));
			rlon = NhlMalloc(idim * jdim * sizeof(float));
			for (j = 0; j < jdim; j++) {
				jd = j * idim;
				jbd = j * cidim;
				for (i = 0; i < idim; i++) {
					*(rlon+jd+i) =
						(*(tlon+jbd+i) + *(tlon+jbd+i+1)) / 2.0;
					*(rlat+jd+i) =
						(*(tlat+jbd+i) + *(tlat+jbd+i+1)) / 2.0;
				}
			}
		}
		else if (cnp->sfp->yc_is_bounds) {
			/* only y is bounds: average the 2 y-corners */
			int jd, jdp;
			float *tlat, *tlon;
			tlat = (float*)cnp->sfp->y_arr->data;
			tlon = (float*)cnp->sfp->x_arr->data;
			coords_alloced = 1;
			rlat = NhlMalloc(idim * jdim * sizeof(float));
			rlon = NhlMalloc(idim * jdim * sizeof(float));
			for (j = 0; j < jdim; j++) {
				jd = j * idim;
				jdp = (j + 1) * idim;
				for (i = 0; i < idim; i++) {
					*(rlon+jd+i) =
						(*(tlon+jd+i) + *(tlon+jdp+i)) / 2.0;
					*(rlat+jd+i) =
						(*(tlat+jd+i) + *(tlat+jdp+i)) / 2.0;
				}
			}
		}
	}
	else {
		/* no 2-D coordinate arrays: synthesize full 2-D coordinate
		   grids from 1-D arrays or start/end values */
		rlat = NhlMalloc(idim * jdim * sizeof(float));
		rlon = NhlMalloc(idim * jdim * sizeof(float));
		coords_alloced = 1;
		if (cnp->sfp->y_arr) {
			float y;
			for (j = 0; j < jdim; j++) {
				y = ((float*)cnp->sfp->y_arr->data)[j];
				for (i = 0; i < idim; i++) {
					*(rlat + j*idim+i) = y;
				}
			}
		}
		else {
			float y;
			float step = (cnp->sfp->y_end - cnp->sfp->y_start) /
				(jdim - 1);
			for (j = 0; j < jdim; j++) {
				y = cnp->sfp->y_start + j * step;
				for (i = 0; i < idim; i++) {
					*(rlat + j*idim+i) = y;
				}
			}
		}
		if (cnp->sfp->x_arr) {
			float *x = ((float*)cnp->sfp->x_arr->data);
			for (j = 0; j < jdim; j++) {
				memcpy(rlon + j*idim,x,idim * sizeof(float));
			}
		}
		else {
			float x;
			float step = (cnp->sfp->x_end - cnp->sfp->x_start) /
				(idim - 1);
			for (i = 0; i < idim; i++) {
				x = cnp->sfp->x_start + i * step;
				for (j = 0; j < jdim; j++) {
					*(rlon + j*idim + i) = x;
				}
			}
		}
	}
	rdat = (float*)cnp->sfp->d_arr->data;

	/* note the lat/lon argument order differs between the mapped (C)
	   and unmapped (Fortran) mesh generators */
	if (tmp->ezmap) {
		c_cttmrg(idim,jdim,rlat,rlon,rdat,
			 iscr,missing_val,
			 _NHLCALLF(rtmi,RTMI),
			 rpnt,mpnt,&npnt,Lopn,
			 iedg,medg,&nedg,Loen,
			 itri,mtri,&ntri,Lotn);
	}
	else {
		_NHLCALLF(trmrgr,TRMRGR)
			(&idim,&jdim,rlon,rlat,rdat,
			 iscr,&missing_val,
			 rpnt,&mpnt,&npnt,&Lopn,
			 iedg,&medg,&nedg,&Loen,
			 itri,&mtri,&ntri,&Lotn);
	}
	NhlFree(iscr);
	if (coords_alloced) {
		NhlFree(rlon);
		NhlFree(rlat);
	}

	/* hand the generated mesh to the (single) TriBlock */
	tbp = &(tmp->tri_block[0]);
	tbp->npnt = npnt;
	tbp->nedg = nedg;
	tbp->ntri = ntri;
	tbp->rpnt = rpnt;
	tbp->iedg = iedg;
	tbp->itri = itri;
	tmp->update_mode = TRIMESH_NOUPDATE;
	tmp->nblocks = 1;

	return NhlNOERROR;
}
#define DEGTORAD 0.017453292519943
/*
 * Function:	BuildNativeMesh
 *
 * Description: Builds the conpackt mesh from an explicit element
 *		(triangle) node list supplied with the scalar field.
 *		Triangles are fed to the Fortran mesh builder in buffered
 *		batches; triangles containing a missing value are dropped.
 *
 * In Args:	tmp		renderer layer part (receives the mesh)
 *		cnl		ContourPlot layer supplying the field data
 *		entry_name	caller name for error messages
 *
 * Return Values: NhlFATAL on allocation or Conpackt error, else NhlNOERROR.
 */
static NhlErrorTypes BuildNativeMesh
#if NhlNeedProto
(
	NhlCnTriMeshRendererLayerPart *tmp,
	NhlContourPlotLayer     cnl,
	NhlString entry_name
)
#else
(tmp,cnl,entry_name)
	NhlCnTriMeshRendererLayerPart *tmp;
	NhlContourPlotLayer     cnl;
	NhlString entry_name;
#endif
{
	NhlContourPlotLayerPart	  *cnp = &cnl->contourplot;
	int mnot = cnp->sfp->element_nodes->len_dimensions[0];
	int mnop = cnp->sfp->fast_len;
	int mnoe = 3 * mnot;
	int mpnt = mnop * Lopn;
	int medg = mnoe * Loen;
	int mtri = mnot * Lotn;
	float *rpnt;
	int *el;
	int *iedg, *itri;
	int *ippp,*ippe;
	/* initialized so the output block is sane even if every triangle
	   is discarded (previously read uninitialized in that case) */
	int npnt = 0,nedg = 0;
	float *rlat,*rlon,*rdat;
	float tbuf[5021][12];
	int kbuf = 173;
	int mbuf = 5021;
	int nppp = 0;
	int nppe = 0;
	int nbuf = 0;
	int ntri = 0;
	int i;
	int ix_offset = cnp->sfp->ix_start;
	int err_num;
	char *e_msg;
	char *e_text;
	TriBlock *tbp;

	FreeTriBlockContents(tmp->tri_block,&(tmp->nblocks));

	rpnt = NhlMalloc(mpnt * sizeof(float));
	iedg = NhlMalloc(medg * sizeof(int));
	itri = NhlMalloc(mtri * sizeof(int));
	ippp = NhlMalloc(2 * mnop * sizeof(int));
	ippe = NhlMalloc(2 * mnoe * sizeof(int));
	if (! (rpnt && iedg && itri && ippp && ippe )) {
		/* free whatever did get allocated (previously leaked) */
		if (rpnt) NhlFree(rpnt);
		if (iedg) NhlFree(iedg);
		if (itri) NhlFree(itri);
		if (ippp) NhlFree(ippp);
		if (ippe) NhlFree(ippe);
		NHLPERROR((NhlFATAL,ENOMEM,NULL));
		return NhlFATAL;
	}
	rlat = (float*)cnp->sfp->y_arr->data;
	rlon = (float*)cnp->sfp->x_arr->data;
	rdat = (float*)cnp->sfp->d_arr->data;
	el = (int*) cnp->sfp->element_nodes->data;

	for (i = 0; i < mnot; i++) {
		int *ep;
		int e0,e1,e2;
		/* flush the triangle buffer when full */
		if (nbuf >= mbuf)
			_NHLCALLF(hlucttmtl,HLUCTTMTL)
				(&kbuf,(float*)tbuf,&mbuf,&nbuf,
				 ippp,&mnop,&nppp,
				 ippe,&mnoe,&nppe,
				 rpnt,&mpnt,&npnt,&Lopn,
				 iedg,&medg,&nedg,&Loen,
				 itri,&mtri,&ntri,&Lotn);
		if (c_nerro(&err_num) != 0) {
			e_msg = c_semess(0);
			e_text = "%s: %s";
			NhlPError(NhlFATAL,NhlEUNKNOWN,e_text,entry_name,e_msg);
			/* release the work arrays (previously leaked) */
			NhlFree(rpnt);
			NhlFree(iedg);
			NhlFree(itri);
			NhlFree(ippp);
			NhlFree(ippe);
			return NhlFATAL;
		}
		ep = el + i * 3;
		e0 = *ep - ix_offset;
		e1 = *(ep+1) - ix_offset;
		e2 = *(ep+2) - ix_offset;
		if (tmp->ezmap) {
			/* map projection active: store unit-sphere x,y,z */
			tbuf[nbuf][0] = cos(DEGTORAD * (double)rlat[e0]) *
				cos(DEGTORAD * (double)rlon[e0]);
			tbuf[nbuf][1] = cos(DEGTORAD * (double)rlat[e0]) *
				sin(DEGTORAD * (double)rlon[e0]);
			tbuf[nbuf][2] = sin(DEGTORAD * (double)rlat[e0]);
			tbuf[nbuf][3] = rdat[e0];
			tbuf[nbuf][4] = cos(DEGTORAD * (double)rlat[e1]) *
				cos(DEGTORAD * (double)rlon[e1]);
			tbuf[nbuf][5] = cos(DEGTORAD * (double)rlat[e1]) *
				sin(DEGTORAD * (double)rlon[e1]);
			tbuf[nbuf][6] = sin(DEGTORAD * (double)rlat[e1]);
			tbuf[nbuf][7] = rdat[e1];
			tbuf[nbuf][8] = cos(DEGTORAD * (double)rlat[e2]) *
				cos(DEGTORAD * (double)rlon[e2]);
			tbuf[nbuf][9] = cos(DEGTORAD * (double)rlat[e2]) *
				sin(DEGTORAD * (double)rlon[e2]);
			tbuf[nbuf][10] = sin(DEGTORAD * (double)rlat[e2]);
			tbuf[nbuf][11] = rdat[e2];
		}
		else {
			/* no projection: planar coordinates, z = 0 */
			tbuf[nbuf][0] = rlon[e0];
			tbuf[nbuf][1] = rlat[e0];
			tbuf[nbuf][2] = 0.0;
			tbuf[nbuf][3] = rdat[e0];
			tbuf[nbuf][4] = rlon[e1];
			tbuf[nbuf][5] = rlat[e1];
			tbuf[nbuf][6] = 0.0;
			tbuf[nbuf][7] = rdat[e1];
			tbuf[nbuf][8] = rlon[e2];
			tbuf[nbuf][9] = rlat[e2];
			tbuf[nbuf][10] = 0.0;
			tbuf[nbuf][11] = rdat[e2];
		}
		/* keep the triangle only if none of its vertices is missing */
		if (! cnp->sfp->missing_value_set)
			nbuf++;
		else if (tbuf[nbuf][3] != cnp->sfp->missing_value &&
			 tbuf[nbuf][7] != cnp->sfp->missing_value &&
			 tbuf[nbuf][11] != cnp->sfp->missing_value) {
			nbuf++;
		}
	}
	if (nbuf > 0) {
		/* final flush of the (partial) triangle buffer */
		_NHLCALLF(hlucttmtl,HLUCTTMTL)
			(&nbuf,(float*)tbuf,&mbuf,&nbuf,
			 ippp,&mnop,&nppp,
			 ippe,&mnoe,&nppe,
			 rpnt,&mpnt,&npnt,&Lopn,
			 iedg,&medg,&nedg,&Loen,
			 itri,&mtri,&ntri,&Lotn);
		if (c_nerro(&err_num) != 0) {
			e_msg = c_semess(0);
			e_text = "%s: %s";
			NhlPError(NhlFATAL,NhlEUNKNOWN,e_text,entry_name,e_msg);
			/* release the work arrays (previously leaked) */
			NhlFree(rpnt);
			NhlFree(iedg);
			NhlFree(itri);
			NhlFree(ippp);
			NhlFree(ippe);
			return NhlFATAL;
		}
	}

	tbp = &(tmp->tri_block[0]);
	tbp->npnt = npnt;
	tbp->nedg = nedg;
	tbp->ntri = ntri;
	tbp->rpnt = rpnt;
	tbp->iedg = iedg;
	tbp->itri = itri;
	tmp->nblocks = 1;
	tmp->update_mode = TRIMESH_NOUPDATE;

	NhlFree(ippp);
	NhlFree(ippe);
	/*printf("total number of edges %d\n",nedg);*/

	return NhlNOERROR;
}
/* A cell-bounds vertex, remembering which node and corner it came from. */
typedef struct _vertex {
	float x;
	float y;
	int node;
	int vert;
} Vertex;

/*
 * qsort comparator for Vertex: orders by x, then y, then node id.
 * The vert field does not participate in the ordering.
 */
static int vcomp(const void *vv1, const void *vv2)
{
	const Vertex *a = (const Vertex *) vv1;
	const Vertex *b = (const Vertex *) vv2;

	if (a->x < b->x) return -1;
	if (b->x < a->x) return 1;
	if (a->y < b->y) return -1;
	if (b->y < a->y) return 1;
	if (a->node < b->node) return -1;
	if (b->node < a->node) return 1;

	return 0;
}
/*
 * Emits one triangle (three node ids) into the element list, ordered
 * counter-clockwise.  v points at a run of co-located sorted vertices;
 * when more than three matched (vcount > 3) a duplicated node id is
 * replaced by the fourth vertex's node.  When a map projection is in
 * effect, longitudes spanning the cyclic point are unwrapped before
 * the orientation test.
 */
static void AddElement
#if NhlNeedProto
(
	int *el,
	Vertex *v,
	int vcount,
        NhlBoolean ezmap,
	float *x,
	float *y
)
#else
(el,v,vcount,ezmap,x,y)
	int *el;
	Vertex *v;
	int vcount;
        NhlBoolean ezmap;
	float *x;
	float *y;
#endif
{
	int n0, n1, n2;
	float detleft, detright, det;
	float xt[3], yt[3];
	int i;

	n0 = v->node;
	n1 = (v+1)->node;
	n2 = (v+2)->node;
	if (vcount > 3) { /* extra vertex: substitute for a duplicate node */
		if (n1 == n0) {
			n1 = (v+3)->node;
		}
		else if (n2 == n1 || n2 == n0) {
			n2 = (v+3)->node;
		}
	}
	xt[0] = x[n0];
	xt[1] = x[n1];
	xt[2] = x[n2];
	yt[0] = y[n0];
	yt[1] = y[n1];
	yt[2] = y[n2];
	if (ezmap) {
		/* unwrap longitudes that straddle the cyclic point */
		float lo, hi;
		lo = hi = xt[0];
		for (i = 1; i < 3; i++) {
			if (xt[i] < lo)
				lo = xt[i];
			if (xt[i] > hi)
				hi = xt[i];
		}
		if (hi - lo > 180) {
			for (i = 0; i < 3; i++) {
				if (xt[i] - lo > 180)
					xt[i] -= 360;
			}
		}
	}
	/* 2-D orientation test: det > 0 means n0,n1,n2 are already CCW */
	detleft = (xt[0] - xt[2]) * (yt[1] - yt[2]);
	detright = (yt[0] - yt[2]) * (xt[1] - xt[2]);
	det = detleft - detright;

	el[0] = n0;
	if (det > 0) {
		el[1] = n1;
		el[2] = n2;
	}
	else {
		el[1] = n2;
		el[2] = n1;
	}

	return;
}
/*
 * Function:	GetTriangleNodes
 *
 * Description: Derives a triangle element list from cell-bounds arrays.
 *		Each data node has nvert boundary vertices; vertices of
 *		neighboring cells coincide, and each location shared by
 *		three or more nodes defines a triangle connecting those
 *		nodes.  Returns a newly allocated element list (3 node
 *		ids per triangle, CCW order); the caller owns the array
 *		and receives the triangle count in *ntri.
 */
static int *GetTriangleNodes
#if NhlNeedProto
(
	NhlGenArray xnodes,
	NhlGenArray ynodes,
	NhlGenArray xbounds,
	NhlGenArray ybounds,
        NhlBoolean ezmap,
	int *ntri
)
#else
(xnodes,ynodes,xbounds,ybounds,ezmap,ntri)
	NhlGenArray xnodes;
	NhlGenArray ynodes;
	NhlGenArray xbounds;
	NhlGenArray ybounds;
        NhlBoolean ezmap;
	int *ntri;
#endif
{
	int ncount = xnodes->len_dimensions[0];	/* number of data nodes */
	int nvert = xbounds->len_dimensions[1];	/* vertices per node */
	int tcount = xbounds->len_dimensions[0] * xbounds->len_dimensions[1];
	Vertex *verts;
	float *x = (float *) xnodes->data;
	float *y = (float *) ynodes->data;
	float *xb = (float *) xbounds->data;
	float *yb = (float *) ybounds->data;
	int i,j;
	int *el;
	int nmatched;
	int tfound;

	/* flatten the bounds arrays into a vertex list that remembers
	   which node and corner each vertex came from */
	verts = NhlMalloc(tcount * sizeof(Vertex));
	for (i = 0; i < ncount; i++) {
		for (j = 0; j < nvert; j++) {
			int ix = i * nvert +j;
			Vertex *v = verts + ix;
			v->node = i;
			v->vert = j;
			v->x = *(xb + ix);
			v->y = *(yb + ix);
		}
	}
	/* sort so that co-located vertices become adjacent */
	qsort(verts,ncount * nvert,sizeof(Vertex),vcomp);

	/*
	 * There should now be sets of three vertices in a row that
	 * are co-located. However, because of possible boundaries there
	 * may be singles or doubles interspersed. Each set of three
	 * represents a triangle in the mesh, with the vertex as its
	 * center. Put the nodes into the triangle element list.
	 * There should be at most ncount * nvert / 3 triangles with
	 * three elements each.
	 */
	el = (int *) NhlMalloc(sizeof(int) * ncount * nvert);
	tfound = 0;
	for (i = 0; i < ncount * nvert; ) {
		nmatched = 0;
		/* count trailing vertices that share this exact location */
		for (j = i + 1;;j++) {
			if (j == ncount * nvert)
				break;
			if (! (verts[j].x == verts[i].x &&
			       verts[j].y == verts[i].y))
				break;
			nmatched++;
		}
		if (nmatched > 1) {
			/* 3 or more coincident vertices: emit a triangle */
			AddElement(el + 3 * tfound,
				   &verts[i],nmatched+1,ezmap,x,y);
			tfound++;
		}
#if 0
	/* debugging code */
		else {
			printf("unmatched node %d %d %f %f\n",verts[i].node,
			       verts[i].vert, verts[i].x, verts[i].y);
			if (nmatched == 1) {
				printf("unmatched node %d %d %f %f\n",
				       verts[i+1].node,verts[i+1].vert,
				       verts[i+1].x, verts[i+1].y);
			}
		}
#endif
		/* skip past the group just examined */
		i = j;
	}
	NhlFree(verts);
	*ntri = tfound;

	return el;
}
static NhlErrorTypes BuildNativeMeshFromBounds
#if NhlNeedProto
(
NhlCnTriMeshRendererLayerPart *tmp,
NhlContourPlotLayer cnl,
NhlString entry_name
)
#else
(tmp,cnl,entry_name)
NhlCnTriMeshRendererLayerPart *tmp;
NhlContourPlotLayer cnl;
NhlString entry_name;
#endif
{
NhlContourPlotLayerPart *cnp = &cnl->contourplot;
int mnot;
int mtri;
int mnop = cnp->sfp->fast_len;
int mnoe;
int mpnt = mnop * Lopn;
int medg;
float *rpnt;
int *el;
int *iedg, *itri;
int *ippp,*ippe;
int npnt,nedg;
float *rlat,*rlon,*rdat;
float tbuf[5021][12];
int kbuf = 173;
int mbuf = 5021;
int nppp = 0;
int nppe = 0;
int nbuf = 0;
int ntri = 0;
int i;
int ix_offset = 0;
int err_num;
char *e_msg;
char *e_text;
TriBlock *tbp;
FreeTriBlockContents(tmp->tri_block,&(tmp->nblocks));
el = GetTriangleNodes(cnp->sfp->x_arr,cnp->sfp->y_arr,
cnp->sfp->x_cell_bounds,cnp->sfp->y_cell_bounds,
tmp->ezmap,&mnot);
mtri = mnot * Lotn;
mnoe = 3 * mnot;
medg = mnoe * Loen;
rpnt = NhlMalloc(mpnt * sizeof(float));
iedg = NhlMalloc(medg * sizeof(int));
itri = NhlMalloc(mtri * sizeof(int));
ippp = NhlMalloc(2 * mnop * sizeof(int));
ippe = NhlMalloc(2 * mnoe * sizeof(int));
if (! (rpnt && iedg && itri && ippp && ippe )) {
NHLPERROR((NhlFATAL,ENOMEM,NULL));
return NhlFATAL;
}
rlat = (float*)cnp->sfp->y_arr->data;
rlon = (float*)cnp->sfp->x_arr->data;
rdat = (float*)cnp->sfp->d_arr->data;
for (i = 0; i < mnot; i++) {
int *ep;
int e0,e1,e2;
if (nbuf >= mbuf)
_NHLCALLF(hlucttmtl,HLUCTTMTL)
(&kbuf,(float*)tbuf,&mbuf,&nbuf,
ippp,&mnop,&nppp,
ippe,&mnoe,&nppe,
rpnt,&mpnt,&npnt,&Lopn,
iedg,&medg,&nedg,&Loen,
itri,&mtri,&ntri,&Lotn);
if (c_nerro(&err_num) != 0) {
e_msg = c_semess(0);
e_text = "%s: %s";
NhlPError(NhlFATAL,NhlEUNKNOWN,e_text,entry_name,e_msg);
return NhlFATAL;
}
ep = el + i * 3;
e0 = *ep - ix_offset;
e1 = *(ep+1) - ix_offset;
e2 = *(ep+2) - ix_offset;
if (tmp->ezmap) {
tbuf[nbuf][0] = cos(DEGTORAD * rlat[e0]) *
cos(DEGTORAD * rlon[e0]);
tbuf[nbuf][1] = cos(DEGTORAD * rlat[e0]) *
sin(DEGTORAD * rlon[e0]);
tbuf[nbuf][2] = sin(DEGTORAD * rlat[e0]);
tbuf[nbuf][3] = rdat[e0];
tbuf[nbuf][4] = cos(DEGTORAD * rlat[e1]) *
cos(DEGTORAD * rlon[e1]);
tbuf[nbuf][5] = cos(DEGTORAD * rlat[e1]) *
sin(DEGTORAD * rlon[e1]);
tbuf[nbuf][6] = sin(DEGTORAD * rlat[e1]);
tbuf[nbuf][7] = rdat[e1];
tbuf[nbuf][8] = cos(DEGTORAD * rlat[e2]) *
cos(DEGTORAD * rlon[e2]);
tbuf[nbuf][9] = cos(DEGTORAD * rlat[e2]) *
sin(DEGTORAD * rlon[e2]);
tbuf[nbuf][10] = sin(DEGTORAD * rlat[e2]);
tbuf[nbuf][11] = rdat[e2];
}
else {
tbuf[nbuf][0] = rlon[e0];
tbuf[nbuf][1] = rlat[e0];
tbuf[nbuf][2] = 0.0;
tbuf[nbuf][3] = rdat[e0];
tbuf[nbuf][4] = rlon[e1];
tbuf[nbuf][5] = rlat[e1];
tbuf[nbuf][6] = 0.0;
tbuf[nbuf][7] = rdat[e1];
tbuf[nbuf][8] = rlon[e2];
tbuf[nbuf][9] = rlat[e2];
tbuf[nbuf][10] = 0.0;
tbuf[nbuf][11] = rdat[e2];
}
if (! cnp->sfp->missing_value_set)
nbuf++;
else if (tbuf[nbuf][3] != cnp->sfp->missing_value &&
tbuf[nbuf][7] != cnp->sfp->missing_value &&
tbuf[nbuf][11] != cnp->sfp->missing_value) {
nbuf++;
}
}
if (nbuf > 0) {
_NHLCALLF(hlucttmtl,HLUCTTMTL)
(&nbuf,(float*)tbuf,&mbuf,&nbuf,
ippp,&mnop,&nppp,
ippe,&mnoe,&nppe,
rpnt,&mpnt,&npnt,&Lopn,
iedg,&medg,&nedg,&Loen,
itri,&mtri,&ntri,&Lotn);
if (c_nerro(&err_num) != 0) {
e_msg = c_semess(0);
e_text = "%s: %s";
NhlPError(NhlFATAL,NhlEUNKNOWN,e_text,entry_name,e_msg);
return NhlFATAL;
}
}
tbp = &(tmp->tri_block[0]);
tbp->npnt = npnt;
tbp->nedg = nedg;
tbp->ntri = ntri;
tbp->rpnt = rpnt;
tbp->iedg = iedg;
tbp->itri = itri;
tmp->nblocks = 1;
tmp->update_mode = TRIMESH_NOUPDATE;
NhlFree(ippp);
NhlFree(ippe);
NhlFree(el);
/*printf("total number of edges %d\n",nedg); */
return NhlNOERROR;
}
/* conpackt structures */
/* These structs overlay the flat float/int arrays (rpnt/iedg/itri) that
   conpackt operates on; the indices stored in them are base indices into
   those flat arrays (Fortran-style, see the per-field comments). */
typedef struct _cpoint { /* a point node */
	float x;
	float y;
	float z;
	float dat;	/* data value at the point */
} Cpoint;

typedef struct _cedge {
	int pix_1; /* base index of edge point 1 - Lopn (4) * (cpoint index + 1) (for Fortran indexing) */
	int pix_2;
	int trix_l; /* base index of triangle to the left (Lotn (4) * ctri index + 1) */
	int trix_r; /* base index of triangle to the right (Lotn (4) * ctri index + 1) */
	int flag;
} Cedge;

typedef struct _ctri {
	int edge[3]; /* base index of edges of the triangle (Loen (5) * cedge index + edge number) */
	int flag;	/* set to 1 to block the triangle (contains missing data) */
} Ctri;
/*
 * Orients every edge record so that pix_1 refers to the endpoint with
 * the smaller (or equal) data value.  The left/right triangle indices
 * are swapped together with the endpoints so the edge topology stays
 * consistent.
 */
static void SortEdges (
	TriBlock *tbp
	)
{
	Cpoint *pnts = (Cpoint *)tbp->rpnt;
	Cedge *edges = (Cedge *) tbp->iedg;
	int nedges = tbp->nedg / Loen;
	int ix;

	for (ix = 0; ix < nedges; ix++) {
		Cedge *e = edges + ix;
		if (pnts[e->pix_1 / Lopn].dat > pnts[e->pix_2 / Lopn].dat) {
			int hold;
			hold = e->pix_1;
			e->pix_1 = e->pix_2;
			e->pix_2 = hold;
			hold = e->trix_l;
			e->trix_l = e->trix_r;
			e->trix_r = hold;
		}
	}
}
#ifdef BuildTRIANGLE
/* these are the shewchuk structures */
/* Minimal overlays of the flat arrays produced by Shewchuk's Triangle
   library (trianglelist / pointlist / edgelist). */
typedef struct _stri {
	int nodes[3]; /* vertex node ids */
} Stri;

typedef struct _snode { /* a vertex node */
	double x;
	double y;
} Snode;

typedef struct _sedge {
	int nodes[2]; /* vertex nodes */
} Sedge;

#if 0 /* experimental boundary generation code (depends on Triangle) */
/* A 2-D point paired with its index in the candidate point list. */
typedef struct _PointAndIndex {
	double x,y;
	int index;
} PointAndIndex;

/*
 * qsort comparator: orders points by x, then by y.  Coincident points
 * compare equal and are reported, since the hull code assumes the
 * points are distinct.
 */
static int hlu_sort(const void *p1, const void *p2)
{
	const PointAndIndex *lhs = (const PointAndIndex *)p1;
	const PointAndIndex *rhs = (const PointAndIndex *)p2;

	if (lhs->x < rhs->x)
		return -1;
	if (rhs->x < lhs->x)
		return 1;
	if (lhs->y < rhs->y)
		return -1;
	if (rhs->y < lhs->y)
		return 1;

	printf("points %d and %d compare as equal\n", lhs->index, rhs->index);
	return 0;
}
/* Twice the signed area of triangle p1,p2,p3; positive when the three
   points are in counter-clockwise order. */
static double ccw(PointAndIndex* p1, PointAndIndex* p2, PointAndIndex* p3)
{
	return (p2->x - p1->x)*(p3->y - p1->y) - (p2->y - p1->y)*(p3->x - p1->x);
}

/*
 * Convex hull of a point set already sorted by x then y (Andrew's
 * monotone chain).  On entry *out_hull points to a caller-allocated
 * array of PointAndIndex pointers; on exit *out_hullsize holds the
 * number of hull entries written.
 *
 * NOTE(review): the chain can emit up to npoints+1 entries (the start
 * point is pushed again when the upper hull closes), so the caller
 * must size the hull array with one slot beyond the point count.
 * NOTE(review): the upper-hull loop condition `i >= 0` assumes
 * ng_size_t is a signed type -- confirm; if unsigned, the loop never
 * terminates.
 */
static void
convex_hull(PointAndIndex* points, ng_size_t npoints, PointAndIndex*** out_hull, ng_size_t* out_hullsize)
{
	PointAndIndex** hull;
	ng_size_t i, t, k = 0;

	hull = *out_hull;
	/* lower hull */
	for (i = 0; i < npoints; ++i) {
		while (k >= 2 && ccw(hull[k-2], hull[k-1], &(points[i])) < 0) --k;
		hull[k++] = &points[i];
	}

	/* upper hull */
	for (i = npoints-2, t = k+1; i >= 0; --i) {
		while (k >= t && ccw(hull[k-2], hull[k-1], &(points[i])) < 0) --k;
		hull[k++] = &points[i];
	}

	*out_hull = hull;
	*out_hullsize = k;
}
/*
 * Identifies which of the given points lie on the convex hull of the
 * point set.  On entry plist holds indices into rlon/rlat; on exit it
 * is reused as a 0/1 flag array where 1 marks a hull (boundary) point.
 * Points inside a rectangle inscribed between the four diagonal
 * extremes are culled first, then the hull of the survivors is taken.
 */
static int* MarkBoundaryPoints(int npnts, int *plist, float *rlon, float *rlat)
{
	double ax, ay, bx, by, cx, cy, dx, dy;
	double x1,x2,y1,y2,tx,ty;
	int i;
	int pcount, pix;
	PointAndIndex *pandi;
	PointAndIndex **hull;
	ng_size_t hull_point_count;

	/* a,b,c,d track the extreme points along the four diagonal
	   directions (max x-y, max x+y, min x-y, min x+y); the rectangle
	   (x1,y1)-(x2,y2) inscribed in them contains interior points only */
	ax = bx = cx = dx = rlon[plist[0]];
	ay = by = cy = dy = rlat[plist[0]];
	x1 = x2 = ax;
	y1 = y2 = ay;
	pcount = npnts;
	for (i = 1; i < npnts; i++) {
		tx = rlon[plist[i]];
		ty = rlat[plist[i]];
		if (tx > x1 && tx < x2 && ty > y1 && ty < y2) {
			plist[i] = -1;	/* interior: cannot be on the hull */
			pcount--;
			continue;
		}
		if (tx - ty > ax - ay) {
			ax = tx;
			ay = ty;
		}
		if (tx + ty > bx + by) {
			bx = tx;
			by = ty;
		}
		if (tx - ty < cx - cy) {
			cx = tx;
			cy = ty;
		}
		if (tx + ty < dx + dy) {
			dx = tx;
			dy = ty;
		}
		x1 = MAX(cx,dx);
		x2 = MIN(ax,bx);
		y1 = MAX(ay,dy);
		y2 = MIN(cy,by);
	}
	/* second pass with the final rectangle to cull points admitted
	   before the rectangle converged */
	for (i = 0; i < npnts; i++) {
		if (plist[i] < 0)
			continue; /* already removed */
		tx = rlon[plist[i]];
		ty = rlat[plist[i]];
		if (tx > x1 && tx < x2 && ty > y1 && ty < y2) {
			plist[i] = -1;
			pcount--;
			continue;
		}
		/*printf("%f,%f\n",tx,ty);*/
	}
	printf("points remaining %d\n",pcount);
	/* compact the survivors into a sortable point list */
	pandi = (PointAndIndex *) NhlMalloc(pcount * sizeof(PointAndIndex));
	pix = 0;
	for (i = 0; i < npnts; i++) {
		if (plist[i] < 0)
			continue;
		pandi[pix].x = rlon[plist[i]];
		pandi[pix].y = rlat[plist[i]];
		pandi[pix].index = i; /* now indexing to the current list of points, not the overall list */
		pix++;
	}
	printf("hull candidate point count: %d\n", pix);
	qsort(pandi,pix,sizeof(PointAndIndex),hlu_sort);
	/* the monotone chain can write up to pix + 1 entries (the start
	   point is repeated when the hull closes), so allocate one extra
	   slot (previously sized pix, a one-element overflow) */
	hull = (PointAndIndex **) NhlMalloc((pix + 1) * sizeof(PointAndIndex *));
	convex_hull(pandi,pix,&hull,&hull_point_count);
	/* printf("hull point count: %ld\n", hull_point_count);*/

	/* use plist as the boundary point indicator */
	memset(plist,0,npnts * sizeof(int));
	for (i = 0; i < hull_point_count; i++) {
		printf("%f %f\n",hull[i]->x,hull[i]->y);
		plist[hull[i]->index] = 1;
	}
	/* release the work arrays (previously leaked) */
	NhlFree(hull);
	NhlFree(pandi);

	return plist;
}
/*
 * Generates evenly spaced points along the four edges of the block's
 * lat/lon rectangle, projects them with c_mdptra, and appends them to
 * the point list with the data value set to missing_value, so the mesh
 * is closed off at block/projection boundaries.  Points that fail to
 * project (c_mdptra yields values > 1e10) are skipped.  The number of
 * points actually appended is returned in *npnt_added.
 */
static NhlErrorTypes AddBoundarySegments
(
	TriBlock *tbp,
	int *npnt,
	int *npnt_alloc,
	double **points,
	float **dat,
	int *npnt_added,
	float missing_value
	)
{
	int nbounds = sqrt(*npnt);	/* boundary point budget ~ sqrt(n) */
	int xcount,ycount;
	double xsize = tbp->xe - tbp->xs;
	double ysize = tbp->ye - tbp->ys;
	double xinc, yinc;
	double xin, yin, xt, yt;
	int pcount = *npnt;
	int i;

	/* split the budget between x and y edges proportionally to size */
	xcount = 2 * nbounds * xsize / (xsize + ysize);
	ycount = 2 * nbounds * ysize / (xsize + ysize);
	xinc = xsize / xcount;
	yinc = ysize / ycount;
	/* grow the point and data arrays if the new points won't fit */
	if (*npnt + 4 * nbounds > *npnt_alloc) {
		*npnt_alloc= *npnt+ 5 * nbounds;
		*points = (double *)NhlRealloc(*points,2 * *npnt_alloc * sizeof(double));
		*dat = (float *) NhlRealloc(*dat,*npnt_alloc * sizeof(float));
	}
	/* bottom edge -- skipped at the poles */
	yin = tbp->ys;
	if (yin != -90 && yin != 90) {
		for (i = 0; i < xcount; i++) {
			xin = tbp->xs + i * xinc;
			c_mdptra(yin,xin,&xt,&yt);
			if (xt > 1e10 || yt > 1e10)
				continue;
			(*points)[2 * pcount] = xt;
			(*points)[2 * pcount + 1] = yt;
			pcount++;
		}
	}
	/* right edge */
	xin = tbp->xe;
	for (i = 0; i < ycount; i++) {
		yin = tbp->ys + i * yinc;
		c_mdptra(yin,xin,&xt,&yt);
		if (xt > 1e10 || yt > 1e10)
			continue;
		(*points)[2 * pcount] = xt;
		(*points)[2 * pcount + 1] = yt;
		pcount++;
	}
	/* top edge, traversed in reverse -- skipped at the poles */
	yin = tbp->ye;
	if (yin != -90 && yin != 90) {
		for (i = xcount - 1; i >= 0; i--) {
			xin = tbp->xs + i * xinc;
			c_mdptra(yin,xin,&xt,&yt);
			if (xt > 1e10 || yt > 1e10)
				continue;
			(*points)[2 * pcount] = xt;
			(*points)[2 * pcount + 1] = yt;
			pcount++;
		}
	}
	/* left edge, traversed in reverse */
	xin = tbp->xs;
	for (i = ycount - 1; i >= 0; i--) {
		yin = tbp->ys + i * yinc;
		c_mdptra(yin,xin,&xt,&yt);
		if (xt > 1e10 || yt > 1e10)
			continue;
		(*points)[2 * pcount] = xt;
		(*points)[2 * pcount + 1] = yt;
		pcount++;
	}
	*npnt_added = pcount - *npnt;
	/* boundary points carry the missing value so they never contour */
	for (i = *npnt; i < pcount; i++) {
		(*dat)[i] = missing_value;
	}
	return NhlNOERROR;
}
#endif /* #if 0 for boundary generation code that is not used now but may be in the future */
/*
 * Function:	BuildDelaunayMesh
 *
 * Description: Builds the conpackt mesh for scattered data by Delaunay
 *		triangulation (Shewchuk's Triangle library).  The domain
 *		is split into slightly overlapping blocks -- both to
 *		bound memory use and to allow OpenMP parallelism -- and
 *		each block is triangulated independently, then converted
 *		to the conpackt point/edge/triangle representation.
 *
 * In Args:	tmp		renderer layer part (receives the mesh blocks)
 *		cnl		ContourPlot layer supplying the field data
 *		entry_name	caller name for error messages
 *
 * Return Values: NhlNOERROR.
 */
static NhlErrorTypes BuildDelaunayMesh
#if NhlNeedProto
(
	NhlCnTriMeshRendererLayerPart *tmp,
	NhlContourPlotLayer     cnl,
	NhlString entry_name
)
#else
(tmp,cnl,entry_name)
	NhlCnTriMeshRendererLayerPart *tmp;
	NhlContourPlotLayer     cnl;
	NhlString entry_name;
#endif
{
	NhlContourPlotLayerPart	  *cnp = &cnl->contourplot;
	int mnop = cnp->sfp->fast_len;
	int npnt,nedg;
	float *rlat,*rlon;
	float *rdat;
	int ntri = 0;
	int i,j;
	struct triangulateio in,out,vout;
	char *flags;
	int block_count = 1;
	int block_size;
	int threshold_size = 20000000;
	double xs, xe, ys, ye, xstep,ystep,xspace,yspace;
	Stri *stris = NULL;
	Snode *snodes;
	Sedge *sedges, *vedges;
	Cpoint *cpoints;
	Cedge *cedges;
	Ctri *ctris;
	int nx_div, ny_div;
	TriBlock *tbp;
	int npnt_alloc;
	int block_ix;
	float ye_adj,yadd;
	float xe_adj,xadd;
	double xt,yt;
	int tid, nthreads;
	double xmx,xmn,ymx,ymn;
	double xtmp,ytmp,deps;
	float flx,frx,fby,fuy,wlx,wrx,wby,wuy;
	int ll;

	c_getset(&flx,&frx,&fby,&fuy,&wlx,&wrx,&wby,&wuy,&ll);
#if 0
	printf("getset - %f,%f,%f,%f,%f,%f,%f,%f\n",
	       flx,frx,fby,fuy,wlx,wrx,wby,wuy);
#endif

	threshold_size = 40000000;
	FreeTriBlockContents(tmp->tri_block,&(tmp->nblocks));
	rlat = (float*)cnp->sfp->y_arr->data;
	rlon = (float*)cnp->sfp->x_arr->data;
	rdat = (float*)cnp->sfp->d_arr->data;

	/* halve the block size until it fits the threshold; block_count
	   grows correspondingly */
	block_size = mnop;
	while (block_size > threshold_size) {
		block_size = block_size / 2 + block_size % 2;
		block_count *= 2;
	}
	nx_div = MAX(1,block_count / 2);
	ny_div = MAX(1,block_count / 2 + block_count % 2);
	deps = 0.001;

#pragma omp parallel shared(cnp, tmp, nx_div, ny_div, block_size, mnop, rlat, rlon, rdat,xstep,ystep,block_count,nthreads,deps,yadd,xadd) \
	private(tbp,block_ix,ys,ye,ye_adj,xs,xe,xe_adj,npnt,ntri,nedg,npnt_alloc,xt,yt,i,j,in,out,vout, \
		snodes,stris,sedges,vedges,cpoints,cedges,ctris,tid,xmx,xmn,ymx,ymn,ytmp,xtmp)
	{
#ifdef _OPENMP
		tid = omp_get_thread_num();
		nthreads = omp_get_num_threads();
#else
		tid = 0;
		nthreads = 1;
#endif
		/* thread 0 partitions the domain and distributes the points
		   into per-block buffers; the other threads wait at the
		   barrier below */
		if (tid == 0) {
			/* printf("%d threads\n",nthreads);*/
			if (nthreads > block_count) {
				/* block_count = nthreads - nthreads % 2;   accept even numbers only */
				block_count = nthreads;
			}
			if (block_count >= tmp->nblocks_alloced) {
				NhlFree(tmp->tri_block);
				tmp->tri_block = (TriBlock *) NhlCalloc(block_count, sizeof(TriBlock));
				tmp->nblocks_alloced = block_count;
			}
			nx_div = MAX(1,(int) sqrt(block_count));
			ny_div = MAX(1,(int) block_count / nx_div);
			block_count = nx_div * ny_div;
			block_size = mnop / block_count + block_size % block_count;
			xspace = wrx - wlx;
			yspace = wuy - wby;
			/*
			  point_dist = sqrt(mnop / (xspace * yspace));
			  try for five point overlap apporximately */
			yadd = yspace * 0.01;
			xadd = xspace * 0.01;
			ys = wby;
			ye = ys + (wuy - ys) / ny_div;
			ystep = ye - ys;
			ye_adj = MIN(ye + yadd,wuy);
			block_ix = 0;
			/* allocate with 25% headroom over the nominal block
			   size; note the parentheses -- the original
			   `(int) 1.25 * block_size` cast 1.25 to 1, losing
			   the headroom entirely */
			npnt_alloc = (int)(1.25 * block_size);
			for (j = 0; j < ny_div; j++) {
				xs = wlx;
				xe = xs + (wrx - xs) / nx_div;
				xstep = xe - xs;
				xe_adj = MIN(xe + xadd, wrx);
				for (i = 0; i < nx_div; i++) {
					/* blocks overlap by xadd/yadd so that
					   contours join across block seams */
					tbp = &(tmp->tri_block[block_ix]);
					tbp->xs = xs;
					tbp->xe = xe_adj;
					tbp->ys = ys;
					tbp->ye = ye_adj;
					xs = xe;
					xe = xs + xstep;
					xe_adj = MIN(xe + xadd,wrx);
					tbp->points = (double *)NhlMalloc(2 * npnt_alloc * sizeof(double));
					tbp->dat = (float *) NhlMalloc(npnt_alloc * sizeof(float));
					tbp->npnt = 0;
					tbp->npnt_alloc = npnt_alloc;
					block_ix++;
				}
				ys = ye;
				ye = ys + ystep;
				ye_adj = MIN(ye + yadd, wuy);
			}
			/* distribute each (projected) point into every block
			   whose (overlapping) extent contains it */
			for (i = 0; i < mnop; i++) {
				xtmp = (double) rlon[i];
				ytmp = (double) rlat[i];
				if (tmp->ezmap) {
					NGCALLF(mdptra,MDPTRA)(&ytmp,&xtmp,&xt,&yt);
					if (xt > 1e10 || yt > 1e10)
						continue;
				}
				else {
					xt = xtmp;
					yt = ytmp;
				}
				for (block_ix = 0; block_ix < block_count; block_ix++) {
					tbp = &(tmp->tri_block[block_ix]);
					xs = tbp->xs;
					xe = tbp->xe;
					ys = tbp->ys;
					ye = tbp->ye;
					if (xt < xs || xt > xe || yt < ys || yt > ye) {
						continue;
					}
					if (tbp->npnt == tbp->npnt_alloc) {
						tbp->npnt_alloc *= 1.5;
						tbp->points = (double *)NhlRealloc(tbp->points,2 * tbp->npnt_alloc * sizeof(double));
						tbp->dat = (float *) NhlRealloc(tbp->dat,tbp->npnt_alloc * sizeof(float));
					}
					tbp->points[tbp->npnt * 2] = (double)xt;
					tbp->points[tbp->npnt * 2 + 1] = (double)yt;
					tbp->dat[tbp->npnt] = rdat[i];
					tbp->npnt++;
				}
			}
		}
		/*
		  else {
		  printf("thread num is %d\n",tid);
		  }
		*/
#pragma omp barrier

		/* each thread triangulates whole blocks independently */
#pragma omp for schedule(static,1)
		for (block_ix = 0; block_ix < block_count; block_ix++) {
			npnt = 0;
			ntri = 0;
			nedg = 0;
			tbp = &(tmp->tri_block[block_ix]);
			xs = tbp->xs;
			xe = tbp->xe;
			ys = tbp->ys;
			ye = tbp->ye;
			if (tbp->npnt == 0) {
				/* empty block: release its buffers */
				tbp->npnt = 0;
				tbp->nedg = 0;
				tbp->ntri = 0;
				tbp->rpnt = NULL;
				tbp->iedg = NULL;
				tbp->itri = NULL;
				NhlFree(tbp->points);
				NhlFree(tbp->dat);
				continue;
			}
#if 0
			printf("thread %d chunk x %f %f y %f %f\n",tid,xs,xe,ys,ye);
			seglist = NhlMalloc(sizeof(int) * npnt_added);
			segmarkerlist = NhlMalloc(sizeof(int) * npnt_added / 2);
			for (i = 0; i < npnt_added / 2; i++) {
				seglist[2 * i] = npnt + i;
				seglist[2 * i + 1] = npnt + i + 1;
				segmarkerlist[i] = 1;
			}
#endif
			memset(&in,0,sizeof(struct triangulateio));
			memset(&out,0,sizeof(struct triangulateio));
			memset(&vout,0,sizeof(struct triangulateio));
			in.numberofpointattributes = 0;
			in.numberoftriangles = 0;
			if (cnp->verbose_triangle_info) {
				flags = "IBzveVV";
			}
			else {
				flags = "IBzveQ";
			}
			in.pointlist = tbp->points;
			/*
			  in.segmentlist = seglist;
			  in.segmentmarkerlist = segmarkerlist;
			*/
			/*in.pointmarkerlist = blist;*/
			out.pointlist = in.pointlist;
			/*
			  in.numberofpoints = npnt + npnt_added;
			*/
			in.numberofpoints = tbp->npnt;
			triangulate(flags,&in,&out,&vout);
			/*
			  printf("triangulation completed\n");
			*/
			stris = (Stri *) out.trianglelist;
			sedges = (Sedge *) out.edgelist;
			vedges = (Sedge *) vout.edgelist;
			npnt = out.numberofpoints;
			ntri = out.numberoftriangles;
			nedg = out.numberofedges;
			snodes = (Snode *)tbp->points;
			/* convert Triangle output to conpackt structures */
			cpoints = NhlMalloc(npnt * sizeof(Cpoint));
			cedges = NhlMalloc(nedg * sizeof(Cedge));
			ctris = NhlMalloc(ntri * sizeof(Ctri));
			memset(ctris,(char) 0,ntri *sizeof(Ctri));
			xmx = ymx = -999;
			xmn = ymn = 9999;
			for (i = 0; i < npnt; i++) {
				cpoints[i].x = (float) snodes[i].x;
				cpoints[i].y = (float) snodes[i].y;
				cpoints[i].z = 0.0;
				cpoints[i].dat = tbp->dat[i];
#if 0
				if (cpoints[i].x > xmx) {
					xmx = cpoints[i].x;
					ixmx = i;
				}
				if (cpoints[i].y > ymx) {
					ymx = cpoints[i].y;
					iymx = i;
				}
				if (cpoints[i].x < xmn) {
					xmn = cpoints[i].x;
					ixmn = i;
				}
				if (cpoints[i].y < ymn) {
					ymn = cpoints[i].y;
					iymn = i;
				}
#endif
			}
			NhlFree(tbp->points);
			tbp->points = NULL;
			NhlFree(tbp->dat);
			tbp->dat = NULL;
			for (i = 0; i < nedg; i++) {
				cedges[i].pix_1 = sedges[i].nodes[0] * Lopn;
				cedges[i].pix_2 = sedges[i].nodes[1] * Lopn;
				/* the Voronoi edges have the same indexes as the Delaunay triangles (they are duals) */
				cedges[i].trix_l = vedges[i].nodes[0] > -1 ? vedges[i].nodes[0] * Lotn : -1; /* plus edge number within triangle */
				cedges[i].trix_r = vedges[i].nodes[1] > -1 ? vedges[i].nodes[1] * Lotn : -1; /* plus edge number within triangle */
				cedges[i].flag = 0;
				for (j = 0; j < 3; j++) {
					if (vedges[i].nodes[0] > -1 && stris[vedges[i].nodes[0]].nodes[j] == sedges[i].nodes[0]) {
						ctris[vedges[i].nodes[0]].edge[j] = i * Loen;
						cedges[i].trix_l += (j + 1);
					}
					if (vedges[i].nodes[1] > -1 && stris[vedges[i].nodes[1]].nodes[j] == sedges[i].nodes[1]) {
						ctris[vedges[i].nodes[1]].edge[j] = i * Loen;
						cedges[i].trix_r += (j+1);
					}
				}
			}
			/* block (flag) triangles touching missing data */
			if (cnp->sfp->missing_value_set) {
				/* since there are only 3 values, stored in 6 possible locations, looking at 5 of them should be sufficient */
				for (i = 0; i < ntri; i++) {
					if (cpoints[cedges[ctris[i].edge[0]/Loen].pix_1/Lopn].dat == cnp->sfp->missing_value ||
					    cpoints[cedges[ctris[i].edge[1]/Loen].pix_1/Lopn].dat == cnp->sfp->missing_value ||
					    cpoints[cedges[ctris[i].edge[2]/Loen].pix_1/Lopn].dat == cnp->sfp->missing_value ||
					    cpoints[cedges[ctris[i].edge[0]/Loen].pix_2/Lopn].dat == cnp->sfp->missing_value ||
					    cpoints[cedges[ctris[i].edge[1]/Loen].pix_2/Lopn].dat == cnp->sfp->missing_value) {
						ctris[i].flag = 1;
					}
				}
			}
			else {
				for (i = 0; i < ntri; i++) {
					if (cpoints[cedges[ctris[i].edge[0]/Loen].pix_1/Lopn].dat >= 1e32 ||
					    cpoints[cedges[ctris[i].edge[1]/Loen].pix_1/Lopn].dat >= 1e32 ||
					    cpoints[cedges[ctris[i].edge[2]/Loen].pix_1/Lopn].dat >= 1e32 ||
					    cpoints[cedges[ctris[i].edge[0]/Loen].pix_2/Lopn].dat >= 1e32 ||
					    cpoints[cedges[ctris[i].edge[1]/Loen].pix_2/Lopn].dat >= 1e32) {
						ctris[i].flag = 1;
					}
				}
			}
			/* Triangle's arrays were malloc'd by the library */
			free(stris);
			free(sedges);
			free(vedges);
			tbp->npnt = npnt * Lopn;
			tbp->nedg = nedg * Loen;
			tbp->ntri = ntri * Lotn;
			tbp->rpnt = (float *) cpoints;
			tbp->iedg = (int *) cedges;
			tbp->itri = (int *) ctris;
			SortEdges(tbp);
		}
	}
	tmp->nblocks = block_count;
	tmp->update_mode = TRIMESH_NOUPDATE;

	return NhlNOERROR;
}
#endif /* ifdef BuildTRIANGLE */
/*
 * Frees the point/edge/triangle arrays of each active TriBlock, clears
 * the stale pointers and counts so a later free or render pass cannot
 * touch freed memory, and resets the block count to 0.
 */
static void FreeTriBlockContents (
	TriBlock *tri_block,
	int	 *count
	)
{
	TriBlock *tb;
	int i;

	if (*count == 0)
		return;
	for (i = 0; i < *count; i++) {
		tb = &(tri_block[i]);
		NhlFree(tb->rpnt);
		NhlFree(tb->iedg);
		NhlFree(tb->itri);
		/* null out freed pointers to guard against dangling
		   references / double frees (previously left stale) */
		tb->rpnt = NULL;
		tb->iedg = NULL;
		tb->itri = NULL;
		tb->npnt = 0;
		tb->nedg = 0;
		tb->ntri = 0;
	}
	*count = 0;
	return;
}
/*
* Function: CnTriMeshRendererInitialize
*
* Description:
*
* In Args: class objects layer_class
* req instance record of requested values
* new instance record of new object
* args list of resources and values for reference
* num_args number of elements in args.
*
* Out Args: NONE
*
* Return Values: Error Conditions
*
* Side Effects:
*/
/*ARGSUSED*/
static NhlErrorTypes
CnTriMeshRendererInitialize
#if NhlNeedProto
(
NhlClass class,
NhlLayer req,
NhlLayer new,
_NhlArgList args,
int num_args
)
#else
(class,req,new,args,num_args)
NhlClass class;
NhlLayer req;
NhlLayer new;
_NhlArgList args;
int num_args;
#endif
{
NhlErrorTypes ret = NhlNOERROR;
NhlCnTriMeshRendererLayer tml = (NhlCnTriMeshRendererLayer) new;
NhlCnTriMeshRendererLayerPart *tmp = &tml->cntrimeshrenderer;
NhlContourPlotLayer cnl;
NhlContourPlotLayerPart *cnp;
static int initial_block_count = 16;
load_hluct_routines(False);
cnl = (NhlContourPlotLayer) tml->base.parent;
cnp = &cnl->contourplot;
/* allowing for multiple blocks -- initialize the block pointers to allow an initial supply */
tmp->tri_block = (TriBlock *) NhlCalloc(initial_block_count, sizeof (TriBlock));
tmp->nblocks = 0;
tmp->nblocks_alloced = initial_block_count;
return ret;
}
/*
* Function: CnTriMeshRendererDestroy
*
* Description:
*
* In Args: inst instance record pointer
*
* Out Args: NONE
*
* Return Values: ErrorConditions
*
* Side Effects: NONE
*/
/*
 * Destroy method: release all per-block mesh storage and then the
 * block array itself. Always succeeds.
 */
static NhlErrorTypes CnTriMeshRendererDestroy
#if NhlNeedProto
(NhlLayer inst)
#else
(inst)
	NhlLayer inst;
#endif
{
	NhlCnTriMeshRendererLayer tml = (NhlCnTriMeshRendererLayer) inst;
	NhlCnTriMeshRendererLayerPart *tmp = &tml->cntrimeshrenderer;

	/* Free the contents of each block first; this also zeroes nblocks. */
	FreeTriBlockContents(tmp->tri_block,&(tmp->nblocks));
	NhlFree(tmp->tri_block);

	return NhlNOERROR;
}
/*
* Function: SetCtParams
*
* Description:
*
* In Args: layer ContourPlot instance
*
* Out Args: NONE
*
* Return Values: Error Conditions
*
* Side Effects: NONE
*/
/*
 * Apply the user-supplied Conpack parameter strings ("XXX:value") from
 * cnp->conpack_params. Each entry is parsed into a 3-character parameter
 * name and a float value, matched against the Ct_Params table, and set
 * via c_ctseti or c_ctsetr according to its declared type. Unrecognized
 * names produce an NhlWARNING.
 */
static NhlErrorTypes SetCtParams
#if NhlNeedProto
(NhlContourPlotLayer cnl,NhlString entry_name)
#else
(cnl,entry_name)
	NhlContourPlotLayer cnl;
	NhlString entry_name;
#endif
{
	NhlErrorTypes ret = NhlNOERROR;
	NhlContourPlotLayerPart *cnp = &cnl->contourplot;
	NhlString *sp;
	int i,j;
	char param[4];
	float value;

	if (cnp->conpack_params == NULL)
		return NhlNOERROR;

	sp = (NhlString *) cnp->conpack_params->data;
	for (i = 0; i < cnp->conpack_params->num_elements; i++) {
		NhlBoolean matched = False;
		_cnParamType type = cnInt; /* only read when matched is True */
		if (sp[i] != NULL && sp[i][0] != '\0') {
			value = 0.0;
			/*
			 * Fix: param was read uninitialized when sscanf matched
			 * nothing (e.g. an all-whitespace entry). Ensure it is
			 * always a valid (possibly empty) string.
			 */
			param[0] = '\0';
			if (sscanf(sp[i],"%3s:%f",&param[0],&value) < 1)
				param[0] = '\0';
			for (j = 0; j < NhlNumber(Ct_Params); j ++) {
				if (! strcmp(Ct_Params[j].name,param)) {
					matched = True;
					type = Ct_Params[j].type;
					break;
				}
			}
			if (matched && type == cnInt) {
				c_ctseti(param,(int) value);
			}
			else if (matched && type == cnFloat) {
				c_ctsetr(param,value);
			}
			else {
				char * e_text =
					"%s: %s is invalid Conpack param or cannot be from HLU library";
				NhlPError(NhlWARNING,
					  NhlEUNKNOWN,e_text,entry_name,param);
				ret = MIN(ret,NhlWARNING);
			}
		}
	}
	return ret;
}
/*
* Function: SetRegionAttrs
*
* Description:
*
* In Args: layer ContourPlot instance
*
* Out Args: NONE
*
* Return Values: Error Conditions
*
* Side Effects: Updates various internal parameters in Conpack,Plotchar,
* etc.
*
*/
/*
 * Resolve the GKS color indexes for a contour region (grid bound,
 * missing value, or out-of-range area) and push the corresponding
 * perimeter ("CLU") and area-identifier ("AIA") settings to Conpack
 * under the given parameter array index (cpix).
 */
static void SetRegionAttrs
#if NhlNeedProto
(
	NhlContourPlotLayer cl,
	NhlcnRegionAttrs *reg_attrs,
	int cpix
)
#else
(cl,reg_attrs,cpix)
	NhlContourPlotLayer cl;
	NhlcnRegionAttrs *reg_attrs;
	int cpix;
#endif
{
	/* Transparent colors stay transparent; others map to GKS indexes. */
	if (reg_attrs->perim_color == NhlTRANSPARENT)
		reg_attrs->gks_pcolor = NhlTRANSPARENT;
	else
		reg_attrs->gks_pcolor =
			_NhlGetGksCi(cl->base.wkptr,reg_attrs->perim_color);

	if (reg_attrs->fill_color == NhlTRANSPARENT)
		reg_attrs->gks_fcolor = NhlTRANSPARENT;
	else
		reg_attrs->gks_fcolor =
			_NhlGetGksCi(cl->base.wkptr,reg_attrs->fill_color);

	c_ctseti("PAI",cpix);

	/* Contour-line use flag for this region's perimeter. */
	if (! reg_attrs->perim_on)
		c_ctseti("CLU",0);
	else if (cpix == -1 && cl->contourplot.missing_val_perim_grid_bound_on)
		c_ctseti("CLU",2);
	else
		c_ctseti("CLU",1);

	/* Area identifier depends on which special region this is. */
	switch (cpix) {
	case -1:
		c_ctseti("AIA",99);
		break;
	case -2:
		c_ctseti("AIA",97);
		break;
	default:
		c_ctseti("AIA",-1);
		break;
	}
	return;
}
/*
* Function: UpdateLineAndLabelParams
*
* Description:
*
* In Args: layer ContourPlot instance
*
* Out Args: NONE
*
* Return Values: Error Conditions
*
* Side Effects: Updates various internal parameters in Conpack,Plotchar,
* etc.
*
*/
static NhlErrorTypes UpdateLineAndLabelParams
#if NhlNeedProto
(
	NhlContourPlotLayer cl,
	NhlBoolean *do_lines,
	NhlBoolean *do_labels
)
#else
(cl,do_lines,do_labels)
	NhlContourPlotLayer cl;
	NhlBoolean *do_lines;
	NhlBoolean *do_labels;
#endif
{
	NhlErrorTypes ret = NhlNOERROR;
	NhlContourPlotLayerPart *cnp = &(cl->contourplot);
	float *clvp;
	int *clup;
	int i,j;
	float height;

	/* Resolve line-label text and colors to GKS color indexes. */
	cnp->line_lbls.text = (NhlString *) cnp->llabel_strings->data;
	if (cnp->line_lbls.mono_color) {
		if (cnp->line_lbls.color == NhlTRANSPARENT)
			cnp->line_lbls.gks_color = NhlTRANSPARENT;
		else
			cnp->line_lbls.gks_color =
				_NhlGetGksCi(cl->base.wkptr,
					     cnp->line_lbls.color);
	}
	else
		cnp->line_lbls.colors = cnp->gks_llabel_colors;

	if (cnp->line_lbls.back_color == NhlTRANSPARENT)
		cnp->line_lbls.gks_bcolor = NhlTRANSPARENT;
	else
		cnp->line_lbls.gks_bcolor =
			_NhlGetGksCi(cl->base.wkptr,
				     cnp->line_lbls.back_color);
/*
 * If the perim color is transparent the line will not be
 * drawn, but just in case, set the gks color to the foreground
 */
	if (cnp->line_lbls.perim_lcolor == NhlTRANSPARENT)
		cnp->line_lbls.gks_plcolor =
			_NhlGetGksCi(cl->base.wkptr,NhlFOREGROUND);
	else
		cnp->line_lbls.gks_plcolor =
			_NhlGetGksCi(cl->base.wkptr,
				     cnp->line_lbls.perim_lcolor);

	/* High-label colors: text, background, and perimeter. */
	if (cnp->high_lbls.color == NhlTRANSPARENT)
		cnp->high_lbls.gks_color = NhlTRANSPARENT;
	else
		cnp->high_lbls.gks_color =
			_NhlGetGksCi(cl->base.wkptr,
				     cnp->high_lbls.color);

	if (cnp->high_lbls.back_color == NhlTRANSPARENT)
		cnp->high_lbls.gks_bcolor = NhlTRANSPARENT;
	else
		cnp->high_lbls.gks_bcolor =
			_NhlGetGksCi(cl->base.wkptr,
				     cnp->high_lbls.back_color);

	if (cnp->high_lbls.perim_lcolor == NhlTRANSPARENT)
		cnp->high_lbls.gks_plcolor = NhlTRANSPARENT;
	else
		cnp->high_lbls.gks_plcolor =
			_NhlGetGksCi(cl->base.wkptr,
				     cnp->high_lbls.perim_lcolor);

	/* Low-label colors: text, background, and perimeter. */
	if (cnp->low_lbls.color == NhlTRANSPARENT)
		cnp->low_lbls.gks_color = NhlTRANSPARENT;
	else
		cnp->low_lbls.gks_color =
			_NhlGetGksCi(cl->base.wkptr,
				     cnp->low_lbls.color);

	if (cnp->low_lbls.back_color == NhlTRANSPARENT)
		cnp->low_lbls.gks_bcolor = NhlTRANSPARENT;
	else
		cnp->low_lbls.gks_bcolor =
			_NhlGetGksCi(cl->base.wkptr,
				     cnp->low_lbls.back_color);

	if (cnp->low_lbls.perim_lcolor == NhlTRANSPARENT)
		cnp->low_lbls.gks_plcolor = NhlTRANSPARENT;
	else
		cnp->low_lbls.gks_plcolor =
			_NhlGetGksCi(cl->base.wkptr,
				     cnp->low_lbls.perim_lcolor);

	/* Special regions: grid bound / missing value use PAI -1, out-of-range PAI -2. */
	SetRegionAttrs(cl,&cnp->grid_bound,-1);
	SetRegionAttrs(cl,&cnp->missing_val,-1);
	SetRegionAttrs(cl,&cnp->out_of_range,-2);

	*do_lines = True;
	*do_labels = False;

	/* Reset GKS line/text state to a known baseline before per-level setup. */
	gset_line_colr_ind((Gint)_NhlGetGksCi(cl->base.wkptr,0));
	gset_text_colr_ind((Gint)_NhlGetGksCi(cl->base.wkptr,0));
	gset_linewidth(1.0);

	c_ctseti("CLS",0);		/* Conpack not to select levels */
	c_ctseti("NCL",cnp->level_count);
	clvp = (float *) cnp->levels->data;
	clup = (int *) cnp->level_flags->data;
	c_ctseti("DPU",-1);		/* dash pattern use flag */

	/* First line color (mono or per-level array), resolved to GKS index. */
	if (cnp->mono_line_color) {
		cnp->gks_line_colors[0] = cnp->line_color == NhlTRANSPARENT ?
			NhlTRANSPARENT :
			_NhlGetGksCi(cl->base.wkptr,cnp->line_color);
	} else {
		cnp->gks_line_colors[0] =
			((int *)cnp->line_colors->data)[0] == NhlTRANSPARENT ?
			NhlTRANSPARENT :
			_NhlGetGksCi(cl->base.wkptr,
				     ((int *)cnp->line_colors->data)[0]);
	}
	if (cnp->mono_line_color && cnp->gks_line_colors[0] == NhlTRANSPARENT)
		*do_lines = False;
	if (! cnp->lines_on)
		*do_lines = False;

	/*
	 * Per-level setup: level value, area identifiers on either side of
	 * the contour ("AIB"/"AIA"), line/label use flag ("CLU") and label
	 * text ("LLT").
	 */
	for (i=0; i<cnp->level_count; i++) {
		int pai,aia,aib;
		NhlcnLevelUseMode flag;
		NhlBoolean blank = True;
		char *cp;

		pai = i+1;
		aib = NhlcnAREAID_OFFSET+i;
		aia = NhlcnAREAID_OFFSET+i+1;
		c_ctseti("PAI",pai);
		c_ctsetr("CLV",(float)clvp[i]);
		c_ctseti("AIB",aib);
		c_ctseti("AIA",aia);
		flag = cnp->mono_level_flag ?
			cnp->level_flag : (NhlcnLevelUseMode) clup[i];
		/* If lines are suppressed, downgrade the flag to its label-only part. */
		if (! *do_lines) {
			switch (flag) {
			case NhlNOLINE:
			case NhlLINEONLY:
			default:
				flag = NhlNOLINE;
				break;
			case NhlLABELONLY:
			case NhlLINEANDLABEL:
				flag = NhlLABELONLY;
				break;
			}
		}
#if 0
		printf("pai %d,clv %f,aib %d,aia %d\n",pai,clvp[i],aib,aia);
#endif
		/* A label consisting only of non-printable characters counts as blank. */
		cp = ((NhlString*)cnp->line_lbls.text)[i];
		if (cp) {
			for (j = 0; j < strlen(cp); j++) {
				if (! isgraph(cp[j]))
					continue;
				blank = False;
			}
		}
		/* Blank label text: strip the label part of the flag. */
		if (blank) {
			switch (flag) {
			case NhlNOLINE:
			case NhlLABELONLY:
			default:
				flag = NhlNOLINE;
				break;
			case NhlLINEONLY:
			case NhlLINEANDLABEL:
				flag = NhlLINEONLY;
				break;
			}
		}
		c_ctseti("CLU",flag);
		c_ctsetc("LLT",cp);
	}
	if (cnp->level_selection_mode != NhlEXPLICITLEVELS)
		c_ctsetr("CIU",(float)cnp->level_spacing);

/* Set up for labels */

/* Conpack not to render the Informational label */

	c_ctsetc("ILT"," ");

/* Line labels */

	/* "LLP" selects the placement algorithm: 0 off, 1 constant, 2 randomized, 3 computed. */
	if (! cnp->line_lbls.on) {
		c_ctseti("LLP",0);
	}
	else if (cnp->llabel_placement == NhlCONSTANT) {
		*do_labels = True;
		c_ctseti("LLP",1);
#if 0
		c_ctsetr("DPS",
			 (float)(cnp->line_lbls.real_height / cl->view.width));
		c_ctsetr("DPV",(float).015);
#endif
#if 0
		c_ctsetr("RC3",(float)0.0);
		c_ctseti("LLP",2);
		if (cnp->line_lbls.angle < 0.0)
			c_ctseti("LLO",1); /* angle to contour direction */
		else {
			c_ctseti("LLO",0); /* fixed angle */
			c_ctsetr("LLA",(float)cnp->line_lbls.angle);
		}
#endif
	}
	else if (cnp->llabel_placement == NhlRANDOMIZED) {
		*do_labels = True;
		c_ctseti("LLP",2);
		if (cnp->line_lbls.angle < 0.0)
			c_ctseti("LLO",1); /* angle to contour direction */
		else {
			c_ctseti("LLO",0); /* fixed angle */
			c_ctsetr("LLA",(float)cnp->line_lbls.angle);
		}
		/* Higher density shrinks the randomized-placement spacing constants. */
		if (cnp->llabel_density > 0.0) {
			float rc1 = 0.25 / cnp->llabel_density;
			float rc2 = 0.25 / cnp->llabel_density;
			float rc3 = 0.05 / cnp->llabel_density;
			c_ctsetr("RC1",rc1);
			c_ctsetr("RC2",rc2);
			c_ctsetr("RC3",rc3);
		}
	}
	else {
		*do_labels = True;
		c_ctseti("LLP",3);
		if (cnp->line_lbls.angle < 0.0)
			c_ctseti("LLO",1); /* angle to contour direction */
		else {
			c_ctseti("LLO",0); /* fixed angle */
			c_ctsetr("LLA",(float)cnp->line_lbls.angle);
		}
		/* Density scales the penalty-scheme constants for computed placement. */
		if (cnp->llabel_density > 0.0) {
			float pc1 = 1.0;
			float pc2 = 5.0;
			float pc3 = 60.0;
			float pc4 = 0.05;
			float pc5 = 0.15;
			float pc6 = 0.30;
			float pw1 = 2.0;
			float pw2 = 0.0;
			float pw3 = 1.0;
			float pw4 = 1.0;
			pc6 /= cnp->llabel_density;
			pc3 = pc3 + 30 * (cnp->llabel_density - 1);
			pc1 *= cnp->llabel_density;
			pc5 *= cnp->llabel_density;
			c_ctsetr("PC1",pc1);
			c_ctsetr("PC2",pc2);
			c_ctsetr("PC3",pc3);
			c_ctsetr("PC4",pc4);
			c_ctsetr("PC5",pc5);
			c_ctsetr("PC6",pc6);
			c_ctsetr("PW1",pw1);
			c_ctsetr("PW2",pw2);
			c_ctsetr("PW3",pw3);
			c_ctsetr("PW4",pw4);
		}
	}

	/* Line-label size, white space, and box ("LLB") style. */
	if (*do_labels) {
		height = cnp->line_lbls.real_height / cl->view.width;
		c_ctsetr("LLS",(float)height);
		c_ctsetr("LLW",
			 (float) (height * cnp->line_lbls.perim_space));
		if (cnp->line_lbls.back_color == NhlTRANSPARENT) {
			if (cnp->line_lbls.perim_lcolor == NhlTRANSPARENT ||
			    ! cnp->line_lbls.perim_on)
				c_ctseti("LLB",0);
			else
				c_ctseti("LLB",1);
		}
		else {
			c_ctseti("LBC",cnp->line_lbls.back_color);
			if (cnp->line_lbls.perim_lcolor == NhlTRANSPARENT ||
			    ! cnp->line_lbls.perim_on)
				c_ctseti("LLB",2);
			else
				c_ctseti("LLB",3);
		}
	}

/*
 * In order to allow user control of the high and low attributes
 * individually set the appropriate part of the flag on if either
 * the high or the low is on. Further distinguishing between high and low
 * occurs in the low level routine cpchhl_
 */
	if (! cnp->high_lbls.on)
		c_ctsetc("HIT"," ");
	else
		c_ctsetc("HIT",(NhlString)cnp->high_lbls.text);

	if (! cnp->low_lbls.on)
		c_ctsetc("LOT"," ");
	else
		c_ctsetc("LOT",(NhlString)cnp->low_lbls.text);

/*
 * Due to the way Conpack works it is not possible to have different text
 * sizes, white space, background and perim on/off settings for the high
 * and low labels. The high labels take precedence, so set up accordingly.
 * Background and perim can have different colors, except that if the
 * high background or perim is transparent (emulated by turning these
 * features off) then the corresponding low feature must also become
 * transparent.
 * This means that
 * cnLowLabelFontHeightF
 * cnLowLabelAngleF
 * cnLowLabelPerimSpacingF
 * cnLowLabelPerimOn
 * are always ignored.
 * cnLowLabelBackgroundColor and cnLowLabelPerimColor can be set independently
 * of the corresponding HighLabel resource if that resource is not set to
 * transparent. However, if the low label resource is set to transparent in
 * this case, it will be coerced to transparent.
 * Update 2013/01/14: using the new transparency features, individual control
 * of cnLowLabelPerimOn is now possible. And cnLowLabelBackgroundColor and
 * cnLowLabelPerimColor can be transparent independent of the cnHighLabel values.
 *
 * It could be possible to set the low label font height independently of
 * the high label font height, but it will require a more sophisticated
 * method than the LowLabelFactor which is commented out below.
 */

	if (cnp->high_lbls.on || cnp->low_lbls.on) {
		*do_labels = True;
		height = cnp->high_lbls.real_height / cl->view.width;
#if 0
		LowLabelFactor =  cnp->high_lbls.real_height /
			cnp->low_lbls.real_height;
#endif
		c_ctsetr("HLS",(float)height);
		c_ctsetr("HLW",(float)(cnp->high_lbls.perim_space * height));
		c_ctsetr("HLA",(float)cnp->high_lbls.angle);
		c_ctseti("HLO", (int) cnp->high_low_overlap);

		/* "HLB": 0 no box, 1 perim only, 2 fill only, 3 both; cached in hlb_val. */
		if ((!cnp->high_lbls.on || cnp->high_lbls.back_color == NhlTRANSPARENT) &&
		    (!cnp->low_lbls.on || cnp->low_lbls.back_color == NhlTRANSPARENT)) {
			if ((!cnp->high_lbls.perim_on || cnp->high_lbls.perim_lcolor == NhlTRANSPARENT) &&
			    (! cnp->low_lbls.perim_on || cnp->low_lbls.perim_lcolor == NhlTRANSPARENT))
				c_ctseti("HLB",0);
			else {
				c_ctseti("HLB",1);
				cnp->hlb_val = 1;
			}
		}
		else {
			if ((!cnp->high_lbls.perim_on || cnp->high_lbls.perim_lcolor == NhlTRANSPARENT) &&
			    (! cnp->low_lbls.perim_on || cnp->low_lbls.perim_lcolor == NhlTRANSPARENT)) {
				c_ctseti("HLB",2);
				cnp->hlb_val = 2;
			}
			else {
				c_ctseti("HLB",3);
				cnp->hlb_val = 3;
			}
		}
	}
	/* Plotchar function-code character used in label strings. */
	c_pcsetc("FC",":");

	return ret;
}
/*
* Function: UpdateFillInfo
*
* Description:
*
* In Args: layer ContourPlot instance
*
* Out Args: NONE
*
* Return Values: Error Conditions
*
* Side Effects: sets the do_fill Boolean flag depending on whether
* fill is to be done.
*
*/
/*
 * Decide whether contour fill should be performed and whether the data
 * is "almost constant" (zmin and zmax both fall within one contour
 * interval, so a single fill color would cover everything).
 *
 * Out: *do_fill      - True unless fill is disabled.
 *      *almost_const - True when the data range fits in one interval.
 */
static NhlErrorTypes UpdateFillInfo
#if NhlNeedProto
(
	NhlContourPlotLayer cl,
	NhlBoolean *do_fill,
	NhlBoolean *almost_const
)
#else
(cl,do_fill,almost_const)	/* fix: almost_const was missing from the K&R parameter list */
	NhlContourPlotLayer cl;
	NhlBoolean *do_fill;
	NhlBoolean *almost_const;
#endif
{
	NhlErrorTypes ret = NhlNOERROR;
	NhlContourPlotLayerPart *cnp = &(cl->contourplot);
	float *levels = (float *) cnp->levels->data;
	int i;

	_NhlSetFillOpacity(cl, cnp->fill_opacity);

/*
 * Since the missing value fill resources are not supposed to be affected
 * by the mono flags, you cannot optimize the fill away if mono fill color is
 * true and fill color is transparent or mono fill pattern is true and the
 * fill pattern is hollow. So just keep it simple.
 *
 */
	if (! cnp->fill_on) {
		*do_fill = False;
		return ret;
	}

	*do_fill = True;

	/* Almost constant: the whole data range lies inside one level interval. */
	*almost_const = False;
	for (i = 0; i< cnp->level_count -1 ; i++) {
		if (cnp->zmin >= levels[i] &&
		    cnp->zmax <= levels[i + 1]) {
			*almost_const = True;
			return ret;
		}
	}
	return ret;
}
/*
* Function: ContourAbortDraw
*
* Description: cleans up if a fatal error occurs while drawing
*
* In Args: layer ContourPlot instance
*
* Out Args: NONE
*
* Return Values: Error Conditions
*
* Side Effects: NONE
*/
static void ContourAbortDraw
#if NhlNeedProto
(
	NhlContourPlotLayer cnl
)
#else
(cnl)
	NhlContourPlotLayer cnl;
#endif
{
	NhlContourPlotLayerPart	*cnp = &cnl->contourplot;
	NhlTransformLayerPart *tfp = &(cnl->trans);
	char *e_text;

	/* Clear the file-scope current-plot globals used by the draw callbacks. */
	Cnp = NULL;
	Cnl = NULL;

	/* Release every workspace that may have been reserved for this draw. */
	if (cnp->aws != NULL) {
		_NhlIdleWorkspace(cnp->aws);
		cnp->aws = NULL;
	}
	if (cnp->cws != NULL) {
		_NhlIdleWorkspace(cnp->cws);
		cnp->cws = NULL;
	}
	if (cnp->fws != NULL) {
		_NhlIdleWorkspace(cnp->fws);
		cnp->fws = NULL;
	}
	if (cnp->iws != NULL) {
		_NhlIdleWorkspace(cnp->iws);
		cnp->iws = NULL;
	}

	/* Close any open graphics segment before bailing out. */
	if (cnl->view.use_segments && cnp->current_trans_dat) {
		_NhlEndSegment(cnp->current_trans_dat);
		cnp->current_trans_dat = NULL;
	}
	if (cnp->wk_active) {
		_NhlDeactivateWorkstation(cnl->base.wkptr);
		cnp->wk_active = False;
	}

	/* Undo the temporary low-level-log setting on the transformation. */
	if (cnp->low_level_log_on) {
		NhlVASetValues(tfp->trans_obj->base.id,
			       NhlNtrLowLevelLogOn,False,NULL);
		cnp->low_level_log_on = False;
	}

	e_text = "%s: draw error";
	NhlPError(NhlFATAL,NhlEUNKNOWN,e_text,"ContourPlotDraw");
}
/*
* Function: AddDataBoundToAreamap
*
* Description:
*
* In Args:
*
* Out Args: NONE
*
* Return Values: Error Conditions
*
* Side Effects:
*
*/
/*
 * Add the data-boundary edges to the areamap so that fill is clipped to
 * the data region. For non-ezmap transformations, thin rectangles are
 * added just inside any viewport edge the data boundary touches (a hack
 * against floating-point precision dropout); for ezmap, the map boundary
 * is added via _NhlMapbla with outlines temporarily disabled.
 *
 * Fix: in the non-reversed y branch the bottom-edge test compared
 * against xeps instead of yeps (copy-paste from the x branch).
 */
static NhlErrorTypes AddDataBoundToAreamap
#if NhlNeedProto
(
	NhlContourPlotLayer cl,
	NhlString entry_name
)
#else
(cl,entry_name)
	NhlContourPlotLayer cl;
	NhlString entry_name;
#endif
{
	NhlErrorTypes ret = NhlNOERROR;
	char *e_text;
	NhlContourPlotLayerPart *cnp =
		(NhlContourPlotLayerPart *) &cl->contourplot;
	int status;
	NhlBoolean ezmap = False;
	int xrev,yrev;
	float xa[5],ya[5];
	float xeps,yeps;

#define _cnBBOXGID 3
#if 0
#define _cnMAPBOUNDINC 3700
#endif
#define _cnMAPBOUNDINC 100

	if (cnp->trans_obj->base.layer_class->base_class.class_name ==
	    NhlmapTransObjClass->base_class.class_name) {
		ezmap = True;
	}

#if 0
	gset_linewidth(4.0);
	gset_line_colr_ind(30);
	c_arseti("RC(1)",1);
	c_arseti("RC(3)",2);
#endif
	c_arseti("RC",1);

	if (! ezmap) {
		float twlx,twrx,twby,twuy;
		float gwlx,gwrx,gwby,gwuy;
		float txmin,txmax,tymin,tymax;
		float gxmin,gxmax,gymin,gymax;
		NhlBoolean lbox, rbox, bbox, tbox;

		/* Transformation window extent in window coordinates. */
		ret = NhlVAGetValues(cnp->trans_obj->base.id,
				     NhlNtrXMinF,&txmin,
				     NhlNtrXMaxF,&txmax,
				     NhlNtrYMinF,&tymin,
				     NhlNtrYMaxF,&tymax,
				     NULL);

		_NhlDataToWin(cnp->trans_obj,&txmin,&tymin,
			      1,&twlx,&twby,&status,
			      NULL,NULL);
		if (status) {
			e_text = "%s: data boundary is out of range";
			NhlPError(NhlWARNING,NhlEUNKNOWN,e_text,entry_name);
			ret = MIN(ret,NhlWARNING);
			return ret;
		}
		_NhlDataToWin(cnp->trans_obj,&txmax,&tymax,
			      1,&twrx,&twuy,&status,
			      NULL,NULL);
		if (status) {
			e_text = "%s: data boundary is out of range";
			NhlPError(NhlWARNING,NhlEUNKNOWN,e_text,entry_name);
			ret = MIN(ret,NhlWARNING);
			return ret;
		}

		/* Data extent clipped to the window, also in window coordinates. */
		gxmin = MAX(txmin,cnp->xlb);
		gxmax = MIN(txmax,cnp->xub);
		gymin = MAX(tymin,cnp->ylb);
		gymax = MIN(tymax,cnp->yub);

		_NhlDataToWin(cnp->trans_obj,&gxmin,&gymin,
			      1,&gwlx,&gwby,&status,
			      NULL,NULL);
		if (status) {
			e_text = "%s: data boundary is out of range";
			NhlPError(NhlWARNING,NhlEUNKNOWN,e_text,entry_name);
			ret = MIN(ret,NhlWARNING);
			return ret;
		}
		_NhlDataToWin(cnp->trans_obj,&gxmax,&gymax,
			      1,&gwrx,&gwuy,&status,
			      NULL,NULL);
		if (status) {
			e_text = "%s: data boundary is out of range";
			NhlPError(NhlWARNING,NhlEUNKNOWN,e_text,entry_name);
			ret = MIN(ret,NhlWARNING);
			return ret;
		}

		xrev = twlx > twrx;
		yrev = twby > twuy;
/*
 * added a hack to prevent fill dropout in certain cases, where because
 * of floating point precision issues in the mapping routines, contour
 * lines were being removed because they didn't quite touch the viewport
 * edge. Now a very thin rectangle is drawn just to the inside of each
 * viewport edge in this situation.
 */
		xeps = 1e-5 * fabs(twrx-twlx);
		yeps = 1e-5 * fabs(twuy-twby);
		if (! xrev) {
			if (gwlx >= twlx && gwlx - twlx < xeps)
				gwlx = twlx + xeps;
			if (gwrx <= twrx && twrx - gwrx < xeps)
				gwrx = twrx - xeps;
			lbox = gwlx > twlx;
			rbox = gwrx < twrx;
		}
		else {
			if (gwrx >= twrx && gwrx - twrx < xeps)
				gwrx = twrx + xeps;
			if (gwlx <= twlx && twlx - gwlx < xeps)
				gwlx = twlx - xeps;
			lbox = gwlx < twlx;
			rbox = gwrx > twrx;
		}
		if (! yrev) {
			/* fixed: compared against xeps (copy-paste); must use yeps */
			if (gwby >= twby && gwby - twby < yeps)
				gwby = twby + yeps;
			if (gwuy <= twuy && twuy - gwuy < yeps)
				gwuy = twuy - yeps;
			bbox = gwby > twby;
			tbox = gwuy < twuy;
		}
		else {
			if (gwuy >= twuy && gwuy - twuy < yeps)
				gwuy = twuy + yeps;
			if (gwby <= twby && twby - gwby < yeps)
				gwby = twby - yeps;
			bbox = gwby > twby;
			tbox = gwuy < twuy;
		}
/*
 * This code from 'added a hack' above to the end of 'if (! ezmap)' below was not working properly
 * for log plots where the lower values get stretched by log scaling. It sometime resulted in
 * boxes that were big enough to be visible. The solution is to start with NDC values and convert them
 * to data values using the current transformation. That is what the code below does. If it succeeds
 * (st is 0) then the inside coordinates of the skinny boxes are replaced. (Are there situations where
 * it would fail? -- it seems safest to allow for that possibility.
 */
		{
			float xn[4],yn[4];
			float xe,ye;
			int st;
			float oor;
			float x,y,w,h;
			x = cl->view.x;
			y = cl->view.y;
			w = cl->view.width;
			h = cl->view.height;
			xe = 1e-5 * w;
			ye = 1e-5 * h;
			xn[0] = x + xe;
			xn[1] = x + w - xe;
			xn[2] = xn[1];
			xn[3] = xn[0];
			yn[0] = y - h + ye;
			yn[1] = yn[0];
			yn[2] = y - ye;
			yn[3] = yn[2];
			NhlNDCToData(cl->base.id,xn,yn,4,xn,yn,NULL,NULL,&st,&oor);
			if (! st) {
				_NhlDataToWin(cnp->trans_obj,xn,yn,
					      4,xn,yn,&st,NULL,NULL);
			}
			if (! st) {
				gwlx = xn[0];
				gwrx = xn[1];
				gwby = yn[0];
				gwuy = yn[2];
			}
		}

		/* Add one skinny rectangle per touched edge; winding depends on axis reversal. */
		if (lbox) {
			xa[0] = xa[1] = xa[4] = twlx;
			xa[2] = xa[3] = gwlx;
			ya[0] = ya[3] = ya[4] = twuy;
			ya[1] = ya[2] = twby;
			if (! (xrev || yrev) || (xrev && yrev))
				_NhlAredam(cnp->aws,xa,ya,
					   5,_cnBBOXGID,9999,0,entry_name);
			else
				_NhlAredam(cnp->aws,xa,ya,
					   5,_cnBBOXGID,0,9999,entry_name);
		}
		if (rbox) {
			xa[0] = xa[1] = xa[4] = gwrx;
			xa[2] = xa[3] = twrx;
			ya[0] = ya[3] = ya[4] = twuy;
			ya[1] = ya[2] = twby;
			if (! (xrev || yrev) || (xrev && yrev))
				_NhlAredam(cnp->aws,xa,ya,
					   5,_cnBBOXGID,9999,0,entry_name);
			else
				_NhlAredam(cnp->aws,xa,ya,
					   5,_cnBBOXGID,0,9999,entry_name);
		}
		if (bbox) {
			xa[0] = xa[1] = xa[4] = gwlx;
			xa[2] = xa[3] = gwrx;
			ya[0] = ya[3] = ya[4] = gwby;
			ya[1] = ya[2] = twby;
			if (! (xrev || yrev) || (xrev && yrev))
				_NhlAredam(cnp->aws,xa,ya,
					   5,_cnBBOXGID,9999,0,entry_name);
			else
				_NhlAredam(cnp->aws,xa,ya,
					   5,_cnBBOXGID,0,9999,entry_name);
		}
		if (tbox) {
			xa[0] = xa[1] = xa[4] = gwlx;
			xa[2] = xa[3] = gwrx;
			ya[0] = ya[3] = ya[4] = twuy;
			ya[1] = ya[2] = gwuy;
			if (! (xrev || yrev) || (xrev && yrev))
				_NhlAredam(cnp->aws,xa,ya,
					   5,_cnBBOXGID,9999,0,entry_name);
			else
				_NhlAredam(cnp->aws,xa,ya,
					   5,_cnBBOXGID,0,9999,entry_name);
		}
	}
	else {
		char cval[4];
#if 0
/* apparently none of this stuff is necessary as long as you set the vertical strips correctly*/
		if (! cnp->fix_fill_bleed)
			return NhlNOERROR;
		ret = NhlVAGetValues(cnp->trans_obj->base.id,
				     NhlNmpBottomWindowF,&wb,
				     NhlNmpTopWindowF,&wt,
				     NhlNmpLeftWindowF,&wl,
				     NhlNmpRightWindowF,&wr,
				     NULL);
		/* draw thin rectangles */
		xeps = 1e-5 * fabs(wt - wb);
		yeps = 1e-5 * fabs(wr - wl);
		xa[0] = xa[3] = xa[4] = wl;
		xa[1] = xa[2] = wl + xeps;
		ya[0] = ya[1] = ya[4] = wb;
		ya[2] = ya[3] = wt;
		_NhlAredam(cnp->aws,xa,ya,1,3,0,-1,entry_name);
		xa[0] = xa[3] = xa[4] = wr;
		xa[1] = xa[2] = wr - xeps;
		ya[0] = ya[1] = ya[4] = wb;
		ya[2] = ya[3] = wt;
		_NhlAredam(cnp->aws,xa,ya,1,3,0,-1,entry_name);
		xa[0] = xa[3] = xa[4] = wl + xeps;
		xa[1] = xa[2] = wr - xeps;
		ya[0] = ya[1] = ya[4] = wb;
		ya[2] = ya[3] = wb + yeps;
		_NhlAredam(cnp->aws,xa,ya,1,3,0,-1,entry_name);
		xa[0] = xa[3] = xa[4] = wl + xeps;
		xa[1] = xa[2] = wr - xeps;
		ya[0] = ya[1] = ya[4] = wt - yeps;
		ya[2] = ya[3] = wt;
		_NhlAredam(cnp->aws,xa,ya,1,3,0,-1,entry_name);
#endif
		/* Temporarily suppress map outlines while adding the map boundary. */
		c_mpgetc("OU",cval,3);
		c_mpsetc("OU","NO");
		c_mpseti("G2",3);
		c_mpseti("VS",1);
		_NhlMapbla(cnp->aws,entry_name);
		c_mpsetc("OU",cval);
	}
	return NhlNOERROR;
}
/*
* Function: cnInitAreamap
*
* Description:
*
* In Args:
*
* Out Args: NONE
*
* Return Values: Error Conditions
*
* Side Effects: NONE
*/
/*
 * Reserve (creating on first use) the areamap workspace used for
 * contour fill, then initialize it via _NhlArinam.
 */
static NhlErrorTypes cnInitAreamap
#if NhlNeedProto
(
	NhlContourPlotLayer	cnl,
	NhlString	entry_name
)
#else
(cnl,entry_name)
	NhlContourPlotLayer	cnl;
	NhlString	entry_name;
#endif
{
	NhlErrorTypes ret = NhlNOERROR, subret = NhlNOERROR;
	NhlContourPlotLayerPart *cnp = &(cnl->contourplot);
	char *e_text;

	/* Create the workspace the first time through. */
	if (cnp->aws_id < 1) {
		cnp->aws_id =
			_NhlNewWorkspace(NhlwsAREAMAP,
					 NhlwsNONE,1000000*sizeof(int));
		if (cnp->aws_id < 1)
			return MIN(ret,(NhlErrorTypes)cnp->aws_id);
	}

	cnp->aws = _NhlUseWorkspace(cnp->aws_id);
	if (cnp->aws == NULL) {
		e_text =
			"%s: error reserving label area map workspace";
		NhlPError(NhlFATAL,NhlEUNKNOWN,e_text,entry_name);
		return(ret);
	}
#if 0
	c_arseti("lc",(int) (cnp->amap_crange *
			     MIN(cnl->view.width,cnl->view.height)));
#endif
	subret = _NhlArinam(cnp->aws,entry_name);
	ret = MIN(subret,ret);
	if (ret < NhlWARNING)
		return ret;
	return ret;
}
static float Xsoff,Xeoff,Ysoff,Yeoff;
/*
* Function: cnInitCellArray
*
* Description:
*
* In Args:
*
* Out Args: NONE
*
* Return Values: Error Conditions
*
* Side Effects: NONE
*/
static NhlErrorTypes cnInitCellArray
#if NhlNeedProto
(
NhlContourPlotLayer cnl,
int *msize,
int *nsize,
NhlBoundingBox *bbox,
float *min_cell_size,
NhlString entry_name
)
#else
(cnl,entry_name)
NhlContourPlotLayer cnl;
NhlString entry_name;
#endif
{
NhlErrorTypes ret = NhlNOERROR, subret = NhlNOERROR;
char *e_text;
NhlContourPlotLayerPart *cnp = &(cnl->contourplot);
int dunits,dwidth,dheight;
int max_msize, max_nsize;
NhlBoolean xlinear,ylinear;
int mcount,ncount;
c_ctseti("CAF", -1);
xlinear = True;
ylinear = True;
if (cnp->sfp->d_arr->num_dimensions == 1) {
/* x and y cells are meaningless, but we need something */
mcount = ncount = sqrt(cnp->sfp->fast_len);
} else {
mcount = cnp->sfp->fast_len;
ncount = cnp->sfp->slow_len;
}
subret = CnGetDataBound(cnl,bbox,&xlinear,&ylinear,
&mcount,&ncount,&Xsoff,&Xeoff,&Ysoff,&Yeoff,entry_name);
if ((ret = MIN(ret,subret)) < NhlWARNING) return ret;
max_msize = (int) ((bbox->r - bbox->l) / cnp->min_cell_size);
max_nsize = (int) ((bbox->t - bbox->b) / cnp->min_cell_size);
subret = NhlVAGetValues(cnl->base.wkptr->base.id,
NhlNwkVSWidthDevUnits,&dunits,
NULL);
if ((ret = MIN(ret,subret)) < NhlWARNING)
return ret;
dwidth = dunits * (bbox->r - bbox->l);
dheight = dunits * (bbox->t - bbox->b);
*min_cell_size = MAX(1.0/dunits,cnp->min_cell_size);
if (cnp->sticky_cell_size_set) {
if ((bbox->r - bbox->l) / cnp->cell_size <= 1.0 ||
(bbox->t - bbox->b) / cnp->cell_size <= 1.0) {
e_text =
"%s: invalid value for %s: defaulting";
NhlPError(NhlWARNING,NhlEUNKNOWN,e_text,
entry_name,NhlNcnRasterCellSizeF);
ret = NhlWARNING;
cnp->sticky_cell_size_set = False;
}
}
if (cnp->sticky_cell_size_set) {
*msize = (int) ((bbox->r - bbox->l) / cnp->cell_size + 0.5);
*nsize = (int) ((bbox->t - bbox->b) / cnp->cell_size + 0.5);
}
else if (cnp->raster_sample_factor <= 0.0) {
*msize = mcount;
*nsize = ncount;
}
else if (cnp->raster_smoothing_on) {
*msize = dwidth * cnp->raster_sample_factor;
*nsize = dheight * cnp->raster_sample_factor;
}
else {
if (! xlinear)
*msize = dwidth * cnp->raster_sample_factor;
else
*msize = MIN(dwidth,mcount)
* cnp->raster_sample_factor;
if (! ylinear)
*nsize = dheight * cnp->raster_sample_factor;
else
*nsize = MIN(dheight,ncount)
* cnp->raster_sample_factor;
}
if (!cnp->sticky_cell_size_set && cnp->raster_sample_factor > 0.0) {
*msize = MIN(*msize,max_msize);
*nsize = MIN(*nsize,max_nsize);
cnp->cell_size = (bbox->r - bbox->l) / (float) *msize;
}
if (cnp->cws_id < 1) {
cnp->cws_id =
_NhlNewWorkspace(NhlwsOTHER,NhlwsNONE,
(*msize * *nsize) * sizeof(int));
if (cnp->cws_id < 1)
return MIN(ret,(NhlErrorTypes)cnp->cws_id);
}
if ((cnp->cws = _NhlUseWorkspace(cnp->cws_id)) == NULL) {
e_text =
"%s: error reserving cell array workspace";
NhlPError(NhlFATAL,NhlEUNKNOWN,e_text,entry_name);
return(NhlFATAL);
}
{
NhlWorkspaceRec *cwsrp = (NhlWorkspaceRec *) cnp->cws;
int *cell = cwsrp->ws_ptr;
int grid_fill_ix, i, j;
grid_fill_ix = MAX(Cnp->missing_val.gks_fcolor, Cnp->grid_bound.gks_fcolor);
grid_fill_ix = grid_fill_ix < 0 ? NhlTRANSPARENT_CI : grid_fill_ix;
/*grid_fill_ix = -9999;*/
for (j = 0; j < *nsize; j++) {
for (i = 0; i < *msize; i++) {
*(cell + j * *msize + i) = grid_fill_ix;
}
}
}
return ret;
}
/*
* Function: cnInitDataArray
*
* Description:
*
* In Args:
*
* Out Args: NONE
*
* Return Values: Error Conditions
*
* Side Effects: NONE
*/
static NhlErrorTypes cnInitDataArray
#if NhlNeedProto
(
NhlContourPlotLayer cnl,
int *msize,
int *nsize,
NhlBoundingBox *bbox,
float *min_cell_size,
NhlString entry_name
)
#else
(cnl,entry_name)
NhlContourPlotLayer cnl;
NhlString entry_name;
#endif
{
NhlErrorTypes ret = NhlNOERROR, subret = NhlNOERROR;
char *e_text;
NhlContourPlotLayerPart *cnp = &(cnl->contourplot);
int dunits,dwidth,dheight;
int max_msize, max_nsize;
NhlBoolean xlinear,ylinear;
int mcount,ncount;
c_ctseti("CAF", -1);
xlinear = True;
ylinear = True;
if (cnp->sfp->d_arr->num_dimensions == 1) {
/* x and y cells are meaningless, but we need something */
mcount = ncount = sqrt(cnp->sfp->fast_len);
} else {
mcount = cnp->sfp->fast_len;
ncount = cnp->sfp->slow_len;
}
subret = CnGetDataBound(cnl,bbox,&xlinear,&ylinear,
&mcount,&ncount,&Xsoff,&Xeoff,&Ysoff,&Yeoff,entry_name);
if ((ret = MIN(ret,subret)) < NhlWARNING) return ret;
max_msize = (int) ((bbox->r - bbox->l) / cnp->min_cell_size);
max_nsize = (int) ((bbox->t - bbox->b) / cnp->min_cell_size);
subret = NhlVAGetValues(cnl->base.wkptr->base.id,
NhlNwkVSWidthDevUnits,&dunits,
NULL);
if ((ret = MIN(ret,subret)) < NhlWARNING)
return ret;
dwidth = dunits * (bbox->r - bbox->l);
dheight = dunits * (bbox->t - bbox->b);
*min_cell_size = MAX(1.0/dunits,cnp->min_cell_size);
if (cnp->sticky_cell_size_set) {
if ((bbox->r - bbox->l) / cnp->cell_size <= 1.0 ||
(bbox->t - bbox->b) / cnp->cell_size <= 1.0) {
e_text =
"%s: invalid value for %s: defaulting";
NhlPError(NhlWARNING,NhlEUNKNOWN,e_text,
entry_name,NhlNcnRasterCellSizeF);
ret = NhlWARNING;
cnp->sticky_cell_size_set = False;
}
}
if (cnp->sticky_cell_size_set) {
*msize = (int) ((bbox->r - bbox->l) / cnp->cell_size + 0.5);
*nsize = (int) ((bbox->t - bbox->b) / cnp->cell_size + 0.5);
}
else if (cnp->raster_sample_factor <= 0.0) {
*msize = mcount;
*nsize = ncount;
}
else if (cnp->raster_smoothing_on) {
*msize = dwidth * cnp->raster_sample_factor;
*nsize = dheight * cnp->raster_sample_factor;
}
else {
if (! xlinear)
*msize = dwidth * cnp->raster_sample_factor;
else
*msize = MIN(dwidth,mcount)
* cnp->raster_sample_factor;
if (! ylinear)
*nsize = dheight * cnp->raster_sample_factor;
else
*nsize = MIN(dheight,ncount)
* cnp->raster_sample_factor;
}
if (!cnp->sticky_cell_size_set && cnp->raster_sample_factor > 0.0) {
*msize = MIN(*msize,max_msize);
*nsize = MIN(*nsize,max_nsize);
cnp->cell_size = (bbox->r - bbox->l) / (float) *msize;
}
return ret;
}
/*
 * Refresh the data values stored in the existing mesh blocks from the
 * scalar field, matching each (lon,lat) data point against the next
 * expected mesh point in each block whose bounding box contains it.
 *
 * Fix: the y-coordinate comparison was missing "== 0" -- _NhlCmpFAny2
 * returns 0 on equality, so the old condition required x to match but
 * y to DIFFER, the opposite of the evident intent (compare the x test).
 */
static NhlErrorTypes UpdateMeshData
#if NhlNeedProto
(
	NhlCnTriMeshRendererLayerPart *tmp,
	NhlContourPlotLayer     cnl,
	NhlString entry_name
)
#else
(tmp,cnl,entry_name)
	NhlCnTriMeshRendererLayerPart *tmp;
	NhlContourPlotLayer     cnl;
	NhlString entry_name;
#endif
{
	NhlContourPlotLayerPart	  *cnp = &cnl->contourplot;
	float *rlat,*rlon;
	float *rdat;
	int i;
	int ret = NhlNOERROR;
	double xtmp,ytmp,xt,yt;
	int block_ix;
	int pcount[256];
	TriBlock *tbp;
	Cpoint *cpp;
	float xs,xe,ys,ye;

	rlat = (float*)cnp->sfp->y_arr->data;
	rlon = (float*)cnp->sfp->x_arr->data;
	rdat = (float*)cnp->sfp->d_arr->data;

	/* pcount is fixed at 256 entries; fail loudly rather than overrun it. */
	if (tmp->nblocks > 256) {
		NhlPError(NhlFATAL,NhlEUNKNOWN,"%s: internal logic error",entry_name);
		return NhlFATAL;
	}
	memset(pcount,0,sizeof(int) * MIN(tmp->nblocks,256));
	for (i = 0; i < cnp->sfp->fast_len; i++) {
		xtmp = (double) rlon[i];
		ytmp = (double) rlat[i];
		if (tmp->ezmap) {
			/* Project to map coordinates; skip points outside the projection. */
			NGCALLF(mdptra,MDPTRA)(&ytmp,&xtmp,&xt,&yt);
			if (xt > 1e10 || yt > 1e10)
				continue;
		}
		else {
			xt = xtmp;
			yt = ytmp;
		}
		for (block_ix = 0; block_ix < tmp->nblocks; block_ix++) {
			tbp = &(tmp->tri_block[block_ix]);
			cpp = (Cpoint *) tbp->rpnt;
			xs = tbp->xs;
			xe = tbp->xe;
			ys = tbp->ys;
			ye = tbp->ye;
			if (xt < xs || xt > xe || yt < ys || yt > ye) {
				continue;
			}
			/* Both coordinates must match the next expected mesh point. */
			if (_NhlCmpFAny2((float)xt,cpp[pcount[block_ix]].x,6,_NhlMIN_NONZERO) == 0 &&
			    _NhlCmpFAny2((float)yt,cpp[pcount[block_ix]].y,6,_NhlMIN_NONZERO) == 0) {
				cpp[pcount[block_ix]].dat = rdat[i];
				pcount[block_ix]++;
			}
			if (pcount[block_ix] > tbp->npnt / Lopn) {
				NhlPError(NhlFATAL,NhlEUNKNOWN,"%s: internal logic error",entry_name);
				return NhlFATAL;
			}
		}
	}
	for (block_ix = 0; block_ix < tmp->nblocks; block_ix++) {
		tbp = &(tmp->tri_block[block_ix]);
		SortEdges(tbp);
	}
	return ret;
}
/*
 * Build (or refresh) the triangular mesh for the current data, choosing
 * the construction routine by grid type and available resources, then
 * hand the first block's mesh to Conpack via _NhlCtmesh.
 * When only the data values changed, just update them in place.
 *
 * Fix: the K&R #else parameter list read "(cnl,tmp)" and omitted
 * do_ctmesh and entry_name even though both were declared below it.
 */
static NhlErrorTypes InitMesh
#if NhlNeedProto
(
	NhlContourPlotLayer     cnl,
	NhlCnTriMeshRendererLayerPart	  *tmp,
	int                     do_ctmesh,
	NhlString entry_name
)
#else
(cnl,tmp,do_ctmesh,entry_name)
	NhlContourPlotLayer     cnl;
	NhlCnTriMeshRendererLayerPart	  *tmp;
	int                     do_ctmesh;
	NhlString entry_name;
#endif
{
	NhlContourPlotLayerPart	  *cnp = &cnl->contourplot;
	NhlErrorTypes ret = NhlNOERROR;
	TriBlock *tbp;

	/* Data-only update: keep the existing mesh and refresh its values. */
	if (tmp->update_mode == TRIMESH_DATAUPDATE && tmp->nblocks > 0 && ! do_ctmesh)  {
		ret = UpdateMeshData(tmp,cnl,entry_name);
		return ret;
	}
	else if (tmp->update_mode > TRIMESH_NOUPDATE || tmp->nblocks == 0) {
		if (cnp->sfp->grid_type == NhlMESHGRID) {
			if (cnp->sfp->element_nodes) {
				ret = BuildNativeMesh(tmp,cnl,entry_name);
			}
			else if (cnp->sfp->x_cell_bounds &&
				 cnp->sfp->y_cell_bounds) {
				ret = BuildNativeMeshFromBounds
					(tmp,cnl,entry_name);
			}
			else {
#ifdef BuildTRIANGLE
				/* this routine sorts the edges for left/right so the ctmesh routine does not have to */
				ret = BuildDelaunayMesh(tmp,cnl,entry_name);
				tbp = &(tmp->tri_block[0]);
				ret = MIN(ret,_NhlHLUCtmesh(tbp->rpnt,tbp->npnt,Lopn,
							    tbp->iedg,tbp->nedg,Loen,
							    tbp->itri,tbp->ntri,Lotn,
							    cnp->fws,cnp->iws,entry_name));
				return ret;
#else
				NhlPError(NhlFATAL,NhlEUNKNOWN,
					  "Cannot create triangular mesh: supply additional resources or build with Triangle package");
				return NhlFATAL;
#endif
			}
		}
		else {
			ret = BuildTriangularMesh(tmp,cnl,entry_name);
		}
	}
	/* Register the first block's mesh with Conpack. */
	tbp = &(tmp->tri_block[0]);
	_NhlCtmesh(tbp->rpnt,tbp->npnt,Lopn,
		   tbp->iedg,tbp->nedg,Loen,
		   tbp->itri,tbp->ntri,Lotn,
		   cnp->fws,cnp->iws,entry_name);
	return ret;
}
/*
 * Function:	ContourLineRender
 *
 * Description:	Draws unmasked contour lines for each mesh block via
 *		Conpack's _NhlCtcldr. Initializes the mesh first if the
 *		caller has not done so. Multi-block (threaded) meshes are
 *		not supported on this path and raise a fatal error.
 *
 * In Args:	cnl         - the contour plot layer
 *		tmp         - the tri-mesh renderer part
 *		mesh_inited - in/out flag; set to 1 once InitMesh succeeds
 *		entry_name  - caller name for error reporting
 *
 * Return Values: NhlErrorTypes; minimum severity encountered.
 */
static NhlErrorTypes ContourLineRender (
	NhlContourPlotLayer cnl,
	NhlCnTriMeshRendererLayerPart *tmp,
	int * mesh_inited,
	NhlString entry_name
)
{
	/* FIX: ret/subret were previously uninitialized; when *mesh_inited
	   was already set on entry, "ret = MIN(subret,ret)" below read — and
	   the function could return — indeterminate values. */
	NhlErrorTypes ret = NhlNOERROR, subret = NhlNOERROR;
	NhlContourPlotLayerPart *cnp = &cnl->contourplot;
	TriBlock *tbp;
	int i;
	NhlString e_text;
	int do_ctmesh = 0;

	if (! *mesh_inited) {
		ret = InitMesh(cnl,tmp,1,entry_name);
		if (ret < NhlWARNING) {
			ContourAbortDraw(cnl);
			return ret;
		}
		*mesh_inited = 1;
	}
	/* does not work yet
	if (tmp->nblocks > 1) {
		do_ctmesh = 1;
	}
	*/
	/* Threaded (multi-block) line drawing is not implemented. */
	if (tmp->nblocks > 1) {
		e_text = "%s: Threading not implemented for contour lines -- set env var OMP_NUM_THREADS to 1";
		NhlPError(NhlFATAL,NhlEUNKNOWN,
			  e_text,entry_name);
		ContourAbortDraw(cnl);
		return NhlFATAL;
	}
#if 0
#pragma omp parallel shared(cnp, tmp,entry_name,Lopn,Loen,Lotn) \
	private(tbp,Tbp)
	{
#pragma omp for schedule(static,1)
#endif
	for (i = 0; i < tmp->nblocks; i++) {
		tbp = &(tmp->tri_block[i]);
		Tbp = tbp;	/* publish current block for Fortran callbacks */
		if (do_ctmesh) {
			_NhlHLUCtmesh(tbp->rpnt,tbp->npnt,Lopn,
				      tbp->iedg,tbp->nedg,Loen,
				      tbp->itri,tbp->ntri,Lotn,
				      cnp->fws,cnp->iws,entry_name);
		}
		subret = _NhlCtcldr(tbp->rpnt,tbp->iedg,tbp->itri,
				    cnp->fws,cnp->iws,entry_name);
		if ((ret = MIN(subret,ret)) < NhlWARNING) {
			ContourAbortDraw(cnl);
			break;
		}
	}
#if 0
	}
#endif
	return ret;
}
/*
 * Function:	RasterFillRender
 *
 * Description:	Renders the contour field as a raster cell array.
 *		Initializes the output cell array and the mesh, primes the
 *		renderer with one _NhlHLUCtmesh call, then (optionally in
 *		parallel, one mesh block per OpenMP thread) rasterizes each
 *		block with _NhlCtcica. A final _NhlCtcica call with NULL
 *		mesh pointers and fill_op 3 flushes/draws the cell array.
 *
 * In Args:	cnl         - the contour plot layer
 *		tmp         - the tri-mesh renderer part
 *		mesh_inited - in/out flag; set once InitMesh succeeds
 *		entry_name  - caller name for error reporting
 *
 * Return Values: NhlErrorTypes.
 */
static NhlErrorTypes RasterFillRender (
	NhlContourPlotLayer cnl,
	NhlCnTriMeshRendererLayerPart *tmp,
	int * mesh_inited,
	NhlString entry_name
)
{
	int msize,nsize;
	float min_cell_size;
	NhlBoundingBox bbox;
	NhlErrorTypes ret, subret;
	NhlContourPlotLayerPart *cnp = &cnl->contourplot;
	TriBlock *tbp;
	int i;
	int nthreads;
	int fill_op = 0;	/* 0: direct fill; 2: per-block (threaded) fill */

	ret = cnInitCellArray(cnl,&msize,&nsize,&bbox,
			      &min_cell_size,entry_name);
	if (ret < NhlWARNING) {
		return ret;
	}
	if (! *mesh_inited) {
		ret = InitMesh(cnl,tmp,0,entry_name);
		if (ret < NhlWARNING) {
			ContourAbortDraw(cnl);
			return ret;
		}
		*mesh_inited = 1;
	}
#if 0 /* now the C routine handles both smoothed and unsmoothed */
	if (tmp->nblocks > 1 && cnp->raster_smoothing_on) {
		subret = MIN(NhlFATAL,subret);
		e_text = "%s: Threading not implemented for smoothed raster contouring -- set env var OMP_NUM_THREADS to 1";
		NhlPError(NhlFATAL,NhlEUNKNOWN,
			  e_text,entry_name);
		ContourAbortDraw(cnl);
		return (ret = MIN(subret,ret));
	}
#endif
	/* we need to call ctmesh at least once */
	tbp = &(tmp->tri_block[0]);
	_NhlHLUCtmesh(tbp->rpnt,tbp->npnt,Lopn,
		      tbp->iedg,tbp->nedg,Loen,
		      tbp->itri,tbp->ntri,Lotn,
		      cnp->fws,cnp->iws,entry_name);
	/* NOTE(review): nthreads and subret are written by every thread in
	   the parallel region below (neither is in the private clause) — a
	   benign-looking data race; subret results from the per-block
	   _NhlCtcica calls are never folded into ret, so per-block errors
	   are silently dropped. TODO confirm whether that is intentional. */
#pragma omp parallel shared(cnp, tmp,entry_name,Lopn,Loen,Lotn,nthreads,bbox,msize,nsize,min_cell_size,fill_op) \
	private(tbp,i)
	{
#ifdef _OPENMP
	nthreads = omp_get_num_threads();
#else
	nthreads = 1;
#endif
	if (nthreads > 1) fill_op = 2;
#pragma omp for schedule(static,1)
	for (i = tmp->nblocks -1 ; i >= 0; i--) {
		/* for (i = 0; i < tmp->nblocks ; i++) { */
		tbp = &(tmp->tri_block[i]);
		Tbp = tbp;	/* publish current block for Fortran callbacks */
		if (tbp->npnt == 0)
			continue;
		subret = _NhlCtcica(tbp->rpnt,tbp->iedg,tbp->itri,
				    cnp->fws,cnp->iws,cnp->cws,
				    msize,msize,nsize,
				    bbox.l,bbox.b,bbox.r,bbox.t,
				    min_cell_size,
				    cnp->raster_smoothing_on,
				    fill_op,
				    (void *) tbp,
				    entry_name);
	}
#pragma omp barrier
	}
#if 0
	else {
#pragma omp critical
			_NhlCtmesh(tbp->rpnt,tbp->npnt,Lopn,
				   tbp->iedg,tbp->nedg,Loen,
				   tbp->itri,tbp->ntri,Lotn,
				   cnp->fws,cnp->iws,entry_name);
			subret = _NhlCtcica(tbp->rpnt,tbp->iedg,tbp->itri,
					    cnp->fws,cnp->iws,cnp->cws,
					    msize,msize,nsize,
					    bbox.l,bbox.b,bbox.r,bbox.t,
					    min_cell_size,
					    cnp->raster_smoothing_on,
					    fill_op,
					    (void *) tbp,
					    entry_name);
		}
	}
	}
#endif
#if 0 /* for debugging */
	{
		NhlWorkspaceRec *cwsrp = (NhlWorkspaceRec *) cnp->cws;
		int *cell = cwsrp->ws_ptr;
		int j;
		int grid_fill_ix;
		int cell_count;
		grid_fill_ix = MAX(Cnp->missing_val.gks_fcolor, Cnp->grid_bound.gks_fcolor);
		grid_fill_ix = grid_fill_ix < 0 ? NhlTRANSPARENT_CI : grid_fill_ix;
		cell_count = 0;
		for (j = 0; j < nsize; j++) {
			for (i = 0; i < msize; i++) {
				if (*(cell + j * msize + i) == grid_fill_ix) {
					printf("cell i %d j %d not initialized\n", i, j);
					cell_count++;
				}
			}
		}
		printf("%d cells of %d x %d array not initialized\n",cell_count,msize,nsize);
	}
#endif
	/* Final pass (fill_op 3, NULL mesh): draw the assembled cell array. */
	subret = _NhlCtcica(NULL,NULL,NULL,
			    cnp->fws,cnp->iws,cnp->cws,
			    msize,msize,nsize,
			    bbox.l,bbox.b,bbox.r,bbox.t,
			    min_cell_size,
			    cnp->raster_smoothing_on,
			    3,
			    NULL,
			    entry_name);
	if (cnp->cws != NULL) {
		subret = _NhlIdleWorkspace(cnp->cws);
		ret = MIN(subret,ret);
		cnp->cws = NULL;
	}
	return ret;
}
/*
 * Function:	DoConstFillHack
 *
 * Description:	Workaround for (nearly) constant fields with AreaFill:
 *		temporarily forces mono fill attributes chosen from the
 *		level bucket the field value falls into, and nudges the
 *		first data value onto a level boundary so Conpack produces
 *		a fill area. Called with on == True before rendering and
 *		on == False afterwards to restore the saved state.
 *		State is kept in function-local statics between the
 *		paired on/off calls.
 *
 * Return Values: NhlNOERROR normally; NhlWARNING if there is no data.
 */
static NhlErrorTypes DoConstFillHack(
	NhlContourPlotLayerPart *cnp,
	NhlBoolean on
)
{
	/* saved attributes, restored on the off call */
	static int save_fill_color = 0, save_fill_pattern = 0;
	static float save_fill_scale = 0;
	static NhlBoolean save_mono_fill_color = False, save_mono_fill_pattern = False,
		save_mono_fill_scale;
	static float save_test_val;
	float *levels = (float *) cnp->levels->data;
	float test_val;
	int lev, ix;

	if (! on) {
		/* off call: put everything back the way it was */
		cnp->mono_fill_color = save_mono_fill_color;
		cnp->mono_fill_pattern = save_mono_fill_pattern;
		cnp->mono_fill_scale = save_mono_fill_scale;
		cnp->fill_color = save_fill_color;
		cnp->fill_pattern = save_fill_pattern;
		cnp->fill_scale = save_fill_scale;
		cnp->data[0] = save_test_val;
		return NhlNOERROR;
	}
	if (! cnp->data) {
		printf("no data\n");
		return NhlWARNING;
	}
	save_test_val = test_val = cnp->data[0];

	/* find the first level strictly above the field value;
	   level_count means "above all levels" */
	ix = cnp->level_count;
	for (lev = 0; lev < cnp->level_count; lev++) {
		if (test_val < levels[lev]) {
			ix = lev;
			break;
		}
	}
	/* push data[0] onto a level boundary so a fill area is generated */
	cnp->data[0] = (ix > 1) ? levels[0] : levels[cnp->level_count - 1];

	/* save current attributes, then force mono fill from bucket ix */
	save_mono_fill_color = cnp->mono_fill_color;
	save_mono_fill_pattern = cnp->mono_fill_pattern;
	save_mono_fill_scale = cnp->mono_fill_scale;
	save_fill_color = cnp->fill_color;
	save_fill_pattern = cnp->fill_pattern;
	save_fill_scale = cnp->fill_scale;
	save_test_val = test_val;
	if (! cnp->mono_fill_pattern)
		cnp->fill_pattern = ((int *) cnp->fill_patterns->data)[ix];
	if (! cnp->mono_fill_scale)
		cnp->fill_scale = ((float *) cnp->fill_scales->data)[ix];
	if (! cnp->mono_fill_color)
		cnp->fill_color = ((int *) cnp->fill_colors->data)[ix];
	cnp->mono_fill_pattern = True;
	cnp->mono_fill_color = True;
	cnp->mono_fill_scale = True;
	return NhlNOERROR;
}
/*
 * Function:	CnTriMeshRender
 *
 * Description:	Main rendering entry point for the tri-mesh contour
 *		renderer. Sets up GKS clipping and Conpack (ct) parameters,
 *		decides between map (EZMAP) and direct transformations,
 *		then — gated on the requested draw order — performs fill
 *		(AreaFill / CellFill / MeshFill / RasterFill), contour
 *		lines (optionally label-masked), and labels, or writes
 *		gridded data instead of drawing. Workspaces are acquired
 *		up front and released before return.
 *
 * In Args:	instance   - the tri-mesh renderer layer
 *		cnl        - the contour plot layer
 *		order      - which draw phase is being rendered
 *		entry_name - caller name for error reporting
 *
 * Return Values: NhlErrorTypes; minimum severity encountered.
 *
 * Side Effects: sets the file globals Cnl, Cnp, Tmp used by the
 *		Fortran callback shims; modifies GKS clip/fill state.
 */
static NhlErrorTypes CnTriMeshRender
#if NhlNeedProto
(
	NhlLayer instance,
	NhlContourPlotLayer cnl,
	NhlDrawOrder order,
	NhlString entry_name
)
#else
(instance,cnl,order,entry_name)
	NhlLayer instance;
	NhlContourPlotLayer cnl;
	NhlDrawOrder order;
	NhlString entry_name;
#endif
{
	NhlCnTriMeshRendererLayer tml = (NhlCnTriMeshRendererLayer) instance;
	NhlCnTriMeshRendererLayerPart *tmp = &tml->cntrimeshrenderer;
	NhlContourPlotLayerPart *cnp = &cnl->contourplot;
	NhlString e_text;
	NhlErrorTypes ret = NhlNOERROR,subret = NhlNOERROR;
	int mesh_inited = 0;
	Gint err_ind;
	Gclip clip_ind_rect;
	TriBlock *tbp;
	int trans_change_count;
	NhlBoolean almost_const;
	int do_fill;
	int do_const_fill_hack = 0;

	tbp = &(tmp->tri_block[0]);
	/* publish globals for the Fortran callback shims */
	Cnl = cnl;
	Cnp = cnp;
	Tmp = tmp;
	/* save the current clip state; restored on every exit path */
	ginq_clip(&err_ind,&clip_ind_rect);
	gset_clip_ind(GIND_CLIP);
	c_ctrset();
	SetCtParams(cnl,entry_name);
	/* force a mesh rebuild if the transformation has changed */
	subret = NhlVAGetValues(cnl->trans.overlay_trans_obj->base.id,
				NhlNtrChangeCount,&trans_change_count,
				NULL);
	if (trans_change_count > tmp->trans_change_count) {
		tmp->update_mode = TRIMESH_NEWMESH;
		tmp->trans_change_count = trans_change_count;
	}
/*
 * Only set the ORV parameter if overlaying on EZMAP. It can cause
 * problems otherwise. (Not sure yet whether it is needed in some cases
 * though, and perhaps not needed in certain Ezmap cases.
 */
	if (cnp->trans_obj->base.layer_class->base_class.class_name ==
	    NhlmapTransObjClass->base_class.class_name) {
		NhlVAGetValues(cnp->trans_obj->base.id,
			       NhlNtrOutOfRangeF, &cnp->out_of_range_val,
			       NULL);
		tmp->ezmap = 1;
		c_ctsetr("ORV",cnp->out_of_range_val);
		if (cnp->sfp->d_arr->num_dimensions == 1 &&
		    ! (cnp->sfp->element_nodes ||
		       (cnp->sfp->x_cell_bounds && cnp->sfp->y_cell_bounds))) {
			c_ctseti("MAP",Nhlcn1DMESHMAPVAL);
		}
		else {
			c_ctseti("MAP",NhlcnMAPVAL);
		}
	}
	else {
		tmp->ezmap = 0;
		c_ctseti("MAP",NhlcnTRIMESHMAPVAL);
	}
	c_ctseti("WSO", 3); /* error recovery on */
	c_ctseti("NVS",0); /* no vertical strips */
	c_ctseti("HLE",1); /* search for equal high/lows */
	c_ctseti("SET",0);
	c_ctseti("RWC",500);
	c_ctseti("RWG",1500);
	c_ctsetc("CFT","");
	c_ctsetr("PIT",MAX(0.0,cnp->max_point_distance));
	if (cnp->smoothing_on) {
		c_ctsetr("T2D",cnp->smoothing_tension);
		c_ctsetr("SSL",cnp->smoothing_distance);
	}
	else {
		c_ctsetr("T2D",(float)0.0);
	}
	gset_fill_colr_ind((Gint)_NhlGetGksCi(cnl->base.wkptr,0));
	subret = UpdateLineAndLabelParams(cnl,&cnp->do_lines,&cnp->do_labels);
	if ((ret = MIN(subret,ret)) < NhlWARNING) {
		ContourAbortDraw(cnl);
		gset_clip_ind(clip_ind_rect.clip_ind);
		return ret;
	}
	subret = UpdateFillInfo(cnl, &cnp->do_fill,&almost_const);
	if ((ret = MIN(subret,ret)) < NhlWARNING) {
		ContourAbortDraw(cnl);
		gset_clip_ind(clip_ind_rect.clip_ind);
		return ret;
	}
	/* constant / almost-constant fields need the fill hack for AreaFill */
	if (cnp->fill_mode == NhlAREAFILL && (almost_const || (cnp->const_field && cnp->do_constf_fill))) {
		DoConstFillHack(cnp, True);
		do_const_fill_hack = 1;
	}
/* Retrieve workspace pointers */
	if ((cnp->fws = _NhlUseWorkspace(cnp->fws_id)) == NULL) {
		e_text = "%s: error reserving float workspace";
		NhlPError(NhlFATAL,NhlEUNKNOWN,e_text,entry_name);
		ContourAbortDraw(cnl);
		gset_clip_ind(clip_ind_rect.clip_ind);
		return(ret);
	}
	if ((cnp->iws = _NhlUseWorkspace(cnp->iws_id)) == NULL) {
		e_text = "%s: error reserving integer workspace";
		NhlPError(NhlFATAL,NhlEUNKNOWN,e_text,entry_name);
		ContourAbortDraw(cnl);
		gset_clip_ind(clip_ind_rect.clip_ind);
		return(ret);
	}
/* Draw the contours */
	if ((ret = MIN(subret,ret)) < NhlWARNING) {
		ContourAbortDraw(cnl);
		gset_clip_ind(clip_ind_rect.clip_ind);
		return ret;
	}
#if 0
	{ /* for debugging */
		float flx,frx,fby,fuy,wlx,wrx,wby,wuy; int ll;
		c_getset(&flx,&frx,&fby,&fuy,&wlx,&wrx,&wby,&wuy,&ll);
		printf("getset - %f,%f,%f,%f,%f,%f,%f,%f\n",
		       flx,frx,fby,fuy,wlx,wrx,wby,wuy);
	}
#endif
	do_fill = cnp->do_fill;
	if (cnp->const_field && ! cnp->do_constf_fill) {
		do_fill = False;
	}
	if (cnp->output_gridded_data) {
		/* write rasterized data instead of drawing */
		int msize,nsize;
		NhlBoundingBox bbox;
		float min_cell_size;
		if (! mesh_inited) {
			subret = InitMesh(cnl,tmp,1,entry_name);
			if ((ret = MIN(subret,ret)) < NhlWARNING) {
				ContourAbortDraw(cnl);
				gset_clip_ind(clip_ind_rect.clip_ind);
				return ret;
			}
			mesh_inited = 1;
		}
		if (tmp->nblocks > 1) {
			subret = MIN(NhlFATAL,subret);
			e_text = "%s: Threading not implemented for gridded data output -- set env var OMP_NUM_THREADS to 1";
			NhlPError(NhlFATAL,NhlEUNKNOWN, e_text,entry_name);
			ContourAbortDraw(cnl);
			gset_clip_ind(clip_ind_rect.clip_ind);
			return ret;
		}
		subret = cnInitDataArray(cnl,&msize,&nsize,&bbox,
					 &min_cell_size,entry_name);
		if ((ret = MIN(subret,ret)) < NhlWARNING) {
			gset_clip_ind(clip_ind_rect.clip_ind);
			ContourAbortDraw(cnl);
			return ret;
		}
		subret = CnTriMeshWriteCellData
			(tbp->rpnt,tbp->iedg,tbp->itri,
			 msize,nsize,
			 bbox.l,bbox.b,bbox.r,bbox.t,
			 entry_name);
		if ((ret = MIN(subret,ret)) < NhlWARNING) {
			ContourAbortDraw(cnl);
			gset_clip_ind(clip_ind_rect.clip_ind);
			return ret;
		}
	}
	else if (do_fill && cnp->fill_order == order) {
		/* fill phase */
		NhlcnFillMode fill_mode = cnp->fill_mode;
		if (fill_mode == NhlAREAFILL) {
			if (! mesh_inited) {
				subret = InitMesh(cnl,tmp,1,entry_name);
				if ((ret = MIN(subret,ret)) < NhlWARNING) {
					ContourAbortDraw(cnl);
					gset_clip_ind(clip_ind_rect.clip_ind);
					return ret;
				}
				mesh_inited = 1;
			}
			if (tmp->nblocks > 1) {
				subret = MIN(NhlFATAL,subret);
				e_text = "%s: Threading not implemented for AreaFill -- set env var OMP_NUM_THREADS to 1";
				NhlPError(NhlFATAL,NhlEUNKNOWN,
					  e_text,entry_name);
				ContourAbortDraw(cnl);
				gset_clip_ind(clip_ind_rect.clip_ind);
				return ret;
			}
			if (cnp->aws == NULL) {
				subret = cnInitAreamap(cnl,entry_name);
				if ((ret = MIN(subret,ret)) < NhlWARNING) {
					gset_clip_ind(clip_ind_rect.clip_ind);
					ContourAbortDraw(cnl);
					return ret;
				}
			}
			if (! cnp->aws) {
				e_text = "%s: Error reserving workspace";
				NhlPError(NhlFATAL,NhlEUNKNOWN,
					  e_text,entry_name);
				gset_clip_ind(clip_ind_rect.clip_ind);
				ContourAbortDraw(cnl);
				return NhlFATAL;
			}
			subret = AddDataBoundToAreamap(cnl,entry_name);
			if ((ret = MIN(subret,ret)) < NhlWARNING) {
				gset_clip_ind(clip_ind_rect.clip_ind);
				ContourAbortDraw(cnl);
				return ret;
			}
			subret = _NhlCtclam(tbp->rpnt,tbp->iedg,tbp->itri,
					    cnp->fws,cnp->iws,
					    cnp->aws,entry_name);
			if ((ret = MIN(subret,ret)) < NhlWARNING) {
				gset_clip_ind(clip_ind_rect.clip_ind);
				ContourAbortDraw(cnl);
				return ret;
			}
			if (cnp->dump_area_map)
				_NhlDumpAreaMap(cnp->aws,entry_name);
			subret = _NhlArscam(cnp->aws,
					    (_NHLCALLF(hluctfill,HLUCTFILL)),
					    entry_name);
			if ((ret = MIN(subret,ret)) < NhlWARNING) {
				gset_clip_ind(clip_ind_rect.clip_ind);
				ContourAbortDraw(cnl);
				return ret;
			}
			subret = _NhlIdleWorkspace(cnp->aws);
			ret = MIN(subret,ret);
			cnp->aws = NULL;
			if (do_const_fill_hack) {
				DoConstFillHack(cnp, False);
				do_const_fill_hack = 0;
			}
		}
		else if (fill_mode == NhlCELLFILL) {
			if (cnp->sfp->x_arr->num_dimensions == 1 &&
			    ! (cnp->sfp->x_cell_bounds && cnp->sfp->y_cell_bounds)) {
				NhlPError(NhlFATAL,NhlEUNKNOWN,
					  "%s: The CellFill method for non-rectangular Mesh data requires vertices to be explicitly defined using the sf[XY]CellBounds resources",entry_name);
				ContourAbortDraw(cnl);
				return NhlFATAL;
			}
			_NhlCellFill((NhlLayer)cnl,entry_name);
		}
		else if (fill_mode == NhlMESHFILL) { /* NhlMESHFILL */
			int msize,nsize;
			float min_cell_size;
			NhlBoundingBox bbox;
			/* NOTE(review): MeshFill is disabled — this branch
			   always returns NhlFATAL; the code below the
			   return is currently unreachable. */
			NhlPError(NhlFATAL,NhlEUNKNOWN,
				  "%s: the MeshFill method does not yet produce correct results for unstructured grids\n",entry_name);
			ContourAbortDraw(cnl);
			return NhlFATAL;
			subret = cnInitCellArray(cnl,&msize,&nsize,&bbox,
						 &min_cell_size,entry_name);
			if ((ret = MIN(subret,ret)) < NhlWARNING) {
				gset_clip_ind(clip_ind_rect.clip_ind);
				ContourAbortDraw(cnl);
				return ret;
			}
			subret = _NhlCtcica(NULL,NULL,NULL,NULL,NULL,
					    cnp->cws,
					    msize,msize,nsize,
					    bbox.l,bbox.b,bbox.r,bbox.t,
					    min_cell_size,
					    cnp->raster_smoothing_on,
					    1,
					    NULL,
					    entry_name);
			if ((ret = MIN(subret,ret)) < NhlWARNING) {
				gset_clip_ind(clip_ind_rect.clip_ind);
				ContourAbortDraw(cnl);
				return ret;
			}
			if (cnp->cws != NULL) {
				subret = _NhlIdleWorkspace(cnp->cws);
				ret = MIN(subret,ret);
				cnp->cws = NULL;
			}
		}
		else { /* NhlRASTERFILL */
			ret = RasterFillRender(cnl,tmp,&mesh_inited,entry_name);
			if ((ret = MIN(subret,ret)) < NhlWARNING) {
				gset_clip_ind(clip_ind_rect.clip_ind);
				ContourAbortDraw(cnl);
				return ret;
			}
		}
	}
	/* contour line phase (also drawn for perimeter-only cases) */
	if (! cnp->output_gridded_data &&
	    cnp->line_order == order &&
	    (cnp->do_lines || cnp->missing_val.perim_on ||
	     cnp->grid_bound.perim_on || cnp->out_of_range.perim_on)) {
		if (cnp->do_labels && cnp->label_masking) {
			/* label masking: lines drawn through an area map */
			if (! mesh_inited) {
				subret = InitMesh(cnl,tmp,1,entry_name);
				if ((ret = MIN(subret,ret)) < NhlWARNING) {
					gset_clip_ind(clip_ind_rect.clip_ind);
					ContourAbortDraw(cnl);
					return ret;
				}
				mesh_inited = 1;
			}
			if (tmp->nblocks > 1) {
				subret = MIN(NhlFATAL,subret);
				e_text = "%s: Threading not implemented for contour lines -- set env var OMP_NUM_THREADS to 1";
				NhlPError(NhlFATAL,NhlEUNKNOWN,
					  e_text,entry_name);
				gset_clip_ind(clip_ind_rect.clip_ind);
				ContourAbortDraw(cnl);
				return ret;
			}
			c_ctseti("GIL",5);
			if (cnp->aws == NULL) {
				subret = cnInitAreamap(cnl,entry_name);
				if ((ret = MIN(subret,ret)) < NhlWARNING) {
					gset_clip_ind(clip_ind_rect.clip_ind);
					ContourAbortDraw(cnl);
					return ret;
				}
			}
			if (! cnp->aws) {
				e_text = "%s: Error reserving workspace";
				NhlPError(NhlFATAL,NhlEUNKNOWN,
					  e_text,entry_name);
				gset_clip_ind(clip_ind_rect.clip_ind);
				ContourAbortDraw(cnl);
				return NhlFATAL;
			}
			/* Plotchar attributes for the line labels */
			c_pcsetr("PH",(float)cnp->line_lbls.pheight);
			c_pcsetr("PW",(float)cnp->line_lbls.pwidth);
			c_pcsetr("CS",(float)cnp->line_lbls.cspacing);
			c_pcseti("FN",cnp->line_lbls.font);
			c_pcseti("QU",cnp->line_lbls.quality);
			c_pcsetc("FC",cnp->line_lbls.fcode);
			subret = _NhlCtlbam(tbp->rpnt,tbp->iedg,tbp->itri,
					    cnp->fws,cnp->iws,
					    cnp->aws,entry_name);
			if ((ret = MIN(subret,ret)) < NhlWARNING) {
				gset_clip_ind(clip_ind_rect.clip_ind);
				ContourAbortDraw(cnl);
				return ret;
			}
			subret = _NhlCtcldm(tbp->rpnt,tbp->iedg,tbp->itri,
					    cnp->fws,cnp->iws,cnp->aws,
					    (_NHLCALLF(ctdrpl,CTDRPL)),
					    entry_name);
			if ((ret = MIN(subret,ret)) < NhlWARNING) {
				gset_clip_ind(clip_ind_rect.clip_ind);
				ContourAbortDraw(cnl);
				return ret;
			}
			subret = _NhlIdleWorkspace(cnp->aws);
			ret = MIN(subret,ret);
			cnp->aws = NULL;
		}
		else {
			ret = ContourLineRender(cnl,tmp,&mesh_inited,entry_name);
			if ((ret = MIN(subret,ret)) < NhlWARNING) {
				gset_clip_ind(clip_ind_rect.clip_ind);
				ContourAbortDraw(cnl);
				return ret;
			}
		}
	}
	/* label phase */
	if (! cnp->output_gridded_data &&
	    cnp->do_labels && cnp->label_order == order) {
		if (! mesh_inited) {
			subret = InitMesh(cnl,tmp,1,entry_name);
			if ((ret = MIN(subret,ret)) < NhlWARNING) {
				gset_clip_ind(clip_ind_rect.clip_ind);
				ContourAbortDraw(cnl);
				return ret;
			}
			mesh_inited = 1;
		}
		if (tmp->nblocks > 1) {
			subret = MIN(NhlFATAL,subret);
			e_text = "%s: Threading not implemented for contour line labels -- set env var OMP_NUM_THREADS to 1";
			NhlPError(NhlFATAL,NhlEUNKNOWN,e_text,entry_name);
			gset_clip_ind(clip_ind_rect.clip_ind);
			ContourAbortDraw(cnl);
			return ret;
		}
		cnp->line_lbls.count = 0;
		cnp->high_lbls.count = 0;
		cnp->low_lbls.count = 0;
		gset_fill_int_style(GSTYLE_SOLID);
		c_pcsetr("PH",(float)cnp->line_lbls.pheight);
		c_pcsetr("PW",(float)cnp->line_lbls.pwidth);
		c_pcsetr("CS",(float)cnp->line_lbls.cspacing);
		c_pcseti("FN",cnp->line_lbls.font);
		c_pcseti("QU",cnp->line_lbls.quality);
		c_pcsetc("FC",cnp->line_lbls.fcode);
		_NhlCtlbdr(tbp->rpnt,tbp->iedg,tbp->itri,
			   cnp->fws,cnp->iws,entry_name);
		if ((ret = MIN(subret,ret)) < NhlWARNING) {
			gset_clip_ind(clip_ind_rect.clip_ind);
			ContourAbortDraw(cnl);
			return ret;
		}
	}
	/* release workspaces and restore clip state */
	if (cnp->fws != NULL) {
		subret = _NhlIdleWorkspace(cnp->fws);
		ret = MIN(subret,ret);
		cnp->fws = NULL;
	}
	if (cnp->iws != NULL) {
		subret = _NhlIdleWorkspace(cnp->iws);
		cnp->iws = NULL;
		ret = MIN(subret,ret);
	}
	gset_clip_ind(clip_ind_rect.clip_ind);
	return MIN(subret,ret);
}
/*
 * Function:	CnTriMeshGetIsoLines
 *
 * Description:	Extracts contour (iso) line coordinates — without drawing —
 *		for the given levels (or the plot's own levels when
 *		n_levels <= 0). For each level, repeatedly calls
 *		_NhlCtcltr to fetch line fragments, assembles them into
 *		segments (a fragment whose first point equals the previous
 *		fragment's last point continues the same segment), and, on
 *		EZMAP overlays, inverse-projects the points to lat/lon with
 *		a +/-360-degree longitude unwrap across the seam.
 *
 * Return Values: NhlMalloc'd array of `count` NhlIsoLine records (caller
 *		owns it), or NULL on error.
 *
 * NOTE(review): the #else (old-style) parameter list does not match the
 * prototype (it omits n_levels/levels) — broken for non-prototype builds.
 * NhlMalloc/NhlRealloc results are not checked, and already-allocated
 * isolines are leaked on the error-return paths; presumably acceptable
 * here — confirm.
 */
static NhlIsoLine *CnTriMeshGetIsoLines
#if NhlNeedProto
(
	NhlLayer instance,
	NhlContourPlotLayer cnl,
	int n_levels,
	float *levels,
	NhlString entry_name
)
#else
(instance,cnl,order,entry_name)
	NhlLayer instance;
	NhlContourPlotLayer cnl;
	int n_levels;
	float *levels;
	NhlString entry_name;
#endif
{
	NhlCnTriMeshRendererLayer tml = (NhlCnTriMeshRendererLayer) instance;
	NhlCnTriMeshRendererLayerPart *tmp = &tml->cntrimeshrenderer;
	NhlContourPlotLayerPart *cnp = &cnl->contourplot;
	NhlErrorTypes ret = NhlNOERROR,subret = NhlNOERROR;
	NhlString e_text;
	int mesh_inited = 0;
	Gint err_ind;
	Gclip clip_ind_rect;
	float *clvp;
	int count;
	int i;
	NhlIsoLine *isolines, *ilp;
	int trans_change_count;

	/* publish globals for the Fortran callback shims */
	Cnl = cnl;
	Cnp = cnp;
	Tmp = tmp;
	ginq_clip(&err_ind,&clip_ind_rect);
	gset_clip_ind(GIND_CLIP);
	c_ctrset();
	SetCtParams(cnl,entry_name);
	/* force a mesh rebuild if the transformation has changed */
	subret = NhlVAGetValues(cnl->trans.overlay_trans_obj->base.id,
				NhlNtrChangeCount,&trans_change_count,
				NULL);
	if (trans_change_count > tmp->trans_change_count) {
		tmp->update_mode = TRIMESH_NEWMESH;
		tmp->trans_change_count = trans_change_count;
	}
/*
 * Only set the ORV parameter if overlaying on EZMAP. It can cause
 * problems otherwise. (Not sure yet whether it is needed in some cases
 * though, and perhaps not needed in certain Ezmap cases.
 */
	if (cnp->trans_obj->base.layer_class->base_class.class_name ==
	    NhlmapTransObjClass->base_class.class_name) {
		NhlVAGetValues(cnp->trans_obj->base.id,
			       NhlNtrOutOfRangeF, &cnp->out_of_range_val,
			       NULL);
		tmp->ezmap = 1;
		c_ctsetr("ORV",cnp->out_of_range_val);
		if (cnp->sfp->d_arr->num_dimensions == 1 &&
		    ! (cnp->sfp->element_nodes ||
		       (cnp->sfp->x_cell_bounds && cnp->sfp->y_cell_bounds))) {
			c_ctseti("MAP",Nhlcn1DMESHMAPVAL);
		}
		else {
			c_ctseti("MAP",NhlcnMAPVAL);
		}
	}
	else {
		tmp->ezmap = 0;
		c_ctseti("MAP",NhlcnTRIMESHMAPVAL);
	}
	c_ctseti("WSO", 3); /* error recovery on */
	c_ctseti("NVS",0); /* no vertical strips */
	c_ctseti("HLE",1); /* search for equal high/lows */
	c_ctseti("SET",0);
	c_ctseti("RWC",500);
	c_ctseti("RWG",1500);
	c_ctsetr("PIT",MAX(0.0,cnp->max_point_distance));
	if (cnp->smoothing_on) {
		c_ctsetr("T2D",cnp->smoothing_tension);
		c_ctsetr("SSL",cnp->smoothing_distance);
	}
	else {
		c_ctsetr("T2D",(float)0.0);
	}
	gset_fill_colr_ind((Gint)_NhlGetGksCi(cnl->base.wkptr,0));
	subret = UpdateLineAndLabelParams(cnl,&cnp->do_lines,&cnp->do_labels);
	if ((ret = MIN(subret,ret)) < NhlWARNING) {
		ContourAbortDraw(cnl);
		gset_clip_ind(clip_ind_rect.clip_ind);
		return NULL;
	}
/* Retrieve workspace pointers */
	if ((cnp->fws = _NhlUseWorkspace(cnp->fws_id)) == NULL) {
		e_text = "%s: error reserving float workspace";
		NhlPError(NhlFATAL,NhlEUNKNOWN,e_text,entry_name);
		ContourAbortDraw(cnl);
		gset_clip_ind(clip_ind_rect.clip_ind);
		return(NULL);
	}
	if ((cnp->iws = _NhlUseWorkspace(cnp->iws_id)) == NULL) {
		e_text = "%s: error reserving integer workspace";
		NhlPError(NhlFATAL,NhlEUNKNOWN,e_text,entry_name);
		ContourAbortDraw(cnl);
		gset_clip_ind(clip_ind_rect.clip_ind);
		return(NULL);
	}
	if ((ret = MIN(subret,ret)) < NhlWARNING) {
		ContourAbortDraw(cnl);
		gset_clip_ind(clip_ind_rect.clip_ind);
		return NULL;
	}
	/* choose the requested levels or fall back to the plot's levels */
	if (n_levels <= 0) {
		clvp = (float *) cnp->levels->data;
		count = cnp->level_count;
	}
	else {
		count = n_levels;
		clvp = levels;
	}
	isolines = (NhlIsoLine *) NhlMalloc(sizeof(NhlIsoLine) * count);
	if (! mesh_inited) {
		subret = InitMesh(cnl,tmp,1,entry_name);
		if ((ret = MIN(subret,ret)) < NhlWARNING) {
			gset_clip_ind(clip_ind_rect.clip_ind);
			ContourAbortDraw(cnl);
			return NULL;
		}
		mesh_inited = 1;
	}
	/* one NhlIsoLine record per level */
	for (i = 0, ilp = isolines; i < count; i++, ilp++) {
		int flag,npoints;
		NhlBoolean done = False;
		float *xloc = NULL, *yloc = NULL;
		int current_seg_alloc = 10;
		int current_point_count = 0;
		int current_seg = -1;
		int j;
		float save_xloc, save_yloc;
		int same_segment;
		int npoints_in_cur_segment;
		TriBlock *tbp;
		flag = 0;
	/*
	  printf("Points for level %f:\n", clvp[i]);
	*/
		ilp->level = clvp[i];
		ilp->x = ilp->y = NULL;
		ilp->start_point = ilp->n_points = NULL;
		/* NOTE(review): only mesh block 0 is traversed; presumably
		   iso-line extraction is single-block only — confirm. */
		tbp = &(tmp->tri_block[0]);
		while (! done) {
			/* fetch the next line fragment; flag == 0 means no more */
			subret = _NhlCtcltr(tbp->rpnt,tbp->iedg,tbp->itri,cnp->fws,cnp->iws,clvp[i],
					    &flag,&xloc,&yloc,&npoints,entry_name);
			if ((ret = MIN(subret,ret)) < NhlWARNING) {
				ContourAbortDraw(cnl);
				gset_clip_ind(clip_ind_rect.clip_ind);
				return NULL;
			}
			if (flag == 0)
				break;
			if (current_seg == -1) {
				ilp->x = NhlMalloc(sizeof(float) * npoints);
				ilp->y = NhlMalloc(sizeof(float) * npoints);
				save_xloc = xloc[npoints-1];
				save_yloc = yloc[npoints-1];
				same_segment = 0;
			}
			else {
				ilp->x = NhlRealloc(ilp->x, sizeof(float) * (current_point_count + npoints));
				ilp->y = NhlRealloc(ilp->y, sizeof(float) * (current_point_count + npoints));
				/* fragment starting at previous end point continues the segment */
				if (xloc[0] == save_xloc && yloc[0] == save_yloc) {
					same_segment = 1;
					npoints_in_cur_segment += npoints;
				}
				else {
					same_segment = 0;
					npoints_in_cur_segment = npoints;
				}
				save_xloc = xloc[npoints-1];
				save_yloc = yloc[npoints-1];
			}
			memcpy((char*)(ilp->x + current_point_count),xloc, npoints * sizeof(float));
			memcpy((char*)(ilp->y + current_point_count),yloc, npoints * sizeof(float));
			if (tmp->ezmap) { /* points need to be transformed back into map coordinates */
				double xlon, ylat,last_xlon;
				int mod_360 = 0;	/* -1/0/+1: current 360-degree unwrap state */
				int k = current_point_count;
				int first = 1;
				for (j = current_point_count; j < current_point_count + npoints; j++) {
					c_mdptri((double)ilp->x[j],(double)ilp->y[j],&ylat,&xlon);
					/* c_mdptri returns a huge value for invisible points; drop them */
					if (xlon > 1e10)
						continue;
					if (first) {
						last_xlon = xlon;
						first = 0;
					}
					/* detect a jump across the dateline and unwrap by +/-360 */
					switch (mod_360) {
					case 0:
					default:
						if (last_xlon - xlon < -180) {
							mod_360 = -1;
						}
						else if (last_xlon - xlon > 180) {
							mod_360 = 1;
						}
						break;
					case 1:
						if (xlon - last_xlon > 180) {
							mod_360 = 0;
						}
						break;
					case -1:
						if (xlon - last_xlon < -180) {
							mod_360 = 0;
						}
						break;
					}
					ilp->x[k] = (float)xlon + mod_360 * 360;
					ilp->y[k] = (float)ylat;
					last_xlon = xlon;
					k++;
				}
				npoints = k - current_point_count;	/* compacted count */
			}
			if (npoints == 0)
				continue;
			if (same_segment) {
				ilp->n_points[current_seg] += npoints;
			}
			else {
				/* start a new segment, growing the segment arrays as needed */
				current_seg++;
				if (current_seg == 0) {
					ilp->n_points = NhlMalloc(sizeof(int) * current_seg_alloc);
					ilp->start_point = NhlMalloc(sizeof(int) * current_seg_alloc);
				}
				else if (current_seg == current_seg_alloc) {
					ilp->n_points = NhlRealloc(ilp->n_points,sizeof(int) * current_seg_alloc * 2);
					ilp->start_point = NhlRealloc(ilp->start_point,sizeof(int) * current_seg_alloc * 2);
					current_seg_alloc *= 2;
				}
				ilp->n_points[current_seg] = npoints;
				ilp->start_point[current_seg] = current_point_count;
			}
			current_point_count += npoints;
		}
		ilp->point_count = current_point_count;
		ilp->n_segments = current_seg + 1;
	}
	/* release workspaces and restore clip state */
	if (cnp->fws != NULL) {
		subret = _NhlIdleWorkspace(cnp->fws);
		ret = MIN(subret,ret);
		cnp->fws = NULL;
	}
	if (cnp->iws != NULL) {
		subret = _NhlIdleWorkspace(cnp->iws);
		cnp->iws = NULL;
		ret = MIN(subret,ret);
	}
	if (ret < NhlWARNING)
		return NULL;
	return isolines;
}
/* Half-plane in cell-array coordinates: the edge (vx,vy,c) classifies a
   point (x,y) as inside when vx*x + vy*y <= c. One PlaneSet per polygon
   edge is used for point-in-cell tests in _NhlUnstructuredMeshFill. */
typedef struct {
	float vx,vy,c;
} PlaneSet;
/*
* Function: _NhlUnstructuredMeshFill
*
* Description: performs a mesh raster fill -
* replaces Conpack routine CPCICA - Conpack must be initialized, etc.
*
* In Args:
*
* Out Args:
*
* Return Values:
*
* Side Effects:
*/
/*
 * Function:	_NhlUnstructuredMeshFill
 *
 * Description:	Performs a mesh raster fill — replaces the Conpack routine
 *		CPCICA (Conpack must already be initialized). Each data
 *		cell polygon (from the sf[XY]CellBounds resources) is
 *		projected to fraction coordinates, converted to cell-array
 *		indices, and every raster cell whose center lies inside the
 *		polygon (tested against a set of edge half-planes) is
 *		assigned the area id of the level bucket that the cell's
 *		data value falls into.
 *
 * In Args:	cell        - cell array, row stride ica1, icam x ican used
 *		xcpf..ycqf  - fraction-coordinate corners of the cell array
 *		min_cell_size, smooth - currently unused here
 *		entry_name  - caller name for error reporting
 *
 * Return Values: NhlErrorTypes.
 */
NhlErrorTypes _NhlUnstructuredMeshFill
#if NhlNeedProto
(
	int *cell,
	int ica1,
	int icam,
	int ican,
	float xcpf,
	float ycpf,
	float xcqf,
	float ycqf,
	float min_cell_size,
	NhlBoolean smooth,
	char *entry_name
)
#else
(cell,ica1,icam,ican,
 xcpf,ycpf,xcqf,ycqf,min_cell_size,smooth,entry_name)
	int *cell;
	int ica1;
	int icam;
	int ican;
	float xcpf;
	float ycpf;
	float xcqf;
	float ycqf;
	float min_cell_size;
	NhlBoolean smooth;
	char *entry_name;
#endif
{
	NhlContourPlotLayerPart *cnp = Cnp;
	NhlErrorTypes ret = NhlNOERROR;
	char *e_text;
	NhlBoolean ezmap = False;
	float *xv, *yv,*zdat;
	float *levels;
	int nv;
	float cxstep,cystep;
	float tol1,tol2;
	float xsoff,xeoff,ysoff,yeoff;
	int i,j;
	int grid_fill_ix;
	int cell_count;
	float avg_cells_per_grid_box;
	float mflx,mfby,mfrx,mfuy;
	float min_minx, max_maxx, min_miny, max_maxy;
	float max_coverage = 0;
	int twice;
	int icaf, map;
	float orv,spv;

	if (cnp == NULL) {
		e_text = "%s: invalid call to _NhlRasterFill";
		NhlPError(NhlFATAL,NhlEUNKNOWN,e_text,entry_name);
		return(NhlFATAL);
	}
	if (cnp->trans_obj->base.layer_class->base_class.class_name ==
	    NhlmapTransObjClass->base_class.class_name) {
		ezmap = True;
	}
	levels = (float*) cnp->levels->data;
	c_ctgetr("ORV",&orv);
	c_ctgeti("CAF",&icaf);
	c_ctgeti("MAP",&map);
	/* NOTE(review): SPV is fetched from Conpack (c_cpgetr) while the
	   other parameters come from the ct interface — confirm intended. */
	c_cpgetr("SPV",&spv);
	nv = cnp->sfp->x_cell_bounds->len_dimensions[1];
	cell_count = cnp->sfp->x_cell_bounds->len_dimensions[0];
	xv = (float*)cnp->sfp->x_cell_bounds->data;
	yv = (float*)cnp->sfp->y_cell_bounds->data;
	zdat = (float*)cnp->sfp->d_arr->data;
	/* fraction-coordinate size of one raster cell in x and y */
	cxstep = (xcqf-xcpf)/(float)icam;
	cystep = (ycqf-ycpf)/(float)ican;
	xsoff = Xsoff + .5 * (1.0 - Xsoff);
	xeoff = Xeoff + .5 * (1.0 - Xeoff);
	ysoff = Ysoff + .5 * (1.0 - Ysoff);
	yeoff = Yeoff + .5 * (1.0 - Yeoff);
	tol1 = 0.00001 * MIN(Cnl->view.width,Cnl->view.height);
	tol2 = 0.5 * MIN(Cnl->view.width,Cnl->view.height);
/*
 * initialize cell array with the missing value.
 */
	grid_fill_ix = Cnp->grid_bound.gks_fcolor;
	grid_fill_ix = grid_fill_ix < 0 ? NhlBACKGROUND : grid_fill_ix;
	for (j = 0; j < ican; j++) {
		for (i = 0; i < icam; i++) {
			*(cell + j * ica1 + i) = grid_fill_ix;
		}
	}
	avg_cells_per_grid_box = (icam * ican) / ((float)cell_count);
/*
	printf("in unstructured mesh fill\n");
	printf("avg_cells_per_grid_box = %f\n",avg_cells_per_grid_box);
	printf("icam %d ican %d\n",icam,ican);
*/
	/* margin box: cells projecting more than 10% outside are skipped */
	mflx = xcpf - (xcqf - xcpf) * .1;
	mfrx = xcqf + (xcqf - xcpf) * .1;
	mfby = ycpf - (ycqf - ycpf) * .1;
	mfuy = ycqf + (ycqf - ycpf) * .1;
	min_minx = min_miny = 1e30;
	max_maxx = max_maxy = 0;
	twice = 0;
	for (i = 0; i < cell_count; i++) {
		float xi[10], yi[10],xo[10],yo[10],xp[10],yp[10];
		float minx,miny,maxx,maxy;
		int flip_edge;
		int status;
		PlaneSet *pps, ps[10];
		int p,p1,p2,p0;
		int jcv,icv;
		int iplus,jplus;
		int iaid,k;
		float fvali;
		memcpy(xi,xv + i * nv,nv * sizeof(float));
		memcpy(yi,yv + i * nv,nv * sizeof(float));
		minx = miny = 1e30;
		maxx = maxy = 0;
		_NhlDataToWin(Cnp->trans_obj,xi,yi,
			      nv,xo,yo,&status,
			      NULL,NULL);
		if (status) {
			continue;
		}
		/* project each vertex into (fractional) cell-array indices */
		for (p = 0; p < nv; p++) {
			float tx,ty;
			tx = c_cufx(xo[p]);
			ty = c_cufy(yo[p]);
#if 1
			if (tx < mflx || tx > mfrx || ty < mfby || ty > mfuy) {
				status = 1;
				break;
			}
#endif
			xp[p] = (tx - xcpf) / cxstep;
			/* FIX: y must be scaled by cystep (was cxstep, which
			   put points in the wrong rows whenever the raster
			   cells are not square; cystep was otherwise unused) */
			yp[p] = (ty - ycpf) / cystep;
			if (xp[p] < minx)
				minx = xp[p];
			if (xp[p] > maxx)
				maxx = xp[p];
			if (yp[p] < miny)
				miny = yp[p];
			if (yp[p] > maxy)
				maxy = yp[p];
		}
		/* FIX: skip cells that broke out of the loop above; the
		   original fell through and used uninitialized xp/yp
		   entries for the remaining vertices */
		if (status) {
			continue;
		}
#if 1
		/* cell appears to straddle the horizontal wrap seam:
		   shift its low-x vertices right by one period */
		if (maxx - minx > icam / 2.0) {
			float new_maxx = -1e30,new_minx = 1e30;
			twice = 2;
			for (p = 0; p < nv; p++) {
				if (xp[p] < icam / 2) {
					xp[p] += (float) icam;
				}
				if (xp[p] < new_minx)
					new_minx = xp[p];
				if (xp[p] > new_maxx)
					new_maxx = xp[p];
			}
			maxx = new_maxx;
			minx = new_minx;
		}
#endif
		/* build one half-plane per polygon edge, oriented so that
		   "inside" is <= c regardless of vertex winding */
		pps = &ps[0];
		flip_edge = (xp[0] - xp[1]) * (yp[1] - yp[2]) >
			(yp[0] - yp[1]) * (xp[1] - xp[2]);
		for (p1 = 0, p2 = 1; p2 < nv; p1 = p2, p2++,pps++) {
			pps->vx = yp[p1] - yp[p2];
			pps->vy = xp[p2] - xp[p1];
			pps->c = pps->vx * xp[p1] + pps->vy * yp[p1];
			/* check sense and reverse plane edge if need be */
			if ( flip_edge ) {
				pps->vx = -pps->vx ;
				pps->vy = -pps->vy ;
				pps->c = -pps->c ;
			}
		}
		/*printf("coverage : %f\n", ((maxy + 1) - miny) * ((maxx +1) - minx));*/
		if (((maxy + 1) - miny) * ((maxx +1) - minx) > max_coverage)
			max_coverage = ((maxy + 1) - miny) * ((maxx +1) - minx);
		/* reject wildly oversized (probably wrapped/degenerate) cells */
		if (((maxy + 1) - miny) * ((maxx +1) - minx) > 400 * avg_cells_per_grid_box)
			continue;
		if (minx < min_minx) min_minx = minx;
		if (miny < min_miny) min_miny = miny;
		if (maxx > max_maxx) max_maxx = maxx;
		if (maxy > max_maxy) max_maxy = maxy;
		for (jcv = MAX(0,(int) miny); jcv < MIN(ican,(int) (maxy + 1)); jcv++) {
			float ty = jcv + .5;
			/* NOTE(review): the x bound uses MAX(icam,...) where
			   the y bound uses MIN(ican,...) — presumably to let
			   wrap-shifted cells (see above) extend past icam,
			   with iplus clamped below; confirm against the
			   commented-out "icv % icam" wrap mapping. */
			for (icv = MAX(0,(int) minx); icv < MAX(icam,(int) (maxx + 1)); icv++) {
				float tx = icv +.5;
				/* cell-center inside all half-planes? */
				for (p0 = nv-1, pps = ps; --p0; pps++) {
					if (pps->vx * tx + pps->vy * ty > pps->c) {
						break;
					}
				}
				if (p0 > 0)
					continue;
				/*iplus = icv % icam;*/
				iplus = MIN(icv, icam);
				jplus = jcv;
				fvali = zdat[i];
				/* map the data value to an area id: 98 for the
				   special value, else the level bucket */
				iaid = -1;
				if (spv != 0.0 &&
				    fvali == spv)
					iaid = 98;
				else {
					for (k=0; k < Cnp->level_count; k++) {
						if (fvali < levels[k]) {
							iaid = NhlcnAREAID_OFFSET+k;
							break;
						}
					}
				}
				if (iaid == -1)
					iaid = NhlcnAREAID_OFFSET +
						Cnp->level_count;
				(_NHLCALLF(hluctscae,HLUCTSCAE))
					(cell,&ica1,&icam,&ican,
					 &xcpf,&ycpf,&xcqf,&ycqf,
					 &iplus,&jplus,&icaf,&iaid);
			}
		}
	}
	return ret;
}
/* HERO(A,B,C): 4 * area of a triangle with side lengths A, B, C via
   Heron's formula, i.e. sqrt((A+B+C)(B+C-A)(A+C-B)(A+B-C)); the MAX
   with 0 guards against tiny negative values from round-off on
   degenerate (collinear) triangles. */
#define HERO(A,B,C) \
	sqrt(MAX(0.,((A)+(B)+(C))*((B)+(C)-(A))*((A)+(C)-(B))*((A)+(B)-(C))))
/*
* Function: _NhlTriMeshRasterFill
*
* Description: performs a discrete raster fill -
* replaces Conpack routine CPCICA - Conpack must be initialized, etc.
*
* In Args:
*
* Out Args:
*
* Return Values:
*
* Side Effects:
*/
NhlErrorTypes _NhlTriMeshRasterFill
#if NhlNeedProto
(
float *rpnt,
int *iedg,
int *itri,
int *cell,
int ica1,
int icam,
int ican,
float xcpf,
float ycpf,
float xcqf,
float ycqf,
void *info,
char *entry_name
)
#else
(rpnt,iedg,itri,cell,ica1,icam,ican,
xcpf,ycpf,xcqf,ycqf,entry_name)
float *rpnt,
int *iedg,
int *itri,
int *cell;
int ica1;
int icam;
int ican;
float xcpf;
float ycpf;
float xcqf;
float ycqf;
void *info;
char *entry_name;
#endif
{
NhlErrorTypes ret = NhlNOERROR;
char *e_text;
int i,j,k,n,icaf,map,imap,iaid;
float xccf,xccd,xcci,yccf,yccd,ycci;
float zval,orv;
float *levels;
float cxstep,cystep;
float xsoff,xeoff,ysoff,yeoff;
NhlBoolean x_isbound,y_isbound;
float tol1,tol2;
int ipp1,ipp2,ipp3;
float xcu1,xcu2,xcu3,ycu1,ycu2,ycu3;
float xcf1,xcf2,xcf3,ycf1,ycf2,ycf3;
float xd12,xd23,xd31,yd12,yd23,yd31;
float fva1,fva2,fva3;
float dn12,dn23,dn31;
int bound1,bound2;
int ibeg,iend,jbeg,jend;
int grid_fill_ix;
TriBlock *tbp;
if (! info) {
tbp = Tbp;
}
else {
tbp = (TriBlock *) info;
}
/*
#ifdef _OPENMP
{
int tid = omp_get_thread_num();
printf("%d x s&e %f %f y s&e %f %f\n", tid, tbp->xs,tbp->xe,tbp->ys,tbp->ye);
}
#endif
*/
if (Cnp == NULL) {
e_text = "%s: invalid call to _NhlRasterFill";
NhlPError(NhlFATAL,NhlEUNKNOWN,e_text,entry_name);
return(NhlFATAL);
}
levels = (float*) Cnp->levels->data;
/*
* replacement for CTCICA
*/
c_ctgetr("ORV",&orv);
c_ctgeti("CAF",&icaf);
c_ctgeti("MAP",&map);
cxstep = (xcqf-xcpf)/(float)icam;
cystep = (ycqf-ycpf)/(float)ican;
x_isbound = Cnp->sfp->xc_is_bounds;
y_isbound = Cnp->sfp->yc_is_bounds;
xsoff = Xsoff + .5 * (1.0 - Xsoff);
xeoff = Xeoff + .5 * (1.0 - Xeoff);
ysoff = Ysoff + .5 * (1.0 - Ysoff);
yeoff = Yeoff + .5 * (1.0 - Yeoff);
tol1 = 0.00001 * MIN(Cnl->view.width,Cnl->view.height);
tol2 = 0.5 * MIN(Cnl->view.width,Cnl->view.height);
/*
* Now overwrite out-of-range areas with the out-of-range color
*/
grid_fill_ix = Cnp->out_of_range.gks_fcolor < 0 ? NhlTRANSPARENT_CI : Cnp->out_of_range.gks_fcolor;
if (Tmp->ezmap) {
imap = -map;
zval = 0;
for (j = 0; j < ican; j++) {
if (j == 0)
yccf = ycpf + ysoff * cystep;
else if (j == ican - 1)
yccf = ycpf + (ican - yeoff) * cystep;
else
yccf = ycpf + (j + ysoff) * cystep;
yccd = c_cfuy(yccf);
for (i = 0; i < icam; i++) {
if (i == 0)
xccf = xcpf + xsoff * cxstep;
else if (i == icam - 1)
xccf = xcpf + (icam - xeoff) * cxstep;
else
xccf = xcpf + (i+xsoff) * cxstep;
xccd = c_cfux(xccf);
(_NHLCALLF(hluctmxyz,HLUCTMXYZ))
(&imap,&xccd,&yccd,&zval,&xcci,&ycci);
if (xcci == orv) {
*(cell + j * ica1 + i) = grid_fill_ix;
}
}
}
}
/*
* examine each triangle in turn
*/
for (n = 0; n <= tbp->ntri - Lotn; n += Lotn) {
if (itri[n+3] != 0)
continue;
/*
* project point 1; if invisible skip it.
*/
if (iedg[itri[n]] == iedg[itri[n+1]] ||
iedg[itri[n]] == iedg[itri[n+1]+1]) {
ipp1 = iedg[itri[n]];
}
else {
ipp1 = iedg[itri[n]+1];
}
(_NHLCALLF(hluctmxyz,HLUCTMXYZ))
(&map,&rpnt[ipp1],&rpnt[ipp1+1],&rpnt[ipp1+2],
&xcu1,&ycu1);
if (orv != 0.0 && (xcu1 == orv || ycu1 == orv))
continue;
/*
* project point 2; if invisible skip the triangle
*/
if (iedg[itri[n+1]] == iedg[itri[n+2]] ||
iedg[itri[n+1]] == iedg[itri[n+2]+1]) {
ipp2 = iedg[itri[n+1]];
}
else {
ipp2 = iedg[itri[n+1]+1];
}
(_NHLCALLF(hluctmxyz,HLUCTMXYZ))
(&map,&rpnt[ipp2],&rpnt[ipp2+1],&rpnt[ipp2+2],
&xcu2,&ycu2);
if (orv != 0.0 && (xcu2 == orv || ycu2 == orv))
continue;
/*
* project point 3; if invisible skip the triangle
*/
if (iedg[itri[n+2]] == iedg[itri[n]] ||
iedg[itri[n+2]] == iedg[itri[n]+1]) {
ipp3 = iedg[itri[n+2]];
}
else {
ipp3 = iedg[itri[n+2]+1];
}
(_NHLCALLF(hluctmxyz,HLUCTMXYZ))
(&map,&rpnt[ipp3],&rpnt[ipp3+1],&rpnt[ipp3+2],
&xcu3,&ycu3);
if (orv != 0.0 && (xcu3 == orv || ycu3 == orv))
continue;
xcf1 = c_cufx(xcu1);
ycf1 = c_cufy(ycu1);
xcf2 = c_cufx(xcu2);
ycf2 = c_cufy(ycu2);
xcf3 = c_cufx(xcu3);
ycf3 = c_cufy(ycu3);
xd12 = xcf2 - xcf1;
yd12 = ycf2 - ycf1;
xd23 = xcf3 - xcf2;
yd23 = ycf3 - ycf2;
xd31 = xcf1 - xcf3;
yd31 = ycf1 - ycf3;
/*
* skip triangle if too small or too large
*/
if ((fabs(xd12) < tol1 && fabs(yd12) < tol1) ||
(fabs(xd23) < tol1 && fabs(yd23) < tol1) ||
(fabs(xd31) < tol1 && fabs(yd31) < tol1))
continue;
if ((fabs(xd12) > tol2 || fabs(yd12) > tol2) ||
(fabs(xd23) > tol2 || fabs(yd23) > tol2) ||
(fabs(xd31) > tol2 || fabs(yd31) > tol2))
continue;
/*
* get the field values at the 3 points of the triangle
*/
fva1 = rpnt[ipp1+3];
fva2 = rpnt[ipp2+3];
fva3 = rpnt[ipp3+3];
/*
* compute triangle lengths and the area
*/
dn12 = sqrt(xd12*xd12 + yd12 * yd12);
dn23 = sqrt(xd23*xd23 + yd23 * yd23);
dn31 = sqrt(xd31*xd31 + yd31 * yd31);
/*
* Now set loop limits to examine center points of all cells that overlap
* the bounding box of the triangle
*/
bound1 = MAX(0,
MIN(icam-1,(int)
((MIN(xcf1,MIN(xcf2,xcf3)) - xcpf) /
(xcqf-xcpf) * (float) icam)));
bound2 = MAX(0,
MIN(icam-1,(int)
((MAX(xcf1,MAX(xcf2,xcf3)) - xcpf) /
(xcqf-xcpf) * (float) icam)));
ibeg = MIN(bound1,bound2);
iend = MAX(bound1,bound2);
bound1 = MAX(0,
MIN(ican-1,(int)
((MIN(ycf1,MIN(ycf2,ycf3)) - ycpf) /
(ycqf-ycpf) * (float) ican)));
bound2 = MAX(0,
MIN(ican-1,(int)
((MAX(ycf1,MAX(ycf2,ycf3)) - ycpf) /
(ycqf-ycpf) * (float) ican)));
jbeg = MIN(bound1,bound2);
jend = MAX(bound1,bound2);
/*
* find each cell whose center point lies within the triangle and
* set its color index appropriately
*/
for (j = jbeg; j <= jend; j++) {
float ts12,ts23,ts31;
float dnc1,dnc2,dnc3;
float yfp,xfp;
int jplus = j+1;
float a1,a2,a3;
yfp = ycpf + ((float)j+.5)/ican * (ycqf - ycpf);
for (i = ibeg; i <= iend; i++) {
float atot;
int iplus = i+1;
xfp = xcpf + ((float)i+.5)/icam * (xcqf - xcpf);
ts12 = (yd12*xfp-xd12*yfp-yd12*xcf1+xd12*ycf1)/
dn12;
ts23 = (yd23*xfp-xd23*yfp-yd23*xcf2+xd23*ycf2)/
dn23;
ts31 = (yd31*xfp-xd31*yfp-yd31*xcf3+xd31*ycf3)/
dn31;
if ((ts12 < 0.00001 && ts23 < 0.00001 &&
ts31 < 0.00001) ||
(ts12 > -0.00001 && ts23 > -0.00001 &&
ts31 > -0.00001)) {
float xd1,xd2,xd3,yd1,yd2,yd3;
float fvali;
xd1 = xfp - xcf1;
xd2 = xfp - xcf2;
xd3 = xfp - xcf3;
yd1 = yfp - ycf1;
yd2 = yfp - ycf2;
yd3 = yfp - ycf3;
dnc1 = sqrt(xd1*xd1 + yd1*yd1);
dnc2 = sqrt(xd2*xd2 + yd2*yd2);
dnc3 = sqrt(xd3*xd3 + yd3*yd3);
a1 = HERO(dn23,dnc2,dnc3);
a2 = HERO(dn31,dnc3,dnc1);
a3 = HERO(dn12,dnc1,dnc2);
atot = a1 + a2 + a3;
if (atot == 0.0)
continue;
if (Cnp->raster_smoothing_on) {
fvali = (fva1 * a1 +
fva2 * a2 + fva3 * a3) / atot;
}
else if (a1 > a2 && a1 > a3) {
fvali = fva1;
}
else if (a2 > a1 && a2 > a3) {
fvali = fva2;
}
else {
fvali = fva3;
}
iaid = -1;
for (k=0; k < Cnp->level_count; k++) {
if (fvali < levels[k]) {
iaid = NhlcnAREAID_OFFSET+k;
break;
}
}
if (iaid == -1)
iaid = NhlcnAREAID_OFFSET +
Cnp->level_count;
(_NHLCALLF(hluctscae,HLUCTSCAE))
(cell,&ica1,&icam,&ican,
&xcpf,&ycpf,&xcqf,&ycqf,
&iplus,&jplus,&icaf,&iaid);
}
}
}
}
#if 0
{
for (j = 0; j < ican; j++) {
int found = 0;
for (i = 2 ; i < icam; i++) {
if (!found && cell[j * icam + i] == 1073741824) {
continue;
}
found = 1;
if (cell[j * icam + i] == 1073741824) {
printf("row %d last col %d last 2 val %d %d\n", j, i,cell[j * icam + i -1],cell[j * icam + i -2]);
break;
}
}
}
}
#endif
return ret;
}
/*
 * Function: CnTriMeshWriteCellData
 *
 * Description: Writes out the interpolated data associated with each
 * cell. This is a way of interpolating from one grid to another.
 * The triangle scan mirrors _NhlTriMeshRasterFill, but instead of
 * color indexes it stores the area-weighted interpolated field value
 * for each cell, then dumps the grid ("tmp.bin") plus the cell-center
 * longitudes ("tmp-lon.bin") and latitudes ("tmp-lat.bin") as raw
 * binary float files.
 *
 * In Args: rpnt/iedg/itri - the triangular mesh arrays; icam/ican -
 * output grid dimensions; xcpf/ycpf/xcqf/ycqf - fractional
 * coordinates of the grid corners
 *
 * Out Args:
 *
 * Return Values: NhlErrorTypes
 *
 * Side Effects: writes the three files named above in the current
 * working directory.
 */
NhlErrorTypes CnTriMeshWriteCellData
#if NhlNeedProto
(
float *rpnt,
int *iedg,
int *itri,
int icam,
int ican,
float xcpf,
float ycpf,
float xcqf,
float ycqf,
char *entry_name
)
#else
/*
 * Fixed: the argument-name list included a stale "ica1" (not in the
 * prototype above), and the first declarations used commas where
 * semicolons are required.
 */
(rpnt,iedg,itri,icam,ican,
xcpf,ycpf,xcqf,ycqf,entry_name)
float *rpnt;
int *iedg;
int *itri;
int icam;
int ican;
float xcpf;
float ycpf;
float xcqf;
float ycqf;
char *entry_name;
#endif
{
NhlErrorTypes ret = NhlNOERROR;
char *e_text;
int i,j,n,icaf,map;
float orv;
double tol1,tol2;
int ipp1,ipp2,ipp3;
float xcu1,xcu2,xcu3,ycu1,ycu2,ycu3;
double xcf1,xcf2,xcf3,ycf1,ycf2,ycf3;
double xd12,xd23,xd31,yd12,yd23,yd31;
double fva1,fva2,fva3;
double dn12,dn23,dn31;
int bound1,bound2;
int ibeg,iend,jbeg,jend;
float *data;
float init_val;
FILE *fp;
float out_of_range;
int count;
float wlx,wrx,wby,wuy,wxstep,wystep;
int licam,lican;
/*
printf("in CnWriteCellData\n");
*/
if (Cnp == NULL) {
e_text = "%s: invalid call to _NhlRasterFill";
NhlPError(NhlFATAL,NhlEUNKNOWN,e_text,entry_name);
return(NhlFATAL);
}
NhlVAGetValues(Cnp->trans_obj->base.id,
NhlNtrOutOfRangeF,&out_of_range,NULL);
/* cells no triangle covers keep the missing value */
if (Cnp->sfp->missing_value_set) {
init_val = Cnp->sfp->missing_value;
}
else {
init_val = 1E32;
}
/* world-coordinate extent and per-cell step, for the lon/lat files */
wlx = c_cfux(xcpf);
wrx = c_cfux(xcqf);
wby = c_cfuy(ycpf);
wuy = c_cfuy(ycqf);
wxstep = (wrx - wlx) / (icam);
wystep = (wuy - wby) / (ican);
/*
 * replacement for CTCICA
 */
c_ctgetr("ORV",&orv);
c_ctgeti("CAF",&icaf);
c_ctgeti("MAP",&map);
tol1 = 0.00001 * MIN(Cnl->view.width,Cnl->view.height);
tol2 = 0.5 * MIN(Cnl->view.width,Cnl->view.height);
/*
 * initialize data array.
 * make the data array larger by 2 because the outer edges
 * never get written using this algorithm.
 */
licam = icam + 2;
lican = ican + 2;
data = NhlMalloc(licam * lican * sizeof(float));
if (!data) {
NHLPERROR((NhlFATAL,ENOMEM,NULL));
return NhlFATAL;
}
for (j = 0; j < lican; j++) {
for (i = 0; i < licam; i++) {
*(data + j * licam + i) = init_val;
}
}
/*
 * examine each triangle in turn
 */
for (n = 0; n < Tbp->ntri - Lotn; n += Lotn) {
if (itri[n+3] != 0)
continue;
/*
 * project point 1; if invisible skip it.
 */
if (iedg[itri[n]] == iedg[itri[n+1]] ||
iedg[itri[n]] == iedg[itri[n+1]+1]) {
ipp1 = iedg[itri[n]];
}
else {
ipp1 = iedg[itri[n]+1];
}
(_NHLCALLF(hluctmxyz,HLUCTMXYZ))
(&map,&rpnt[ipp1],&rpnt[ipp1+1],&rpnt[ipp1+2],
&xcu1,&ycu1);
if (orv != 0.0 && (xcu1 == orv || ycu1 == orv))
continue;
/*
 * project point 2; if invisible skip the triangle
 */
if (iedg[itri[n+1]] == iedg[itri[n+2]] ||
iedg[itri[n+1]] == iedg[itri[n+2]+1]) {
ipp2 = iedg[itri[n+1]];
}
else {
ipp2 = iedg[itri[n+1]+1];
}
(_NHLCALLF(hluctmxyz,HLUCTMXYZ))
(&map,&rpnt[ipp2],&rpnt[ipp2+1],&rpnt[ipp2+2],
&xcu2,&ycu2);
if (orv != 0.0 && (xcu2 == orv || ycu2 == orv))
continue;
/*
 * project point 3; if invisible skip the triangle
 */
if (iedg[itri[n+2]] == iedg[itri[n]] ||
iedg[itri[n+2]] == iedg[itri[n]+1]) {
ipp3 = iedg[itri[n+2]];
}
else {
ipp3 = iedg[itri[n+2]+1];
}
(_NHLCALLF(hluctmxyz,HLUCTMXYZ))
(&map,&rpnt[ipp3],&rpnt[ipp3+1],&rpnt[ipp3+2],
&xcu3,&ycu3);
/* fixed copy-paste bug: this previously tested ycu2 instead of ycu3 */
if (orv != 0.0 && (xcu3 == orv || ycu3 == orv))
continue;
xcf1 = (double)c_cufx(xcu1);
ycf1 = (double)c_cufy(ycu1);
xcf2 = (double)c_cufx(xcu2);
ycf2 = (double)c_cufy(ycu2);
xcf3 = (double)c_cufx(xcu3);
ycf3 = (double)c_cufy(ycu3);
xd12 = xcf2 - xcf1;
yd12 = ycf2 - ycf1;
xd23 = xcf3 - xcf2;
yd23 = ycf3 - ycf2;
xd31 = xcf1 - xcf3;
yd31 = ycf1 - ycf3;
/*
 * skip triangle if too small or too large
 */
if ((fabs(xd12) < tol1 && fabs(yd12) < tol1) ||
(fabs(xd23) < tol1 && fabs(yd23) < tol1) ||
(fabs(xd31) < tol1 && fabs(yd31) < tol1))
continue;
if ((fabs(xd12) > tol2 || fabs(yd12) > tol2) ||
(fabs(xd23) > tol2 || fabs(yd23) > tol2) ||
(fabs(xd31) > tol2 || fabs(yd31) > tol2))
continue;
/*
 * get the field values at the 3 points of the triangle
 */
fva1 = rpnt[ipp1+3];
fva2 = rpnt[ipp2+3];
fva3 = rpnt[ipp3+3];
/*
 * compute triangle lengths and the area
 */
dn12 = sqrt(xd12*xd12 + yd12 * yd12);
dn23 = sqrt(xd23*xd23 + yd23 * yd23);
dn31 = sqrt(xd31*xd31 + yd31 * yd31);
/*
 * Now set loop limits to examine center points of all cells that overlap
 * the bounding box of the triangle
 */
bound1 = MAX(0,
MIN(licam-1,(int)
((MIN(xcf1,MIN(xcf2,xcf3)) - xcpf) /
(xcqf-xcpf) * (float) licam)));
bound2 = MAX(0,
MIN(licam-1,(int)
((MAX(xcf1,MAX(xcf2,xcf3)) - xcpf) /
(xcqf-xcpf) * (float) licam)));
ibeg = MIN(bound1,bound2);
iend = MAX(bound1,bound2);
bound1 = MAX(0,
MIN(lican-1,(int)
((MIN(ycf1,MIN(ycf2,ycf3)) - ycpf) /
(ycqf-ycpf) * (float) lican)));
bound2 = MAX(0,
MIN(lican-1,(int)
((MAX(ycf1,MAX(ycf2,ycf3)) - ycpf) /
(ycqf-ycpf) * (float) lican)));
jbeg = MIN(bound1,bound2);
jend = MAX(bound1,bound2);
/*
 * find each cell whose center point lies within the triangle and
 * set its interpolated value
 */
for (j = jbeg; j <= jend; j++) {
double ts12,ts23,ts31;
double dnc1,dnc2,dnc3;
double yfp,xfp;
double a1,a2,a3;
yfp = ycpf + ((double)j+.5)/lican * (ycqf - ycpf);
for (i = ibeg; i <= iend; i++) {
double atot;
xfp = xcpf + ((float)i+.5)/licam * (xcqf - xcpf);
/*
 * signed distances of the cell center from each triangle edge;
 * uniform sign (within tolerance) means the center is inside.
 */
ts12 = (yd12*xfp-xd12*yfp-yd12*xcf1+xd12*ycf1)/
dn12;
ts23 = (yd23*xfp-xd23*yfp-yd23*xcf2+xd23*ycf2)/
dn23;
ts31 = (yd31*xfp-xd31*yfp-yd31*xcf3+xd31*ycf3)/
dn31;
if ((ts12 < 0.00001 && ts23 < 0.00001 &&
ts31 < 0.00001) ||
(ts12 > -0.00001 && ts23 > -0.00001 &&
ts31 > -0.00001)) {
float xd1,xd2,xd3,yd1,yd2,yd3;
float fvali;
xd1 = xfp - xcf1;
xd2 = xfp - xcf2;
xd3 = xfp - xcf3;
yd1 = yfp - ycf1;
yd2 = yfp - ycf2;
yd3 = yfp - ycf3;
dnc1 = sqrt(xd1*xd1 + yd1*yd1);
dnc2 = sqrt(xd2*xd2 + yd2*yd2);
dnc3 = sqrt(xd3*xd3 + yd3*yd3);
/* sub-triangle areas (HERO = 4*area; factor cancels in ratio) */
a1 = HERO(dn23,dnc2,dnc3);
a2 = HERO(dn31,dnc3,dnc1);
a3 = HERO(dn12,dnc1,dnc2);
atot = a1 + a2 + a3;
if (atot == 0.0)
continue;
/* area-weighted interpolation of the vertex values */
fvali = (fva1 * a1 +
fva2 * a2 + fva3 * a3) / atot;
*(data + j * licam + i) = (float)fvali;
}
}
}
}
/*
 * Dump the interior icam x ican portion of the padded grid.
 * Files are opened in binary mode ("wb") since raw floats are written;
 * fopen results are now checked, and resources are released on the
 * error paths (previously data/fp leaked on a short write).
 */
fp = fopen("tmp.bin","wb");
if (!fp) {
NhlPError(NhlFATAL,NhlEUNKNOWN,
"Error opening output file\n");
NhlFree(data);
return NhlFATAL;
}
for (j = 1; j <= ican; j++) {
float *d = data + j * licam + 1;
count = fwrite(d,sizeof(float),icam,fp);
if (count < icam) {
NhlPError(NhlFATAL,NhlEUNKNOWN,
"Error writing output file\n");
fclose(fp);
NhlFree(data);
return NhlFATAL;
}
}
fclose(fp);
fp = fopen("tmp-lon.bin","wb");
if (!fp) {
NhlPError(NhlFATAL,NhlEUNKNOWN,
"Error opening output file\n");
NhlFree(data);
return NhlFATAL;
}
for (i = 0; i < icam; i++) {
float lon = wlx + i * wxstep;
fwrite(&lon,sizeof(float),1,fp);
}
fclose(fp);
fp = fopen("tmp-lat.bin","wb");
if (!fp) {
NhlPError(NhlFATAL,NhlEUNKNOWN,
"Error opening output file\n");
NhlFree(data);
return NhlFATAL;
}
for (j = 0; j < ican; j++) {
float lat = wby + j * wystep;
fwrite(&lat,sizeof(float),1,fp);
}
fclose(fp);
NhlFree(data);
return ret;
}
/*
 * Function: hluctfill
 *
 * Description: C version of APR user routine called from within ARSCAM
 * to fill areas based on the area ID.
 * Chooses fill color, pattern, and scale from the contour level
 * attribute arrays (or a special-region attribute set) and draws the
 * polygon through the workstation fill routine.
 *
 * In Args: xcs,ycs,ncs - polygon vertices and count; iai,iag,nai -
 * area ids, group ids, and their count
 *
 * Out Args:
 *
 * Return Values:
 *
 * Side Effects: temporarily changes workstation fill attributes;
 * restores the fill opacity afterwards.
 */
/*ARGSUSED*/
int (_NHLCALLF(hluctfill,HLUCTFILL))
#if NhlNeedProto
(
float *xcs,
float *ycs,
int *ncs,
int *iai,
int *iag,
int *nai
)
#else
(xcs,ycs,ncs,iai,iag,nai)
float *xcs;
float *ycs;
int *ncs;
int *iai;
int *iag;
int *nai;
#endif
{
int i;
int pat_ix, col_ix;
float fscale;
int *colp, *patp;
float *sclp;
if (Cnp == NULL) return 0;
/*
 * First pass: reject the whole area if any of its ids marks it as
 * not-to-be-filled (id 9999, or id -1 in group 5).
 */
for (i = 0; i < *nai; i++) {
#if 0
printf("hluctfill i %d iai %d iag %d\n",i,iai[i],iag[i]);
#endif
if (iai[i] == 9999) {
return 0;
}
if (iag[i] == 5 && iai[i] == -1) {
return 0;
}
}
colp = (int *) Cnp->fill_colors->data;
patp = (int *) Cnp->fill_patterns->data;
sclp = (float *) Cnp->fill_scales->data;
/* second pass: fill each contour-group (group 3) area */
for (i = 0; i < *nai; i++) {
if (iag[i] == 3) {
/* ids 100 .. 99+fill_count index the per-level attribute arrays */
if (iai[i] > 99 &&
iai[i] < 100 + Cnp->fill_count) {
int ix = iai[i] - 100;
col_ix = Cnp->mono_fill_color ?
Cnp->fill_color : colp[ix];
pat_ix = Cnp->mono_fill_pattern ?
Cnp->fill_pattern : patp[ix];
fscale = Cnp->mono_fill_scale ?
Cnp->fill_scale : sclp[ix];
}
else {
NhlcnRegionAttrs *reg_attrs;
#if 0
printf("hluctfill region i %d iai %d iag %d\n",i,iai[i],iag[i]);
#endif
/* special regions: 99/98 missing-value / grid bound, 97 out-of-range */
switch (iai[i]) {
case 99:
case 98:
col_ix = MAX(Cnp->missing_val.fill_color,Cnp->grid_bound.fill_color);
pat_ix = MAX(Cnp->missing_val.fill_pat,Cnp->grid_bound.fill_pat);
fscale = Cnp->missing_val.fill_scale == 1.0 ?
Cnp->grid_bound.fill_scale : Cnp->missing_val.fill_scale;
break;
case 97:
reg_attrs = &Cnp->out_of_range;
col_ix = reg_attrs->fill_color;
pat_ix = reg_attrs->fill_pat;
fscale = reg_attrs->fill_scale;
break;
default:
return 0;
}
}
/*
 * Save the current fill opacity, apply the contour fill settings,
 * fill the polygon, then restore the saved opacity.
 * NOTE(review): this declaration follows statements, which requires
 * C99 or later (or a compiler extension) - confirm build settings.
 */
float fill_opacity;
NhlVAGetValues(Cnl->base.wkptr->base.id,
_NhlNwkFillOpacityF, &fill_opacity,
NULL);
NhlVASetValues(Cnl->base.wkptr->base.id,
_NhlNwkFillIndex, pat_ix,
_NhlNwkFillColor, col_ix,
_NhlNwkFillOpacityF, Cnp->fill_opacity,
_NhlNwkFillScaleFactorF,fscale,
_NhlNwkFillBackground,
Cnp->fill_background_color,
_NhlNwkFillDotSizeF,Cnp->fill_dot_size,
_NhlNwkEdgesOn,0,
NULL);
_NhlSetFillInfo(Cnl->base.wkptr,(NhlLayer) Cnl);
_NhlWorkstationFill(Cnl->base.wkptr,xcs,ycs,*ncs);
NhlVASetValues(Cnl->base.wkptr->base.id,
_NhlNwkFillOpacityF, fill_opacity,
NULL);
}
}
return 0;
}
/*
 * Function: hluctscae
 *
 * Description: Replacement for the Conpack CTSCAE routine: stores a
 * single cell-array element, translating the Conpack area id (iaid)
 * into a GKS fill color index first.  Falls through to the stock
 * CTSCAE when no ContourPlot instance is active.
 *
 * In Args: icra - the cell array (first dimension ica1, used extent
 * icam x ican); xcpf/ycpf/xcqf/ycqf - fractional corners;
 * ind1,ind2 - 1-based (Fortran-style) cell indices;
 * icaf - cell array flag; iaid - area id to translate
 *
 * Out Args: icra - the addressed element is overwritten
 *
 * Return Values:
 *
 * Side Effects:
 */
/*ARGSUSED*/
void (_NHLCALLF(hluctscae,HLUCTSCAE))
#if NhlNeedProto
(
int *icra,
int *ica1,
int *icam,
int *ican,
float *xcpf,
float *ycpf,
float *xcqf,
float *ycqf,
int *ind1,
int *ind2,
int *icaf,
int *iaid
)
#else
(icra,ica1,icam,ican,xcpf,ycpf,xcqf,ycqf,ind1,ind2,icaf,iaid)
int *icra;
int *ica1;
int *icam;
int *ican;
float *xcpf;
float *ycpf;
float *xcqf;
float *ycqf;
int *ind1;
int *ind2;
int *icaf;
int *iaid;
#endif
{
int col_ix;
if (Cnp == NULL) {
_NHLCALLF(ctscae,CTSCAE)
(icra,ica1,icam,ican,xcpf,ycpf,xcqf,ycqf,
ind1,ind2,icaf,iaid);
return;
}
/* no support in cell arrays for transparent, so it's necessary
 * to reset transparent color indexes to background
 * 5-29-2013 - this is no longer true. Replace NhlTRANSPARENT with a transparent color index.
 */
if (*iaid > 99 && *iaid < 100 + Cnp->fill_count) {
/* ids 100 .. 99+fill_count index the per-level GKS fill colors */
col_ix = Cnp->gks_fill_colors[*iaid - 100];
if (col_ix < 0) col_ix = NhlTRANSPARENT_CI;
}
else if (*iaid == 99) {
#if 0
printf("hluctscae iaid = %d\n",*iaid);
#endif
/* missing-value region; fall back to the grid-bound color if unset */
col_ix = Cnp->missing_val.gks_fcolor;
if (col_ix <= 0 && Cnp->grid_bound.gks_fcolor > 0)
col_ix = Cnp->grid_bound.gks_fcolor;
if (col_ix < 0)
col_ix = NhlTRANSPARENT_CI;
}
else if (*iaid == 97) {
/* out-of-range region */
col_ix = Cnp->out_of_range.gks_fcolor;
if (col_ix < 0)
col_ix = NhlTRANSPARENT_CI;
#if 0
printf("hluctscae iaid = %d\n",*iaid);
#endif
}
else {
#if 0
printf("hluctscae iaid = %d\n",*iaid);
#endif
col_ix = NhlTRANSPARENT_CI;
}
/* ind1/ind2 are 1-based; convert to 0-based row-major addressing */
*(icra + ((*ind2 - 1) * *ica1 + (*ind1 - 1))) = col_ix;
return;
}
/*
 * Function: hluctchcl
 *
 * Description: C version of the CPCHCL function that is called from
 * the Conpack CPCLDR and CPCLDM functions.
 * Invoked before each contour line is drawn (*iflg == 1): sets the
 * line color, thickness, and DashPack dash pattern for the line, and,
 * when constant line-label placement is in effect, embeds the label
 * text directly into the dash pattern string.
 *
 * In Args: iflg - Conpack drawing-phase flag; only 1 (before a line)
 * is acted on
 *
 * Out Args:
 *
 * Return Values:
 *
 * Side Effects: modifies Plotchar, DashPack, and GKS line attributes.
 */
/*ARGSUSED*/
void (_NHLCALLF(hluctchcl,HLUCTCHCL))
#if NhlNeedProto
(
int *iflg
)
#else
(iflg)
int *iflg;
#endif
{
char func[] = "HLUCTCHCL";
int i, pai, dpix;
char buffer[NhlDASHBUFSIZE];
int lcol;
float thickness, tf;
float *thp;
int *dpp;
if (Cnp == NULL) {
_NHLCALLF(ctchcl,CTCHCL)(iflg);
return;
}
dpp = (int *) Cnp->line_dash_patterns->data;
thp = (float *) Cnp->line_thicknesses->data;
if (*iflg != 1) return;
/* PAI: the current contour level index (1-based); negative for regions */
c_ctgeti("PAI", &pai);
if (pai > 0 && pai < 256) {
if (! Cnp->do_lines) return;
thickness = Cnp->mono_line_thickness ?
Cnp->line_thickness : thp[pai-1];
lcol = Cnp->mono_line_color ?
Cnp->gks_line_colors[0] : Cnp->gks_line_colors[pai-1];
dpix = Cnp->mono_line_dash_pattern ?
Cnp->line_dash_pattern : dpp[pai-1];
}
else {
NhlcnRegionAttrs *reg_attrs;
#if 0
printf("hluctchcl pai: %d\n", pai);
#endif
/* -1: missing-value perimeter; -2: out-of-range perimeter */
switch (pai) {
case -1:
reg_attrs = &Cnp->missing_val;
break;
case -2:
reg_attrs = &Cnp->out_of_range;
break;
default:
return;
}
thickness = reg_attrs->perim_thick;
lcol = reg_attrs->gks_pcolor;
dpix = reg_attrs->perim_dpat;
}
memset((void *) buffer,'\0', sizeof(buffer)*sizeof(char));
c_pcseti("FN",0);
c_pcseti("CL",1);
c_pcseti("CC",-1);
c_pcseti("OC",-1);
(void)_NhlLLErrCheckPrnt(NhlWARNING,func);
/*
 * Reset DashPack so we know what state we are starting from.
 */
_NHLCALLF(dprset,DPRSET)();
(void)_NhlLLErrCheckPrnt(NhlWARNING,func);
/* clamp the dash pattern index into the dash table's range */
if (dpix < 0)
dpix = NhlSOLIDLINE;
else if (dpix > Cnp->dtable_len)
dpix = 1 + (dpix - 1) % Cnp->dtable_len;
strncpy(buffer,Cnp->dtable[dpix],sizeof(buffer) - 1);
/* size each dash-pattern element so the pattern spans line_dash_seglen */
tf = Cnp->line_dash_seglen / (strlen(buffer)+.5);
c_dpsetr("WOG",(float)tf);
(void)_NhlLLErrCheckPrnt(NhlWARNING,func);
c_dpsetr("WOS",(float)tf);
(void)_NhlLLErrCheckPrnt(NhlWARNING,func);
if (lcol == NhlTRANSPARENT) {
/* a transparent line is simulated with an all-gap dash pattern */
for (i = 0; i < strlen(buffer); i++)
buffer[i] = '_';
}
else{
gset_line_colr_ind((Gint)lcol);
(void)_NhlLLErrCheckPrnt(NhlWARNING,func);
}
gset_linewidth(thickness);
(void)_NhlLLErrCheckPrnt(NhlWARNING,func);
if (pai > 0 && Cnp->llabel_placement == NhlCONSTANT) {
/*
 * Constant label placement: append the level's label text to the
 * dash pattern so DashPack draws it along the line.
 */
int buff_size = sizeof(buffer) - strlen(buffer) - 1;
char *tchar = &buffer[strlen(buffer)];
char *ts = ((NhlString *) Cnp->line_lbls.text)[pai-1];
NhlcnLevelUseMode *lup =
(NhlcnLevelUseMode *) Cnp->level_flags->data;
NhlcnLevelUseMode flag;
NhlColorIndex llcol;
int j;
NhlBoolean do_label;
llcol = Cnp->line_lbls.mono_color ?
Cnp->line_lbls.gks_color :
Cnp->line_lbls.colors[pai-1];
flag = Cnp->mono_level_flag ?
Cnp->level_flag : lup[pai-1];
do_label = Cnp->line_lbls.on && flag > NhlLINEONLY;
if (llcol == NhlTRANSPARENT && do_label) {
/*
 * Put spaces in for label.
 */
j = MIN(strlen(ts) * 2 + 1,buff_size);
for(i=0;i < j-1;i+=2){
tchar[i] = ' ';
tchar[i+1] = '|';
}
}
else if (do_label) {
/*
 * Add breaks in at each space of the label.
 */
i=0;
j=0;
while (i < buff_size && ts[j] != '\0'){
if (ts[j] == ' ')
tchar[i++] = '|';
tchar[i++] = ts[j++];
}
c_pcseti("OC",llcol);
c_pcseti("CC",llcol);
_NhlSetFillOpacity(Cnl, 1.0); /* NCL-1509 */
}
c_pcsetr("PH",(float)Cnp->line_lbls.pheight);
c_pcsetr("PW",(float)Cnp->line_lbls.pwidth);
c_pcsetr("CS",(float)Cnp->line_lbls.cspacing);
c_pcseti("FN",Cnp->line_lbls.font);
c_pcseti("QU",Cnp->line_lbls.quality);
c_pcsetc("FC",Cnp->line_lbls.fcode);
c_pcsetr("CL",(float)Cnp->line_lbls.thickness);
(void)_NhlLLErrCheckPrnt(NhlWARNING,func);
}
c_dpsetc("DPT",buffer);
(void)_NhlLLErrCheckPrnt(NhlWARNING,func);
c_dpsetr("WOC",(float)Cnp->line_lbls.real_height);
(void)_NhlLLErrCheckPrnt(NhlWARNING,func);
return;
}
/*
 * Function: Substitute
 *
 * Description: substitutes a string for a Conpack substitution sequence.
 * <buf> points at the start of the sequence inside a larger
 * NUL-terminated buffer; the <replace_count> characters there are
 * replaced by the whole of <subst>, shifting the remainder of the
 * buffer left or right as required.
 *
 * In Args: buf - points at the replacement site within the buffer
 * replace_count - number of characters to replace
 * subst - replacement text
 *
 * Out Args: NONE
 *
 * Return Values: Error Conditions
 *
 * Side Effects: Objects created and destroyed.
 *
 * NOTE(review): the caller's buffer must be large enough to absorb any
 * growth when strlen(subst) > replace_count; no bound is checked here.
 */
static void Substitute
#if NhlNeedProto
(
char *buf,
int replace_count,
char *subst
)
#else
(buf,replace_count,subst)
char *buf;
int replace_count;
char *subst;
#endif
{
int subst_count;
subst_count = strlen(subst);
/*
 * Shift the buffer tail (including the terminating NUL) so that it
 * lands immediately after where the substitute text will go.  memmove
 * handles the overlapping move in either direction, replacing the two
 * hand-rolled copy loops of the original implementation.
 */
if (subst_count != replace_count) {
memmove(buf + subst_count,buf + replace_count,
strlen(buf) - replace_count + 1);
}
/*
 * Copy the substitute text into place: exactly subst_count chars;
 * the shifted tail already supplies the NUL terminator.
 */
strncpy(buf,subst,subst_count);
}
/*
 * Function: hluctchhl
 *
 * Description: Conpack high/low label callback.  Dispatches on the
 * phase flag: cases 1/3 set up and substitute the text of a high
 * label, 5/7 likewise for a low label (replacing "$ZDV$" with the
 * formatted, scale-factor-adjusted data value); 2/-2 and 6/-6 handle
 * the label background box; 4/-4 and 8/-8 the label perimeter line.
 * Negative flags restore state changed by the matching positive flag.
 *
 * In Args: iflg - Conpack label-drawing phase flag
 *
 * Out Args:
 *
 * Return Values:
 *
 * Side Effects: modifies Plotchar, GKS, and Conpack ("CTM") state;
 * increments the high/low label counts.
 */
/*ARGSUSED*/
void (_NHLCALLF(hluctchhl,HLUCTCHHL))
#if NhlNeedProto
(
int *iflg
)
#else
(iflg)
int *iflg;
#endif
{
/*
 * NOTE(review): buf is filled with strcpy from the label text resource;
 * assumes the text is shorter than 128 bytes - confirm upstream limits.
 */
char buf[128];
char *fstr,*sub;
float dva;
NhlFormatRec *frec;
int *fwidth, *sig_digits, *left_sig_digit, *point_pos, *exp_switch_len, *exp_field_width;
if (Cnp == NULL) {
_NHLCALLF(ctchhl,CTCHHL)(iflg);
return;
}
#if 0
{ /* for debugging */
float flx,frx,fby,fuy,wlx,wrx,wby,wuy; int ll;
c_getset(&flx,&frx,&fby,&fuy,&wlx,&wrx,&wby,&wuy,&ll);
printf("getset - %f,%f,%f,%f,%f,%f,%f,%f\n",
flx,frx,fby,fuy,wlx,wrx,wby,wuy);
}
#endif
/*
 * The text-producing cases (1,3,5,7) need the format fields below;
 * a NULL pointer means "unspecified - use the format's default".
 */
switch (*iflg) {
case 1:
case 3:
case 5:
case 7:
frec = &Cnp->max_data_format;
fwidth = frec->field_width_flag == NhlffUNSPECED ? NULL : &frec->field_width;
sig_digits = frec->sig_digits_flag == NhlffUNSPECED ? NULL : &frec->sig_digits;
left_sig_digit = frec->left_sig_digit_flag == NhlffUNSPECED ? NULL : &frec->left_sig_digit;
point_pos = frec->point_position_flag == NhlffUNSPECED ? NULL : &frec->point_position;
exp_switch_len = frec->exp_switch_flag == NhlffUNSPECED ? NULL : &frec->exp_switch_len;
exp_field_width = frec->exp_field_width_flag == NhlffUNSPECED ? NULL : &frec->exp_field_width;
/* drop through */
default:
break;
}
switch (*iflg) {
case 1:
/* high label text setup */
if (! Cnp->high_lbls.on) {
c_ctsetc("CTM"," ");
return;
}
if ( Cnp->high_lbls.gks_color > NhlTRANSPARENT) {
c_pcseti("CC", Cnp->high_lbls.gks_color);
c_pcseti("OC", Cnp->high_lbls.gks_color);
_NhlSetFillOpacity(Cnl, 1.0); /* NCL-1509 */
}
c_pcsetr("PH",(float)Cnp->high_lbls.pheight);
c_pcsetr("PW",(float)Cnp->high_lbls.pwidth);
c_pcsetr("CS",(float)Cnp->high_lbls.cspacing);
c_pcseti("FN",Cnp->high_lbls.font);
c_pcseti("QU",Cnp->high_lbls.quality);
c_pcsetc("FC",Cnp->high_lbls.fcode);
gset_linewidth(Cnp->high_lbls.thickness);
strcpy(buf,(char *)Cnp->high_lbls.text);
if ((sub = strstr(buf,"$ZDV$")) == NULL) {
return;
}
/* replace $ZDV$ with the formatted, scaled data value */
c_ctgetr("dva",&dva);
dva /= Cnp->label_scale_factor;
fstr = _NhlFormatFloat(&Cnp->high_lbls.format,dva,
fwidth, sig_digits,
left_sig_digit, exp_field_width,
exp_switch_len, point_pos,
Cnp->high_lbls.fcode[0],
"ContourPlotDraw");
Substitute(sub,5,fstr);
c_ctsetc("CTM",buf);
break;
case 2:
/* high label background box (before drawing) */
if (! Cnp->high_lbls.on) return;
if (Cnp->hlb_val > 1) {
if (Cnp->high_lbls.gks_bcolor == NhlTRANSPARENT)
_NhlSetFillOpacity(Cnl, 0.0);
else
gset_fill_colr_ind(Cnp->high_lbls.gks_bcolor);
}
break;
case -2:
/* high label background box (after drawing): restore opacity */
if (! Cnp->high_lbls.on) return;
if (Cnp->hlb_val > 1 && Cnp->high_lbls.gks_bcolor == NhlTRANSPARENT)
_NhlSetFillOpacity(Cnl, 1.0);
break;
case 3:
/* high label text (counted variant; transparent color suppresses it) */
if (! Cnp->high_lbls.on) {
c_ctsetc("CTM"," ");
return;
}
if ( Cnp->high_lbls.gks_color > NhlTRANSPARENT) {
c_pcseti("CC", Cnp->high_lbls.gks_color);
c_pcseti("OC", Cnp->high_lbls.gks_color);
_NhlSetFillOpacity(Cnl, 1.0); /* NCL-1509 */
}
else {
c_ctsetc("CTM"," ");
return;
}
c_pcsetr("PH",(float)Cnp->high_lbls.pheight);
c_pcsetr("PW",(float)Cnp->high_lbls.pwidth);
c_pcsetr("CS",(float)Cnp->high_lbls.cspacing);
c_pcseti("FN",Cnp->high_lbls.font);
c_pcseti("QU",Cnp->high_lbls.quality);
c_pcsetc("FC",Cnp->high_lbls.fcode);
gset_linewidth((float)Cnp->high_lbls.thickness);
strcpy(buf,(char *)Cnp->high_lbls.text);
if ((sub = strstr(buf,"$ZDV$")) == NULL) {
return;
}
c_ctgetr("dva",&dva);
dva /= Cnp->label_scale_factor;
fstr = _NhlFormatFloat(&Cnp->high_lbls.format,dva,
fwidth, sig_digits,
left_sig_digit, exp_field_width,
exp_switch_len, point_pos,
Cnp->high_lbls.fcode[0],
"ContourPlotDraw");
Substitute(sub,5,fstr);
c_ctsetc("CTM",buf);
Cnp->high_lbls.count++;
break;
case 4:
/* high label perimeter line (before drawing) */
if (( Cnp->hlb_val % 2 == 1) &&
(Cnp->high_lbls.perim_on == False || Cnp->high_lbls.perim_lcolor == NhlTRANSPARENT))
_NhlSetLineOpacity(Cnl, 0.0);
else {
gset_line_colr_ind(Cnp->high_lbls.gks_plcolor);
gset_linewidth(Cnp->high_lbls.perim_lthick);
}
break;
case -4:
/* high label perimeter line (after drawing): restore opacity */
if (( Cnp->hlb_val % 2 == 1) &&
(Cnp->high_lbls.perim_on == False || Cnp->high_lbls.perim_lcolor == NhlTRANSPARENT))
_NhlSetLineOpacity(Cnl, 1.0);
break;
case 5:
/* low label text setup (mirrors case 1) */
if (! Cnp->low_lbls.on) {
c_ctsetc("CTM"," ");
return;
}
if (Cnp->low_lbls.gks_color > NhlTRANSPARENT) {
c_pcseti("CC", Cnp->low_lbls.gks_color);
c_pcseti("OC", Cnp->low_lbls.gks_color);
_NhlSetFillOpacity(Cnl, 1.0); /* NCL-1509 */
}
c_pcsetr("PH",(float)Cnp->low_lbls.pheight * LowLabelFactor);
c_pcsetr("PW",(float)Cnp->low_lbls.pwidth * LowLabelFactor);
c_pcsetr("CS",(float)Cnp->low_lbls.cspacing);
c_pcseti("FN",Cnp->low_lbls.font);
c_pcseti("QU",Cnp->low_lbls.quality);
c_pcsetc("FC",Cnp->low_lbls.fcode);
gset_linewidth((float)Cnp->low_lbls.thickness);
strcpy(buf,(char *)Cnp->low_lbls.text);
if ((sub = strstr(buf,"$ZDV$")) == NULL) {
return;
}
c_ctgetr("dva",&dva);
dva /= Cnp->label_scale_factor;
fstr = _NhlFormatFloat(&Cnp->low_lbls.format,dva,
fwidth, sig_digits,
left_sig_digit, exp_field_width,
exp_switch_len, point_pos,
Cnp->low_lbls.fcode[0],
"ContourPlotDraw");
Substitute(sub,5,fstr);
c_ctsetc("CTM",buf);
break;
case 6:
/* low label background box (before drawing) */
if (! Cnp->low_lbls.on) return;
if (Cnp->hlb_val > 1) {
if (Cnp->low_lbls.gks_bcolor == NhlTRANSPARENT)
_NhlSetFillOpacity(Cnl, 0.0);
else
gset_fill_colr_ind(Cnp->low_lbls.gks_bcolor);
}
break;
case -6:
/* low label background box (after drawing): restore opacity */
if (! Cnp->low_lbls.on) return;
if (Cnp->hlb_val > 1 && Cnp->low_lbls.gks_bcolor == NhlTRANSPARENT)
_NhlSetFillOpacity(Cnl, 1.0);
break;
case 7:
/* low label text (counted variant; mirrors case 3) */
if (! Cnp->low_lbls.on) {
c_ctsetc("CTM"," ");
return;
}
if (Cnp->low_lbls.gks_color > NhlTRANSPARENT) {
c_pcseti("CC", Cnp->low_lbls.gks_color);
c_pcseti("OC", Cnp->low_lbls.gks_color);
_NhlSetFillOpacity(Cnl, 1.0); /* NCL-1509 */
}
else {
c_ctsetc("CTM"," ");
return;
}
c_pcsetr("PH",(float)Cnp->low_lbls.pheight * LowLabelFactor);
c_pcsetr("PW",(float)Cnp->low_lbls.pwidth * LowLabelFactor);
c_pcsetr("CS",(float)Cnp->low_lbls.cspacing);
c_pcseti("FN",Cnp->low_lbls.font);
c_pcseti("QU",Cnp->low_lbls.quality);
c_pcsetc("FC",Cnp->low_lbls.fcode);
gset_linewidth((float)Cnp->low_lbls.thickness);
strcpy(buf,(char *)Cnp->low_lbls.text);
if ((sub = strstr(buf,"$ZDV$")) == NULL) {
return;
}
c_ctgetr("dva",&dva);
dva /= Cnp->label_scale_factor;
fstr = _NhlFormatFloat(&Cnp->low_lbls.format,dva,
fwidth, sig_digits,
left_sig_digit, exp_field_width,
exp_switch_len, point_pos,
Cnp->low_lbls.fcode[0],
"ContourPlotDraw");
Substitute(sub,5,fstr);
c_ctsetc("CTM",buf);
Cnp->low_lbls.count++;
break;
case 8:
/* low label perimeter line (before drawing) */
if (( Cnp->hlb_val % 2 == 1) &&
(Cnp->low_lbls.perim_on == False || Cnp->low_lbls.perim_lcolor == NhlTRANSPARENT))
_NhlSetLineOpacity(Cnl, 0.0);
else {
gset_line_colr_ind(Cnp->low_lbls.gks_plcolor);
gset_linewidth(Cnp->low_lbls.perim_lthick);
}
break;
case -8:
/* low label perimeter line (after drawing): restore opacity */
if (( Cnp->hlb_val % 2 == 1) &&
(Cnp->low_lbls.perim_on == False || Cnp->low_lbls.perim_lcolor == NhlTRANSPARENT))
_NhlSetLineOpacity(Cnl, 1.0);
break;
default:
break;
}
return;
}
/*
 * Function: hluctchll
 *
 * Description: Conpack line-label callback.  Sets Plotchar/GKS
 * attributes for contour line labels at each drawing phase: 1 -
 * general text attributes, 2 - label background fill color, 3 -
 * per-level label color and attributes (also counts labels), 4 -
 * label perimeter line.  Does nothing for constant label placement
 * (handled in hluctchcl), and falls through to the stock CTCHLL when
 * no ContourPlot instance is active.
 *
 * In Args: iflg - Conpack label-drawing phase flag
 *
 * Out Args:
 *
 * Return Values:
 *
 * Side Effects: modifies Plotchar and GKS state; increments the line
 * label count.
 */
/*ARGSUSED*/
void (_NHLCALLF(hluctchll,HLUCTCHLL))
#if NhlNeedProto
(
int *iflg
)
#else
(iflg)
int *iflg;
#endif
{
int pai;
static int llcol;
if (Cnp == NULL) {
_NHLCALLF(ctchll,CTCHLL)(iflg);
return;
}
/* constant placement labels are drawn via the dash pattern instead */
if (Cnp->llabel_placement == NhlCONSTANT)
return;
if (*iflg == 1) {
c_pcsetr("PH",(float)Cnp->line_lbls.pheight);
c_pcsetr("PW",(float)Cnp->line_lbls.pwidth);
c_pcsetr("CS",(float)Cnp->line_lbls.cspacing);
c_pcseti("FN",Cnp->line_lbls.font);
c_pcseti("QU",Cnp->line_lbls.quality);
c_pcsetc("FC",Cnp->line_lbls.fcode);
gset_linewidth((float)Cnp->line_lbls.thickness);
}
else if (*iflg == 2) {
if (Cnp->line_lbls.gks_bcolor > NhlTRANSPARENT)
gset_fill_colr_ind(Cnp->line_lbls.gks_bcolor);
}
else if (*iflg == 3) {
/* PAI is the 1-based contour level index for the label being drawn */
c_ctgeti("PAI", &pai);
if (pai > 0) {
pai -= 1;
llcol = Cnp->line_lbls.mono_color ?
Cnp->line_lbls.gks_color :
Cnp->line_lbls.colors[pai];
if (llcol > NhlTRANSPARENT) {
c_pcseti("CC",llcol);
c_pcseti("OC",llcol);
_NhlSetFillOpacity(Cnl, 1.0); /* NCL-1509 */
}
else {
/* transparent label color: blank out the label text */
c_ctsetc("CTM"," ");
}
c_pcsetr("PH",(float)Cnp->line_lbls.pheight);
c_pcsetr("PW",(float)Cnp->line_lbls.pwidth);
c_pcsetr("CS",(float)Cnp->line_lbls.cspacing);
c_pcseti("FN",Cnp->line_lbls.font);
c_pcseti("QU",Cnp->line_lbls.quality);
c_pcsetc("FC",Cnp->line_lbls.fcode);
gset_linewidth((float)Cnp->line_lbls.thickness);
}
Cnp->line_lbls.count++;
}
else if (*iflg == 4) {
gset_line_colr_ind(Cnp->line_lbls.gks_plcolor);
gset_linewidth(Cnp->line_lbls.perim_lthick);
}
return;
}
/* low level overlay mapping functions */
/*
 * OverlayMapXY: forward mapping from this plot's "compc" coordinate
 * space to window coordinates.  When the plot is drawn standalone (no
 * overlay transformation, or it is its own overlay) the plot's own
 * trans object maps compc -> window directly; otherwise the point goes
 * compc -> data through the plot's trans object and then data ->
 * window through the overlay's trans object.  On an intermediate
 * mapping failure (nonzero status) the outputs are left untouched.
 */
static void OverlayMapXY
#if NhlNeedProto
(
NhlTransformLayerPart *tfp,
float *xin,
float *yin,
float* xout,
float* yout)
#else
(tfp,xin,yin,xout,yout)
NhlTransformLayerPart *tfp;
float *xin;
float *yin;
float *xout;
float *yout;
#endif
{
int status = 0;
if (! tfp->overlay_trans_obj ||
tfp->overlay_trans_obj == tfp->trans_obj) {
_NhlCompcToWin(tfp->trans_obj,xin,yin,1,xout,yout,
&status,NULL,NULL);
}
else {
_NhlCompcToData(tfp->trans_obj,xin,yin,1,xout,yout,
&status,NULL,NULL);
if (status) return;
#if 0
fprintf (stderr,"inter: %f %f : ",*xout,*yout);
#endif
_NhlDataToWin(tfp->overlay_trans_obj,
xout,yout,1,xout,yout,&status,NULL,NULL);
}
#if 0
fprintf (stderr,"%f %f : %f %f \n",*xin,*yin,*xout,*yout);
#endif
return;
}
/*
 * OverlayInvMapXY: inverse of OverlayMapXY - maps window coordinates
 * back to this plot's "compc" coordinate space.  Standalone plots map
 * window -> compc directly; overlaid plots go window -> data through
 * the overlay's trans object and then data -> compc through the
 * plot's own trans object.  On an intermediate mapping failure
 * (nonzero status) the outputs are left untouched.
 */
static void OverlayInvMapXY
#if NhlNeedProto
(
NhlTransformLayerPart *tfp,
float *xin,
float *yin,
float* xout,
float* yout)
#else
(tfp,xin,yin,xout,yout)
NhlTransformLayerPart *tfp;
float *xin;
float *yin;
float *xout;
float *yout;
#endif
{
int status = 0;
if (! tfp->overlay_trans_obj ||
tfp->overlay_trans_obj == tfp->trans_obj) {
_NhlWinToCompc(tfp->trans_obj,xin,yin,1,xout,yout,
&status,NULL,NULL);
}
else {
_NhlWinToData(tfp->overlay_trans_obj,
xin,yin,1,xout,yout,
&status,NULL,NULL);
if (status) return;
#if 0
fprintf (stderr,"inter: %f %f : ",*xout,*yout);
#endif
_NhlDataToCompc(tfp->trans_obj,xout,yout,1,xout,yout,
&status,NULL,NULL);
}
#if 0
fprintf (stderr,"%f %f : %f %f \n",*xin,*yin,*xout,*yout);
#endif
return;
}
/*
* Function: hluctmxyz
*
* Description:
*
* In Args:
*
* Out Args:
*
* Return Values:
*
* Side Effects:
*/
/*ARGSUSED*/
void (_NHLCALLF(hluctmxyz,HLUCTMXYZ))
#if NhlNeedProto
(
int *imap,
float *xinp,
float *yinp,
float *zinp,
float *xotp,
float *yotp
)
#else
(imap,xinp,yinp,zinp,xotp,yotp)
int *imap;
float *xinp;
float *yinp;
float *zinp,
float *xotp;
float *yotp;
#endif
{
int status;
float xtmp,ytmp;
float rtod = 57.2957795130823;
if (Cnp == NULL) {
_NHLCALLF(ctmxyz,CTMXYZ)(imap,xinp,yinp,zinp,xotp,yotp);
return;
}
if (*imap == - Nhlcn1DMESHMAPVAL && Tmp->ezmap) {
OverlayInvMapXY(&Cnl->trans,xinp,yinp,xotp,yotp);
}
else if (abs(*imap) != NhlcnMAPVAL) {
*xotp = *xinp;
*yotp = *yinp;
}
else if (Cnl->trans.overlay_status == _tfCurrentOverlayMember &&
! Cnl->trans.do_ndc_overlay) {
if (*imap > 0) {
ytmp = rtod*asin(*zinp/
sqrt(*xinp * *xinp + *yinp * *yinp +
*zinp * *zinp));
if (*xinp == 0 && *yinp == 0) {
xtmp = 0.0;
}
else {
xtmp = rtod * atan2(*yinp,*xinp);
}
if (xtmp < Cnp->xlb)
xtmp += 360.0;
if (xtmp > Cnp->xub)
xtmp -= 360.0;
OverlayMapXY(&Cnl->trans,&xtmp,&ytmp,xotp,yotp);
}
else
OverlayInvMapXY(&Cnl->trans,xinp,yinp,xotp,yotp);
}
else {
/* I don't know if this branch is ever taken any more */
if (*imap > 0) {
ytmp = rtod*asin(*zinp/
sqrt(*xinp * *xinp + *yinp * *yinp +
*zinp * *zinp));
if (*xinp == 0 && *yinp == 0) {
xtmp = 0.0;
}
else {
xtmp = rtod * atan2(*yinp,*xinp);
}
if (xtmp < Cnp->xlb)
xtmp += 360.0;
if (xtmp > Cnp->xub)
xtmp -= 360.0;
_NhlCompcToWin((NhlLayer)Cnp->trans_obj,
&xtmp,&ytmp,1,xotp,yotp,
&status,NULL,NULL);
}
else {
_NhlWinToCompc((NhlLayer)Cnp->trans_obj,
xinp,yinp,1,xotp,yotp,
&status,NULL,NULL);
}
}
return;
}
/*
* Function: load_hluct_routines
*
* Description: Forces the hluct... routines to load from the HLU library
*
* In Args: NhlBoolean flag - should always be False - dont actually
* want to call the routines.
*
* Out Args:
*
* Return Values:
*
* Side Effects:
*/
/*ARGSUSED*/
/*
 * Forces the hluct... routines to be linked in from the HLU library.
 * The caller always passes flag == False, so the calls below are never
 * actually executed; only the references matter to the linker.
 */
static void load_hluct_routines
#if NhlNeedProto
(
	NhlBoolean flag
)
#else
(flag)
	NhlBoolean flag;
#endif
{
	int idum;
	float fdum;

	if (! flag)
		return;

	_NHLCALLF(hluctmxyz,HLUCTMXYZ)(&idum,&fdum,&fdum,&fdum,&fdum,&fdum);
	_NHLCALLF(hluctchll,HLUCTCHLL)(&idum);
	_NHLCALLF(hluctchhl,HLUCTCHHL)(&idum);
	_NHLCALLF(hluctchcl,HLUCTCHCL)(&idum);
	_NHLCALLF(hluctscae,HLUCTSCAE)(&idum,&idum,&idum,&idum,
				       &fdum,&fdum,&fdum,&fdum,
				       &idum,&idum,&idum,&idum);
}
/*
 * Set (or clear, when cnl is NULL) the file-global ContourPlot layer
 * pointers consulted by the Fortran callback routines above.
 */
void _NhlSetCnl
#if NhlNeedProto
(
	NhlContourPlotLayer cnl
)
#else
(cnl)
	NhlContourPlotLayer cnl;
#endif
{
	Cnl = cnl;
	Cnp = cnl ? &cnl->contourplot : NULL;
}
|
GB_emult_08_phase0.c | //------------------------------------------------------------------------------
// GB_emult_08_phase0: find vectors of C to compute for C=A.*B or C<M>=A.*B
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// The eWise multiply of two matrices, C=A.*B, C<M>=A.*B, or C<!M>=A.*B starts
// with this phase, which determines which vectors of C need to be computed.
// On input, A and B are the two matrices being ewise multiplied, and M is the
// optional mask matrix. If present, it is not complemented.
// The M, A, and B matrices are sparse or hypersparse. C will be sparse
// (if Ch is returned NULL) or hypersparse (if Ch is returned non-NULL).
// Ch: the vectors to compute in C. Not allocated, but equal to either
// A->h, B->h, or M->h, or NULL if C is not hypersparse.
// C_to_A: if A is hypersparse, and Ch is not A->h, then C_to_A [k] = kA
// if the kth vector j = Ch [k] is equal to Ah [kA]. If j does not appear
// in A, then C_to_A [k] = -1. Otherwise, C_to_A is returned as NULL.
// C is always hypersparse in this case.
// C_to_B: if B is hypersparse, and Ch is not B->h, then C_to_B [k] = kB
// if the kth vector j = Ch [k] is equal to Bh [kB]. If j does not appear
// in B, then C_to_B [k] = -1. Otherwise, C_to_B is returned as NULL.
// C is always hypersparse in this case.
// C_to_M: if M is hypersparse, and Ch is not M->h, then C_to_M [k] = kM
// if the kth vector j = GBH (Ch, k) is equal to Mh [kM].
// If j does not appear in M, then C_to_M [k] = -1. Otherwise, C_to_M is
// returned as NULL. C is always hypersparse in this case.
// FUTURE:: exploit A==M, B==M, and A==B aliases
#include "GB_emult.h"
GrB_Info GB_emult_08_phase0 // find vectors in C for C=A.*B or C<M>=A.*B
(
    int64_t *p_Cnvec,           // # of vectors to compute in C
    const int64_t *restrict *Ch_handle,  // Ch is M->h, A->h, B->h, or NULL
    size_t *Ch_size_handle,
    int64_t *restrict *C_to_M_handle,    // C_to_M: size Cnvec, or NULL
    size_t *C_to_M_size_handle,
    int64_t *restrict *C_to_A_handle,    // C_to_A: size Cnvec, or NULL
    size_t *C_to_A_size_handle,
    int64_t *restrict *C_to_B_handle,    // C_to_B: size Cnvec, or NULL
    size_t *C_to_B_size_handle,
    int *C_sparsity,            // sparsity structure of C
    // original input:
    const GrB_Matrix M,         // optional mask, may be NULL
    const GrB_Matrix A,
    const GrB_Matrix B,
    GB_Context Context
)
{

    //--------------------------------------------------------------------------
    // check inputs
    //--------------------------------------------------------------------------

    // M, A, and B can be jumbled for this phase

    ASSERT (p_Cnvec != NULL) ;
    ASSERT (Ch_handle != NULL) ;
    ASSERT (Ch_size_handle != NULL) ;
    ASSERT (C_to_A_handle != NULL) ;
    ASSERT (C_to_B_handle != NULL) ;

    ASSERT_MATRIX_OK_OR_NULL (M, "M for emult phase0", GB0) ;
    ASSERT (!GB_ZOMBIES (M)) ;
    ASSERT (GB_JUMBLED_OK (M)) ;    // pattern not accessed
    ASSERT (!GB_PENDING (M)) ;

    ASSERT_MATRIX_OK (A, "A for emult phase0", GB0) ;
    ASSERT (!GB_ZOMBIES (A)) ;
    // fixed: this group checks A, so assert A (was GB_JUMBLED_OK (B))
    ASSERT (GB_JUMBLED_OK (A)) ;    // pattern not accessed
    ASSERT (!GB_PENDING (A)) ;

    ASSERT_MATRIX_OK (B, "B for emult phase0", GB0) ;
    ASSERT (!GB_ZOMBIES (B)) ;
    // fixed: this group checks B, so assert B (was GB_JUMBLED_OK (A))
    ASSERT (GB_JUMBLED_OK (B)) ;    // pattern not accessed
    ASSERT (!GB_PENDING (B)) ;

    ASSERT (A->vdim == B->vdim) ;
    ASSERT (A->vlen == B->vlen) ;
    ASSERT (GB_IMPLIES (M != NULL, A->vdim == M->vdim)) ;
    ASSERT (GB_IMPLIES (M != NULL, A->vlen == M->vlen)) ;

    //--------------------------------------------------------------------------
    // initializations
    //--------------------------------------------------------------------------

    (*p_Cnvec) = 0 ;
    (*Ch_handle) = NULL ;
    (*Ch_size_handle) = 0 ;
    if (C_to_M_handle != NULL)
    {
        (*C_to_M_handle) = NULL ;
    }
    (*C_to_A_handle) = NULL ;
    (*C_to_B_handle) = NULL ;

    ASSERT ((*C_sparsity) == GxB_SPARSE || (*C_sparsity) == GxB_HYPERSPARSE) ;

    const int64_t *restrict Ch = NULL ; size_t Ch_size = 0 ;
    int64_t *restrict C_to_M = NULL ; size_t C_to_M_size = 0 ;
    int64_t *restrict C_to_A = NULL ; size_t C_to_A_size = 0 ;
    int64_t *restrict C_to_B = NULL ; size_t C_to_B_size = 0 ;

    //--------------------------------------------------------------------------
    // get content of M, A, and B
    //--------------------------------------------------------------------------

    int64_t n = A->vdim ;
    int64_t Anvec = A->nvec ;
    int64_t vlen = A->vlen ;
    const int64_t *restrict Ah = A->h ;
    bool A_is_hyper = (Ah != NULL) ;

    int64_t Bnvec = B->nvec ;
    const int64_t *restrict Bh = B->h ;
    bool B_is_hyper = (Bh != NULL) ;

    int64_t Mnvec = 0 ;
    const int64_t *restrict Mh = NULL ;
    bool M_is_hyper = false ;

    if (M != NULL)
    {
        Mnvec = M->nvec ;
        Mh = M->h ;
        M_is_hyper = (Mh != NULL) ;
    }

    //--------------------------------------------------------------------------
    // determine how to construct the vectors of C
    //--------------------------------------------------------------------------

    if (M != NULL)
    {

        //----------------------------------------------------------------------
        // 8 cases to consider: A, B, M can each be hyper or sparse
        //----------------------------------------------------------------------

        // Mask is present and not complemented

        if (A_is_hyper)
        {
            if (B_is_hyper)
            {
                if (M_is_hyper)
                {

                    //----------------------------------------------------------
                    // (1) A hyper, B hyper, M hyper: C hyper
                    //----------------------------------------------------------

                    // Ch = smaller of Mh, Bh, Ah
                    int64_t nvec = GB_IMIN (Anvec, Bnvec) ;
                    nvec = GB_IMIN (nvec, Mnvec) ;
                    if (nvec == Anvec)
                    {
                        Ch = Ah ; Ch_size = A->h_size ;
                    }
                    else if (nvec == Bnvec)
                    {
                        Ch = Bh ; Ch_size = B->h_size ;
                    }
                    else // (nvec == Mnvec)
                    {
                        Ch = Mh ; Ch_size = M->h_size ;
                    }

                }
                else
                {

                    //----------------------------------------------------------
                    // (2) A hyper, B hyper, M sparse: C hyper
                    //----------------------------------------------------------

                    // Ch = smaller of Ah, Bh
                    if (Anvec <= Bnvec)
                    {
                        Ch = Ah ; Ch_size = A->h_size ;
                    }
                    else
                    {
                        Ch = Bh ; Ch_size = B->h_size ;
                    }
                }

            }
            else
            {

                if (M_is_hyper)
                {

                    //----------------------------------------------------------
                    // (3) A hyper, B sparse, M hyper: C hyper
                    //----------------------------------------------------------

                    // Ch = smaller of Mh, Ah
                    if (Anvec <= Mnvec)
                    {
                        Ch = Ah ; Ch_size = A->h_size ;
                    }
                    else
                    {
                        Ch = Mh ; Ch_size = M->h_size ;
                    }

                }
                else
                {

                    //----------------------------------------------------------
                    // (4) A hyper, B sparse, M sparse: C hyper
                    //----------------------------------------------------------

                    Ch = Ah ; Ch_size = A->h_size ;
                }
            }

        }
        else
        {

            if (B_is_hyper)
            {
                if (M_is_hyper)
                {

                    //----------------------------------------------------------
                    // (5) A sparse, B hyper, M hyper: C hyper
                    //----------------------------------------------------------

                    // Ch = smaller of Mh, Bh
                    if (Bnvec <= Mnvec)
                    {
                        Ch = Bh ; Ch_size = B->h_size ;
                    }
                    else
                    {
                        Ch = Mh ; Ch_size = M->h_size ;
                    }

                }
                else
                {

                    //----------------------------------------------------------
                    // (6) A sparse, B hyper, M sparse: C hyper
                    //----------------------------------------------------------

                    Ch = Bh ; Ch_size = B->h_size ;

                }
            }
            else
            {

                if (M_is_hyper)
                {

                    //----------------------------------------------------------
                    // (7) A sparse, B sparse, M hyper: C hyper
                    //----------------------------------------------------------

                    Ch = Mh ; Ch_size = M->h_size ;

                }
                else
                {

                    //----------------------------------------------------------
                    // (8) A sparse, B sparse, M sparse: C sparse
                    //----------------------------------------------------------

                    Ch = NULL ;
                }
            }
        }

    }
    else
    {

        //----------------------------------------------------------------------
        // 4 cases to consider: A, B can be hyper or sparse
        //----------------------------------------------------------------------

        // Mask is not present, or present and complemented.

        if (A_is_hyper)
        {
            if (B_is_hyper)
            {

                //--------------------------------------------------------------
                // (1) A hyper, B hyper: C hyper
                //--------------------------------------------------------------

                // Ch = smaller of Ah, Bh
                if (Anvec <= Bnvec)
                {
                    Ch = Ah ; Ch_size = A->h_size ;
                }
                else
                {
                    Ch = Bh ; Ch_size = B->h_size ;
                }
            }
            else
            {

                //--------------------------------------------------------------
                // (2) A hyper, B sparse: C hyper
                //--------------------------------------------------------------

                Ch = Ah ; Ch_size = A->h_size ;

            }
        }
        else
        {

            if (B_is_hyper)
            {

                //--------------------------------------------------------------
                // (3) A sparse, B hyper: C hyper
                //--------------------------------------------------------------

                Ch = Bh ; Ch_size = B->h_size ;

            }
            else
            {

                //--------------------------------------------------------------
                // (4) A sparse, B sparse: C sparse
                //--------------------------------------------------------------

                Ch = NULL ;
            }
        }
    }

    //--------------------------------------------------------------------------
    // find Cnvec
    //--------------------------------------------------------------------------

    int64_t Cnvec ;

    if (Ch == NULL)
    {
        // C is sparse
        (*C_sparsity) = GxB_SPARSE ;
        Cnvec = n ;
    }
    else
    {
        // C is hypersparse; one of A, B, or M are hypersparse
        ASSERT (A_is_hyper || B_is_hyper || M_is_hyper) ;
        (*C_sparsity) = GxB_HYPERSPARSE ;
        if (Ch == Ah)
        {
            Cnvec = Anvec ;
        }
        else if (Ch == Bh)
        {
            Cnvec = Bnvec ;
        }
        else // (Ch == Mh)
        {
            Cnvec = Mnvec ;
        }
    }

    //--------------------------------------------------------------------------
    // determine the number of threads to use
    //--------------------------------------------------------------------------

    GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ;
    int nthreads = GB_nthreads (Cnvec, chunk, nthreads_max) ;

    //--------------------------------------------------------------------------
    // construct C_to_M mapping
    //--------------------------------------------------------------------------

    if (M_is_hyper && Ch != Mh)
    {
        // allocate C_to_M
        C_to_M = GB_MALLOC_WORK (Cnvec, int64_t, &C_to_M_size) ;
        if (C_to_M == NULL)
        {
            // out of memory
            return (GrB_OUT_OF_MEMORY) ;
        }

        // compute C_to_M
        ASSERT (Ch != NULL) ;

        const int64_t *restrict Mp = M->p ;

        int64_t k ;
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (k = 0 ; k < Cnvec ; k++)
        {
            int64_t pM, pM_end, kM = 0 ;
            int64_t j = Ch [k] ;
            GB_lookup (true, Mh, Mp, vlen, &kM, Mnvec-1, j, &pM, &pM_end) ;
            C_to_M [k] = (pM < pM_end) ? kM : -1 ;
        }
    }

    //--------------------------------------------------------------------------
    // construct C_to_A mapping
    //--------------------------------------------------------------------------

    if (A_is_hyper && Ch != Ah)
    {
        // allocate C_to_A
        C_to_A = GB_MALLOC_WORK (Cnvec, int64_t, &C_to_A_size) ;
        if (C_to_A == NULL)
        {
            // out of memory
            GB_FREE_WORK (&C_to_M, C_to_M_size) ;
            return (GrB_OUT_OF_MEMORY) ;
        }

        // compute C_to_A
        ASSERT (Ch != NULL) ;
        const int64_t *restrict Ap = A->p ;

        int64_t k ;
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (k = 0 ; k < Cnvec ; k++)
        {
            int64_t pA, pA_end, kA = 0 ;
            int64_t j = Ch [k] ;
            GB_lookup (true, Ah, Ap, vlen, &kA, Anvec-1, j, &pA, &pA_end) ;
            C_to_A [k] = (pA < pA_end) ? kA : -1 ;
        }
    }

    //--------------------------------------------------------------------------
    // construct C_to_B mapping
    //--------------------------------------------------------------------------

    if (B_is_hyper && Ch != Bh)
    {
        // allocate C_to_B
        C_to_B = GB_MALLOC_WORK (Cnvec, int64_t, &C_to_B_size) ;
        if (C_to_B == NULL)
        {
            // out of memory
            GB_FREE_WORK (&C_to_M, C_to_M_size) ;
            GB_FREE_WORK (&C_to_A, C_to_A_size) ;
            return (GrB_OUT_OF_MEMORY) ;
        }

        // compute C_to_B
        ASSERT (Ch != NULL) ;
        const int64_t *restrict Bp = B->p ;

        int64_t k ;
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (k = 0 ; k < Cnvec ; k++)
        {
            int64_t pB, pB_end, kB = 0 ;
            int64_t j = Ch [k] ;
            GB_lookup (true, Bh, Bp, vlen, &kB, Bnvec-1, j, &pB, &pB_end) ;
            C_to_B [k] = (pB < pB_end) ? kB : -1 ;
        }
    }

    //--------------------------------------------------------------------------
    // return result
    //--------------------------------------------------------------------------

    (*p_Cnvec) = Cnvec ;
    (*Ch_handle) = Ch ;
    (*Ch_size_handle) = Ch_size ;
    if (C_to_M_handle != NULL)
    {
        (*C_to_M_handle) = C_to_M ;
        (*C_to_M_size_handle) = C_to_M_size ;
    }
    (*C_to_A_handle) = C_to_A ; (*C_to_A_size_handle) = C_to_A_size ;
    (*C_to_B_handle) = C_to_B ; (*C_to_B_size_handle) = C_to_B_size ;

    //--------------------------------------------------------------------------
    // The code below describes what the output contains:
    //--------------------------------------------------------------------------

    #ifdef GB_DEBUG
    ASSERT (A != NULL) ;        // A and B are always present
    ASSERT (B != NULL) ;
    int64_t jlast = -1 ;
    for (int64_t k = 0 ; k < Cnvec ; k++)
    {

        // C(:,j) is in the list, as the kth vector
        int64_t j ;
        if (Ch == NULL)
        {
            // C will be constructed as sparse
            j = k ;
        }
        else
        {
            // C will be constructed as hypersparse
            j = Ch [k] ;
        }

        // vectors j in Ch are sorted, and in the range 0:n-1
        ASSERT (j >= 0 && j < n) ;
        ASSERT (j > jlast) ;
        jlast = j ;

        // see if A (:,j) exists
        if (C_to_A != NULL)
        {
            // A is hypersparse
            ASSERT (A_is_hyper) ;   // fixed: missing ';' broke debug builds
            int64_t kA = C_to_A [k] ;
            ASSERT (kA >= -1 && kA < A->nvec) ;
            if (kA >= 0)
            {
                int64_t jA = A->h [kA] ;
                ASSERT (j == jA) ;
            }
        }
        else if (A_is_hyper)
        {
            // A is hypersparse, and Ch is a shallow copy of A->h
            ASSERT (Ch == A->h) ;
        }

        // see if B (:,j) exists
        if (C_to_B != NULL)
        {
            // B is hypersparse
            ASSERT (B_is_hyper) ;   // fixed: missing ';' broke debug builds
            int64_t kB = C_to_B [k] ;
            ASSERT (kB >= -1 && kB < B->nvec) ;
            if (kB >= 0)
            {
                int64_t jB = B->h [kB] ;
                ASSERT (j == jB) ;
            }
        }
        else if (B_is_hyper)
        {
            // B is hypersparse, and Ch is a shallow copy of B->h
            ASSERT (Ch == B->h) ;
        }

        // see if M (:,j) exists
        if (Ch != NULL && M != NULL && Ch == M->h)
        {
            // Ch is the same as Mh
            ASSERT (M != NULL) ;
            ASSERT (M->h != NULL) ;
            ASSERT (Ch != NULL && M->h != NULL && Ch [k] == M->h [k]) ;
            ASSERT (C_to_M == NULL) ;
        }
        else if (C_to_M != NULL)
        {
            // M is present and hypersparse
            ASSERT (M != NULL) ;
            ASSERT (M->h != NULL) ;
            int64_t kM = C_to_M [k] ;
            ASSERT (kM >= -1 && kM < M->nvec) ;
            if (kM >= 0)
            {
                int64_t jM = M->h [kM] ;
                ASSERT (j == jM) ;
            }
        }
        else
        {
            // M is not present, or in sparse form
            ASSERT (M == NULL || M->h == NULL) ;
        }
    }
    #endif

    return (GrB_SUCCESS) ;
}
|
declare_target_include.h | #pragma omp declare target
void zyx();
#pragma omp end declare target
|
sv.c |
#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>
#include <stdbool.h>
#include <stdint.h>
#include <inttypes.h>
#include <malloc.h>
#include <string.h>
#include "timer.h"
#include "stat.h"
#include "faultInjection.h"
#include "sv.h"
/* Approximate count of memory accesses performed by the sweep routines
 * below; reset by the *_Alg_Sync drivers and reported per iteration
 * through stat->SvMemCount. */
static long long MemAccessCount;
/*
 * Return the largest component label stored in lp_state->CC, which the
 * callers use as a proxy for the number of components.
 * Fix: use size_t for the vertex index (was a signed int compared
 * against the size_t vertex count).
 */
int numComponent(graph_t *graph, lp_state_t *lp_state)
{
	size_t nv = graph->numVertices;
	uint32_t* CC = lp_state->CC;
	int numComponents = 0;
	for (size_t i = 0; i < nv; ++i)
	{
		if (CC[i] > numComponents)
		{
			numComponents = CC[i];
		}
	}
	return numComponents;
}
/*
 * Allocate the 64-byte-aligned per-vertex arrays of the
 * label-propagation state.
 * Returns 0 on success, -1 if any allocation fails; on failure all
 * partially allocated arrays are released and the pointers are NULLed.
 */
int alloc_lp_state(graph_t *graph, lp_state_t *lp_state)
{
	size_t numVertices = graph->numVertices;
	lp_state->CC = (uint32_t*)memalign(64, numVertices * sizeof(uint32_t));
	lp_state->Ps = (uint32_t*)memalign(64, numVertices * sizeof(uint32_t));
	lp_state->P  = (uint32_t*)memalign(64, numVertices * sizeof(uint32_t));
	lp_state->Cr = (int*)memalign(64, numVertices * sizeof(int));
	if (lp_state->CC == NULL || lp_state->Ps == NULL ||
	    lp_state->P == NULL || lp_state->Cr == NULL)
	{
		/* release whatever was obtained so the caller can bail out */
		free(lp_state->CC);
		free(lp_state->Ps);
		free(lp_state->P);
		free(lp_state->Cr);
		lp_state->CC = NULL;
		lp_state->Ps = NULL;
		lp_state->P = NULL;
		lp_state->Cr = NULL;
		return -1;
	}
	return 0;
}
/*
 * Initialize the label-propagation state: every vertex starts in its
 * own component (CC[i] = i), with no parent edge recorded
 * (Ps[i] = -1, which wraps to UINT32_MAX as a sentinel) and a zeroed
 * correction counter.  The P array is left uninitialized, as before.
 * Fix: use size_t for the loop index (was a signed int compared
 * against the size_t vertex count).
 */
int init_lp_state(graph_t *graph, lp_state_t *lp_state)
{
	size_t numVertices = graph->numVertices;
	for (size_t i = 0; i < numVertices; ++i)
	{
		lp_state->CC[i] = i;
		lp_state->Ps[i] = -1;	/* sentinel: no parent edge */
		lp_state->Cr[i] = 0;
	}
	return 0;
}
/*
 * Release all arrays of a label-propagation state.
 * Fix: the function is declared to return int but had no return
 * statement, so reading its return value was undefined behavior.
 */
int free_lp_state(lp_state_t *lp_state)
{
	free(lp_state->CC);
	free(lp_state->Ps);
	free(lp_state->P);
	free(lp_state->Cr);
	return 0;
}
/*
 * Print the parent tree as a directed graph in dot format, with 'name'
 * used as the graph identifier and title.  Output is produced only when
 * the PRINT_GRAPH environment variable is set to a nonzero value.
 * Fix: the function is declared int but fell off the end without a
 * return statement (UB when the caller reads the result).
 */
int printParentTree(char *name, graph_t* graph, lp_state_t *lp_state)
{
	if (getenv("PRINT_GRAPH") != NULL)
	{
		int ind = (int) atoi(getenv("PRINT_GRAPH"));
		if(ind==0) return 0;
	}
	else
	{
		return 0;
	}
	size_t nv = graph->numVertices;
	printf("digraph %s { \n", name);
	printf("rankdir=BT\n");
	uint32_t *CC = lp_state->CC;
	for (uint32_t v = 0; v < nv; v++)
	{
		/* Ps[v] == -1 (UINT32_MAX) marks a root; otherwise Ps[v] is the
		 * local edge index of the parent within v's adjacency list. */
		if (graph->off[v] + lp_state->Ps[v] < graph->numEdges || lp_state->Ps[v] == -1)
		{
			uint32_t Pv = lp_state->Ps[v] == -1 ? v : graph->ind[graph->off[v] + lp_state->Ps[v]];
			printf("\"%d|%d\" -> \"%d|%d\";\n", v, CC[v], Pv, CC[Pv] );
		}
	}
	printf("labelloc=\"t\"\n");
	printf("label=\"%s\"\n", name);
	printf("}\n");
	return 0;
}
/*
 * Print the undirected input graph in dot format (each edge emitted
 * once, from the higher-numbered endpoint).  Output is produced only
 * when the PRINT_GRAPH environment variable is set to a nonzero value.
 * Fix: the function is declared int but fell off the end without a
 * return statement (UB when the caller reads the result).
 */
int printGraph(char *name, graph_t* graph, lp_state_t *lp_state)
{
	if (getenv("PRINT_GRAPH") != NULL)
	{
		int ind = (int) atoi(getenv("PRINT_GRAPH"));
		if(ind==0) return 0;
	}
	else
	{
		return 0;
	}
	size_t nv = graph->numVertices;
	uint32_t* off = graph->off;
	uint32_t* ind = graph->ind;
	printf("graph input { \n");
	printf("rankdir=BT\n");
	for (uint32_t v = 0; v < nv; v++)
	{
		uint32_t *vind = &ind[off[v]];
		size_t vdeg = off[v + 1] - off[v];
		for (size_t edge = 0; edge < vdeg; edge++)
		{
			const uint32_t u = vind[edge];
			/* emit each undirected edge exactly once */
			if (v > u)
				printf("%d -- %d;\n", v, u );
		}
	}
	printf("labelloc=\"t\"\n");
	printf("label=\"%s\"\n", name);
	printf("}\n");
	return 0;
}
/*test-1: Random Init
Starts with random starting state;
runs label propagation algorithm, checks the output with baseline;
*/
/*
 * Test-1 helper: fill CC and Ps with random values to simulate an
 * arbitrary (possibly corrupted) starting state.
 * Fix: use size_t for the loop index (was a signed int compared
 * against the size_t vertex count).
 */
int rand_lp_state( graph_t *graph,
                   lp_state_t *lp_state)
{
	size_t numVertices = graph->numVertices;
	for (size_t i = 0; i < numVertices; ++i)
	{
		lp_state->CC[i] = rand();
		lp_state->Ps[i] = rand();
	}
	return 0;
}
/*test-2
Random-flip output test;
Run a correct LP algorithm and randomly flip some of the output at the end.
*/
/*
 * Test-2 helper: after a correct run, flip bytes of the CC and Ps
 * output arrays at random, each with probability fprob, via
 * FaultInjectByte.
 * Fix: use size_t for the loop index (was a signed int compared
 * against the size_t vertex count).
 */
int rand_flip_output( double fprob, /* probability of flipping an entry */
                      graph_t *graph,
                      lp_state_t *lp_state)
{
	size_t numVertices = graph->numVertices;
	for (size_t i = 0; i < numVertices; ++i)
	{
		uint32_t u = lp_state->CC[i];
		lp_state->CC[i] = FaultInjectByte(u, fprob);
		lp_state->Ps[i] = FaultInjectByte(lp_state->Ps[i], fprob);
	}
	return 0;
}
// bool FFSVSweep_Async(size_t nv, uint32_t* component_map, uint32_t* off, uint32_t* ind)
/*
 * One asynchronous label-propagation sweep: each vertex adopts the
 * smallest label among its neighbors (updates are visible within the
 * same sweep) and records the local edge index of the neighbor it
 * adopted from in Ps.  Returns true if any label changed.
 */
bool FFSVSweep_Async(graph_t *graph, lp_state_t *lp_state)
{
	const size_t numVerts = graph->numVertices;
	const uint32_t *offsets = graph->off;
	const uint32_t *adj = graph->ind;
	uint32_t *labels = lp_state->CC;
	bool updated = false;

	for (size_t v = 0; v < numVerts; v++)
	{
		const size_t first = offsets[v];
		const size_t last = offsets[v + 1];
		for (size_t e = first; e < last; e++)
		{
			const uint32_t nbr = adj[e];
			if (labels[nbr] < labels[v])
			{
				lp_state->Ps[v] = e - first;	/* local edge index */
				labels[v] = labels[nbr];
				updated = true;
			}
		}
	}
	/* shortcutting goes here */
	return updated;
}
/*
 * Driver for the asynchronous fault-free algorithm: sweep until a full
 * pass makes no label change, then record the iteration count in stat.
 * Always returns 0.
 * Cleanup: removed unused locals (numVertices, numEdges, off, ind).
 */
int FFSVAlg_Async( lp_state_t *lp_state, graph_t *graph,
                   stat_t* stat)
{
	bool changed;
	size_t iteration = 0;
	do
	{
		changed = FFSVSweep_Async(graph, lp_state);
		iteration += 1;
	}
	while (changed);
	/* updating stats */
	stat->numIteration = iteration;
	return 0;
}
/*fault tolerant SV sweep */
/*
 * One synchronous label-propagation sweep without parent tracking:
 * reads neighbor labels from the previous iteration's array (cc_prev)
 * and lowers labels in cc_curr.  Accumulates an access estimate in the
 * file-global MemAccessCount.  Returns the number of updates made.
 */
int FFWoSVSweep_Sync(size_t nv, uint32_t* cc_prev, uint32_t* cc_curr,
                     uint32_t* off, uint32_t* ind)
{
	int updates = 0;
	/* #pragma omp parallel for */
	for (size_t v = 0; v < nv; v++)
	{
		const size_t begin = off[v];
		const size_t end = off[v + 1];
		MemAccessCount += 5;
		for (size_t e = begin; e < end; e++)
		{
			const uint32_t u = ind[e];
			MemAccessCount += 2;
			if (cc_prev[u] < cc_curr[v])
			{
				cc_curr[v] = cc_prev[u];
				updates++;
			}
		}
	}
	/* shortcutting goes here */
	return updates;
}
/*fault tolerant SV sweep */
/*
 * One synchronous label-propagation sweep with parent tracking: like
 * FFWoSVSweep_Sync, but additionally records in m_curr[v] the local
 * edge index of the neighbor whose label was adopted.  Returns the
 * number of updates made.
 */
int FFSVSweep_Sync(size_t nv, uint32_t* cc_prev, uint32_t* cc_curr, uint32_t* m_curr,
                   uint32_t* off, uint32_t* ind)
{
	int updates = 0;
	for (size_t v = 0; v < nv; v++)
	{
		const size_t begin = off[v];
		const size_t end = off[v + 1];
		MemAccessCount += 6;
		for (size_t e = begin; e < end; e++)
		{
			const uint32_t u = ind[e];
			MemAccessCount += 2;
			if (cc_prev[u] < cc_curr[v])
			{
				m_curr[v] = e - begin;	/* local edge index */
				cc_curr[v] = cc_prev[u];
				updates++;
			}
		}
	}
	/* shortcutting goes here */
	return updates;
}
// FaultFreeSVMain
/*
 * Synchronous fault-free driver without parent tracking: double-buffers
 * the label array, sweeping until no change.  Per-iteration time and
 * memory-access counts are stored in stat; the previous-state buffer is
 * freed and the final state is returned by value.
 * Fixes/cleanup: removed unused locals (numEdges, changed,
 * num_corrections); the DEBUG printf used %d for a size_t (UB) -- now %zu.
 */
lp_state_t FFWoSVAlg_Sync( graph_t *graph,
                           stat_t* stat /*for counting stats of each iteration*/
                         )
{
	size_t numVertices = graph->numVertices;
	uint32_t* off = graph->off;
	uint32_t* ind = graph->ind;
	/* initialize */
	MemAccessCount = 0;
	lp_state_t lps_curr, lps_prev;
	alloc_lp_state(graph, &lps_curr);
	alloc_lp_state(graph, &lps_prev);
	init_lp_state(graph, &lps_curr);
	init_lp_state(graph, &lps_prev);
	uint32_t* cc_curr = lps_curr.CC;
	uint32_t* cc_prev = lps_prev.CC;
	int num_changes;
	size_t iteration = 0;
	do
	{
		long long prMemAccessCount = MemAccessCount;
		/* snapshot current labels so the sweep reads a stable copy */
		memcpy(cc_prev, cc_curr, numVertices * sizeof(uint32_t));
		tic();
		num_changes = FFWoSVSweep_Sync(numVertices, cc_prev, cc_curr, off, ind);
		stat->SvTime[iteration] = toc();
		stat->SvMemCount[iteration] = MemAccessCount - prMemAccessCount;
		iteration += 1;
	}
	while (num_changes);
	stat->numIteration = iteration;
#ifdef DEBUG
	printf("//Number of iterations for fault free=%zu\n", iteration);
#endif
	free_lp_state(&lps_prev);
	return lps_curr;
}
/*
 * Synchronous fault-free driver with parent tracking: like
 * FFWoSVAlg_Sync, but starts from the caller-supplied state lps_curr,
 * also double-buffers the Ps array, and stops after at most max_iter
 * iterations (assumed non-negative) even if labels are still changing.
 * Fixes/cleanup: removed unused locals (numEdges, changed,
 * num_corrections); the DEBUG printf used %d for a size_t (UB) -- now %zu.
 */
lp_state_t FFSVAlg_Sync( graph_t *graph,
                         lp_state_t lps_curr,
                         stat_t* stat, /*for counting stats of each iteration*/
                         int max_iter /*controlling maximum number of iterations*/
                       )
{
	size_t numVertices = graph->numVertices;
	uint32_t* off = graph->off;
	uint32_t* ind = graph->ind;
	/* initialize */
	MemAccessCount = 0;
	lp_state_t lps_prev;
	alloc_lp_state(graph, &lps_prev);
	init_lp_state(graph, &lps_prev);
	uint32_t* cc_curr = lps_curr.CC;
	uint32_t* cc_prev = lps_prev.CC;
	uint32_t* m_curr = lps_curr.Ps;
	uint32_t* m_prev = lps_prev.Ps;
	int num_changes;
	size_t iteration = 0;
	do
	{
		long long prMemAccessCount = MemAccessCount;
		/* snapshot current labels and parents for a stable read copy */
		memcpy(cc_prev, cc_curr, numVertices * sizeof(uint32_t));
		memcpy(m_prev, m_curr, numVertices * sizeof(uint32_t));
		tic();
		num_changes = FFSVSweep_Sync(numVertices, cc_prev, cc_curr, m_curr, off, ind);
		stat->SvTime[iteration] = toc();
		stat->SvMemCount[iteration] = MemAccessCount - prMemAccessCount;
		iteration += 1;
	}
	/* NOTE(review): max_iter is converted to size_t here; a negative
	 * max_iter would effectively disable the bound -- assumed >= 0 */
	while (num_changes && iteration <= (size_t) max_iter);
	stat->numIteration = iteration;
#ifdef DEBUG
	printf("//Number of iterations for fault free=%zu\n", iteration);
#endif
	free_lp_state(&lps_prev);
	return lps_curr;
}
|
GB_unop__log1p_fc32_fc32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__log1p_fc32_fc32)
// op(A') function: GB (_unop_tran__log1p_fc32_fc32)
// C type: GxB_FC32_t
// A type: GxB_FC32_t
// cast: GxB_FC32_t cij = aij
// unaryop: cij = GB_clog1pf (aij)
#define GB_ATYPE \
GxB_FC32_t
#define GB_CTYPE \
GxB_FC32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
GxB_FC32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GB_clog1pf (x) ;
// casting
#define GB_CAST(z, aij) \
GxB_FC32_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GxB_FC32_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
GxB_FC32_t z = aij ; \
Cx [pC] = GB_clog1pf (z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LOG1P || GxB_NO_FC32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply the log1p unary operator to every present
// entry of A.  A and C share the type GxB_FC32_t, so the "cast" step is
// the identity and is folded into a single assignment.
GrB_Info GB (_unop_apply__log1p_fc32_fc32)
(
    GxB_FC32_t *Cx,             // Cx and Ax may be aliased
    const GxB_FC32_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // all entries present: apply the operator to every position
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            // no typecast needed: A and C are both GxB_FC32_t
            Cx [p] = GB_clog1pf (Ax [p]) ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            Cx [p] = GB_clog1pf (Ax [p]) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply the log1p operator.
// The entire kernel body lives in the shared template included below,
// which is specialized by the GB_* macros defined earlier in this file.
GrB_Info GB (_unop_tran__log1p_fc32_fc32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,      // per-task workspaces for the template
    const int64_t *restrict A_slice,    // how A is partitioned across tasks
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    // this operator/type combination was compiled out (see GB_DISABLE above)
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
ColorChartDetectionUtils.h | #ifndef CAPTURE3_COLOR_CHART_UTILS_H
#define CAPTURE3_COLOR_CHART_UTILS_H
#include <cmath>
#include <vector>
#include <omp.h>
#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/calib3d.hpp>
#include "../engine/objects/colorChart/ColorChart.h"
#include "../engine/objects/colorChart/ColorChartPatch.h"
#include "../constants/ColorChartConstants.h"
#include "../constants/ResourceConstants.h"
#include "../engine/objects/image/Image.h"
#include "ColorUtils.h"
namespace Capture3
{
static ColorChart *detectColorChart(const Image *image)
{
// Create profile
auto *chart = new ColorChart();
const std::vector<ColorChartPatch *> &patches = chart->getPatches();
/**************************************************
* Then we iterate over the captured image and
* find the min and max values for each channel,
* these values are needed to apply auto-contrast.
**************************************************/
// Fetch image size
const double *imageData = image->getRGB().getData();
const cv::Mat &imageMat = image->getRGB().getMat();
const unsigned int imageWidth = image->getSize().getWidth();
const unsigned int imageHeight = image->getSize().getHeight();
const unsigned int imageArea = image->getSize().getArea();
const cv::Size &imageSize = image->getSize().getSize();
// Min and max values
double valueMinR = 1;
double valueMinG = 1;
double valueMinB = 1;
double valueMaxR = 0;
double valueMaxG = 0;
double valueMaxB = 0;
// Find min and max values (for auto contrast)
#pragma omp parallel for schedule(static) \
reduction(min:valueMinR), \
reduction(min:valueMinG), \
reduction(min:valueMinB), \
reduction(max:valueMaxR), \
reduction(max:valueMaxG), \
reduction(max:valueMaxB)
for (unsigned int i = 0; i < imageArea; i++) {
const unsigned int index = i * 3;
const double colorR = imageData[index + 0];
const double colorG = imageData[index + 1];
const double colorB = imageData[index + 2];
valueMinR = colorR < valueMinR ? colorR : valueMinR;
valueMinG = colorG < valueMinG ? colorG : valueMinG;
valueMinB = colorB < valueMinB ? colorB : valueMinB;
valueMaxR = colorR > valueMaxR ? colorR : valueMaxR;
valueMaxG = colorG > valueMaxG ? colorG : valueMaxG;
valueMaxB = colorB > valueMaxB ? colorB : valueMaxB;
}
/**************************************************
* We create a greyscale image and detect the scene
* contours using Canny-Edge detection. After we
* found the contours, we filter out the color
* squares of the color chart
**************************************************/
// Create greyscale image
cv::Mat scene(imageSize, CV_8UC1, cv::Scalar(0));
unsigned char *sceneData = scene.data;
// Calculate greyscale image (auto contrast)
#pragma omp parallel for schedule(static)
for (unsigned int i = 0; i < imageArea; i++) {
const unsigned int index = i * 3;
const double colorR = (imageData[index + 0] - valueMinR) / (valueMaxR - valueMinR);
const double colorG = (imageData[index + 1] - valueMinG) / (valueMaxG - valueMinG);
const double colorB = (imageData[index + 2] - valueMinB) / (valueMaxB - valueMinB);
const double luma = (0.2126 * colorR) + (0.7152 * colorG) + (0.0722 * colorB);
sceneData[i] = (unsigned char) lround(luma * 255.0);
}
// Find contours from edges
cv::GaussianBlur(scene, scene, cv::Size(3, 3), 0, 0);
cv::Canny(scene, scene, 10, 30, 3);
std::vector<std::vector<cv::Point> > contours;
cv::findContours(scene, contours, cv::RETR_LIST, cv::CHAIN_APPROX_SIMPLE);
scene.release();
// Check if we found any contours
const unsigned int contoursCount = (unsigned int) contours.size();
if (contoursCount > 0) {
// Array that will contain the matched rotated rectangles
// and the original corner points of the contour. We will
// need this data to calculate the position of the color chart.
std::vector<cv::Point> points;
std::vector<cv::RotatedRect> rectangles;
double rectanglesArea = 0;
double rectanglesRatio = 0;
// Iterate over contours
for (unsigned int i = 0; i < contoursCount; i++) {
// Fetch contour, calculate perimeter
const cv::Mat countour(contours[i]);
const double contourLength = cv::arcLength(countour, true);
// Approximate the contour to a polygon
std::vector<cv::Point> approx;
cv::approxPolyDP(countour, approx, contourLength * 0.02, true);
// If the poly has 4 sides and is convex then it could be a match
if (approx.size() == 4 && cv::isContourConvex(approx)) {
// Fetch the rectangle area from the match
const cv::RotatedRect rectangle = cv::minAreaRect(approx);
const double rectangleArea = rectangle.size.width * rectangle.size.height;
const double rectangleRatio = rectangle.size.width / rectangle.size.height;
// Check if the shape is rectangular enough
if (rectangleArea >= COLOR_CHART_DETECT_AREA_MIN &&
rectangleArea <= COLOR_CHART_DETECT_AREA_MAX &&
rectangleRatio >= COLOR_CHART_DETECT_RATIO_MIN &&
rectangleRatio <= COLOR_CHART_DETECT_RATIO_MAX) {
// Store rectangles points (need them later for area calculation)
points.push_back(approx[0]);
points.push_back(approx[1]);
points.push_back(approx[2]);
points.push_back(approx[3]);
rectangles.push_back(rectangle);
rectanglesArea += rectangleArea;
rectanglesRatio += rectangleRatio;
}
}
}
// Check if we found any rectangles
const unsigned int rectanglesCount = (unsigned int) rectangles.size();
if (rectanglesCount > 0) {
// Calculate thresholds
const double rectangleAreaMin = (rectanglesArea / rectanglesCount) * COLOR_CHART_DETECT_THRESOLD_MIN;
const double rectangleAreaMax = (rectanglesArea / rectanglesCount) * COLOR_CHART_DETECT_THRESOLD_MAX;
const double rectangleRatioMin = (rectanglesRatio / rectanglesCount) * COLOR_CHART_DETECT_THRESOLD_MIN;
const double rectangleRatioMax = (rectanglesRatio / rectanglesCount) * COLOR_CHART_DETECT_THRESOLD_MAX;
// Final points
std::vector<cv::Point> filtered;
// Iterate over rectangles
for (unsigned int i = 0; i < rectanglesCount; i++) {
// Fetch rectangle and calculate size and ratio
const cv::RotatedRect rectangle = rectangles[i];
const double rectangleArea = rectangle.size.width * rectangle.size.height;
const double rectangleRatio = rectangle.size.width / rectangle.size.height;
// Check if the rectangle fits the average rectangle
if (rectangleArea >= rectangleAreaMin &&
rectangleArea <= rectangleAreaMax &&
rectangleRatio >= rectangleRatioMin &&
rectangleRatio <= rectangleRatioMax) {
// If so, then add these points to the valid points list
const unsigned int index = i * 4;
filtered.push_back(points[index + 0]);
filtered.push_back(points[index + 1]);
filtered.push_back(points[index + 2]);
filtered.push_back(points[index + 3]);
}
}
/**************************************************
* At this point we have filtered the contours to
* a list of valid points that are the corners of
* the small color squares inside the color chart.
* In the next step we need to calculate the
* rectangle that forms the actual color chart area.
**************************************************/
// Check if we found any final points
const unsigned int filteredCount = (unsigned int) filtered.size();
if (filteredCount > 0) {
// Find the center point
unsigned int totalX = 0;
unsigned int totalY = 0;
for (unsigned int i = 0; i < filteredCount; i++) {
totalX += filtered[i].x;
totalY += filtered[i].y;
}
const double centerX = (double) totalX / filteredCount;
const double centerY = (double) totalY / filteredCount;
const cv::Point center(
(unsigned int) lround(centerX),
(unsigned int) lround(centerY)
);
// Sort points based on distance from center (from far to near)
std::sort(filtered.begin(), filtered.end(), [centerX, centerY](const cv::Point a, const cv::Point b) {
const double distanceAX = centerX - a.x;
const double distanceAY = centerY - a.y;
const double distanceBX = centerX - b.x;
const double distanceBY = centerY - b.y;
const double distanceA = std::sqrt(distanceAX * distanceAX + distanceAY * distanceAY);
const double distanceB = std::sqrt(distanceBX * distanceBX + distanceBY * distanceBY);
return distanceA > distanceB;
});
// Create final corner points
cv::Point pointA = center;
cv::Point pointB = center;
cv::Point pointC = center;
cv::Point pointD = center;
// Find corners
for (unsigned int i = 0; i < filteredCount; i++) {
const cv::Point point = filtered[i];
if (point.x < pointA.x && point.y < pointA.y) pointA = point;
if (point.x > pointB.x && point.y < pointB.y) pointB = point;
if (point.x > pointC.x && point.y > pointC.y) pointC = point;
if (point.x < pointD.x && point.y > pointD.y) pointD = point;
// cv::circle(imageMat, cv::Point(filtered[i].x, filtered[i].y), 5, cv::Scalar(0, 1, 0), 2, 8);
}
// cv::circle(imageMat, pointA, 20, cv::Scalar(1, 0, 0), 2, 8);
// cv::circle(imageMat, pointB, 20, cv::Scalar(1, 0, 0), 2, 8);
// cv::circle(imageMat, pointC, 20, cv::Scalar(1, 0, 0), 2, 8);
// cv::circle(imageMat, pointD, 20, cv::Scalar(1, 0, 0), 2, 8);
// cv::circle(imageMat, center, 10, cv::Scalar(0, 0, 1), 2, 8);
// cv::imshow("image", imageMat);
// cv::waitKey(0);
// Store corners
const cv::Point2f corners[4] = {
cv::Point2f(pointA.x, pointA.y),
cv::Point2f(pointB.x, pointB.y),
cv::Point2f(pointC.x, pointC.y),
cv::Point2f(pointD.x, pointD.y)
};
// Store output size
const cv::Size outputSize(COLOR_CHART_INSIDE_WIDTH, COLOR_CHART_INSIDE_HEIGHT);
const cv::Point2f outputRect[4] = {
cv::Point2f(0, 0),
cv::Point2f(outputSize.width, 0),
cv::Point2f(outputSize.width, outputSize.height),
cv::Point2f(0, outputSize.height)
};
// Calculate and apply perspective transform
cv::Mat output;
cv::Mat transform = cv::getPerspectiveTransform(corners, outputRect);
cv::warpPerspective(imageMat, output, transform, outputSize, cv::INTER_LINEAR, cv::BORDER_CONSTANT, 0);
/**************************************************
* Right now we have a output image with the cropped
* color chart (inside area). Now we can iterate
* over the color squares and store the mean value.
**************************************************/
// Iterate over patches
for (unsigned int col = 0; col < COLOR_CHART_COLS; col++) {
for (unsigned int row = 0; row < COLOR_CHART_ROWS; row++) {
// Calculate position of patch
const unsigned int x = col * (COLOR_CHART_PATCH_WIDTH + COLOR_CHART_PATCH_PADDING);
const unsigned int y = row * (COLOR_CHART_PATCH_HEIGHT + COLOR_CHART_PATCH_PADDING);
const unsigned int index = COLOR_CHART_ROWS * col + row;
// Rectangle area of the patch
const cv::Rect rect(
x + 10,
y + 10,
COLOR_CHART_PATCH_WIDTH - 20,
COLOR_CHART_PATCH_HEIGHT - 20
);
// Calculate mean color from area
const cv::Scalar color = cv::mean(cv::Mat(output, rect));
// Set color
patches[index]->setRGB(
color[0],
color[1],
color[2]
);
}
}
}
}
}
return chart;
}
}
#endif // CAPTURE3_COLOR_CHART_UTILS_H
|
simple.c | /* This code is part of this project: Donato E, Ouyang M,
* Peguero-Isalguez C. Triangle counting with a multi-core computer.
* Proceedings of IEEE High Performance Extreme Computing Conference
* (HPEC), 2018, 1-7.
*
* Copyright (c) 2018 Ming Ouyang
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
* HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <omp.h>
#include "ompTri.h"
//ascending
//qsort comparator for uint64_t.
//BUG FIX: the original returned the 64-bit difference truncated to int,
//which orders incorrectly (and can even report "equal") whenever the
//difference does not fit in int, e.g. 1 vs 0x100000001. Compare explicitly.
static int cmpU64(const void *a, const void *b) {
  const uint64_t x = *(const uint64_t *) a;
  const uint64_t y = *(const uint64_t *) b;
  if (x < y) return -1;
  if (x > y) return 1;
  return 0;
}
//remove self-loops and multi-edges
//Sorts each adjacency list ascending, then compacts it in place,
//dropping edges (i,i) and duplicate neighbors; degree[i] is updated to
//the compacted length. Uses globals n, degree, neighbor from ompTri.h.
//Each vertex is independent, so the loop parallelizes cleanly.
void toSimpleGraph(void) {
  uint64_t i, j, k;

#pragma omp parallel for private(j,k) schedule(guided)
  for (i = 0; i < n; i++) {
    if (degree[i] > 1)
      qsort((void *) neighbor[i], degree[i], sizeof(uint64_t), cmpU64);
    for (j = k = 0; j < degree[i]; j++) {
      if (neighbor[i] [j] == i) {
        /* BUG FIX: %lu with uint64_t is undefined behavior on platforms
           where uint64_t is unsigned long long; cast and use %llu */
        fprintf(stderr, "self-loop %llu\n", (unsigned long long) i);
        continue;
      }
      if (j > 0 && neighbor[i] [j] == neighbor[i] [j - 1]) {
        fprintf(stderr, "multi-edge %llu %llu\n", (unsigned long long) i,
                (unsigned long long) neighbor[i] [j]);
        continue;
      }
      neighbor[i] [k++] = neighbor[i] [j];  /* keep this edge */
    }
    degree[i] = k;
  }
}
|
pooling_layer.h | //Tencent is pleased to support the open source community by making FeatherCNN available.
//Copyright (C) 2018 THL A29 Limited, a Tencent company. All rights reserved.
//Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
//in compliance with the License. You may obtain a copy of the License at
//
//https://opensource.org/licenses/BSD-3-Clause
//
//Unless required by applicable law or agreed to in writing, software distributed
//under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
//CONDITIONS OF ANY KIND, either express or implied. See the License for the
//specific language governing permissions and limitations under the License.
#pragma once
#include "../feather_simple_generated.h"
#include "../layer.h"
#include <math.h>
#include <limits>
// Fully parenthesized so the macros expand safely when embedded in larger
// expressions: without the outer parentheses, 2*MIN(1,2) expanded to
// (2*((1)<(2)))?(1):(2) == 1 instead of 2.
#define MAX(a,b) (((a)>(b))?(a):(b))
#define MIN(a,b) (((a)<(b))?(a):(b))
namespace feather
{
// Average pooling over a kernel_h x kernel_w window of a row-major image
// whose leading dimension (row stride) is ldin; writes the mean into *out.
void ave_pool_inner_kernel(float* out, const float* in, const size_t ldin, const size_t kernel_h, const size_t kernel_w)
{
    float acc = 0.0;
    for (size_t row = 0; row != kernel_h; ++row)
    {
        const float* line = in + row * ldin;
        for (size_t col = 0; col != kernel_w; ++col)
        {
            acc += line[col];
        }
    }
    // divide in two steps, matching the accumulation contract of callers
    *out = acc / kernel_h / kernel_w;
}
// Max pooling over a kernel_h x kernel_w window of a row-major image
// whose leading dimension (row stride) is ldin; writes the maximum into *out.
// BUG FIX: the accumulator used to start at 0.0, so a window containing only
// negative values incorrectly produced 0. Start from the lowest representable
// float instead (same sentinel the Forward() path uses).
void max_pool_inner_kernel(float* out, const float* in, const size_t ldin, const size_t kernel_h, const size_t kernel_w)
{
    float max = -1 * std::numeric_limits<float>::max();
    for (size_t m = 0; m != kernel_h; ++m)
    {
        for (size_t n = 0; n != kernel_w; ++n)
        {
            size_t pos = m * ldin + n;
            max = (in[pos] > max) ? in[pos] : max;
        }
    }
    *out = max;
}
// Max/average pooling layer.
// The constructor reads kernel/pad/stride/global-pooling settings from the
// flatbuffer PoolingParameter and selects an inner-kernel function pointer;
// Forward() computes the pooling directly over the bottom blob, and
// GenerateTopBlobs() sizes the top blob with a Caffe-style ceil formula.
// NOTE(review): the initializer list names stride_height/stride_width before
// the Layer base, but C++ always initializes the base first and members in
// declaration order; harmless here because the body overwrites both, though
// it will trigger -Wreorder.
class PoolingLayer : public Layer
{
    public:
        PoolingLayer(const LayerParameter *layer_param, const RuntimeParameter<float>* rt_param)
            : stride_height(1),
              stride_width(1),
              Layer(layer_param, rt_param)
        {
            const PoolingParameter *pooling_param = layer_param->pooling_param();
            kernel_height = pooling_param->kernel_h();
            kernel_width = pooling_param->kernel_w();
            pad_height = pooling_param->pad_h();
            pad_width = pooling_param->pad_w();
            stride_height = pooling_param->stride_h();
            stride_width = pooling_param->stride_w();
            // guard against 0 (or negative) strides coming from the model file
            stride_height = (stride_height <= 0) ? 1 : stride_height;
            stride_width = (stride_width <= 0) ? 1 : stride_width;
            global_pooling = pooling_param->global_pooling();
            this->method = pooling_param->pool();
            // pick the inner kernel (only used by the commented-out path below)
            switch (this->method)
            {
                case PoolingParameter_::PoolMethod_MAX_:
                    _pool_inner_kernel = max_pool_inner_kernel;
                    break;
                case PoolingParameter_::PoolMethod_AVE:
                    _pool_inner_kernel = ave_pool_inner_kernel;
                    break;
                default:
                    fprintf(stderr, "Unsupported pool method\n");
            }
            //printf("kernel (%ld %ld) pad (%ld %ld) stride (%ld %ld) global_pooling %d\n",
            //        kernel_height, kernel_width, pad_height, pad_width, stride_height, stride_width, global_pooling);
        }

        // Computes pooling over the single bottom blob into the single top
        // blob. Windows are clipped against the input borders via MAX/MIN.
        // Returns 0 on success.
        int Forward()
        {
            //fprintf(stderr, "pooling output (%d %d)\n", output_height, output_width);
            //printf("input shape %ld %ld %ld kernel shape %ld %ld stride %ld %ld\n", input_channels, input_height, input_width, kernel_height, kernel_width, stride_height, stride_width);
            const float *input = _bottom_blobs[_bottom[0]]->data();
            float *output = _top_blobs[_top[0]]->data();
            // NOTE(review): this outer p is shadowed inside the loop below and
            // is only used by the commented-out implementation at the bottom.
            float *p = output;
            // leftover from a flattened-loop experiment (see the commented
            // "for (int u=0;u<slot;u++)" just below); currently unused.
            int slot = input_channels * output_height;
            #pragma omp parallel for schedule(static) num_threads(num_threads)
            //        for (int u=0;u<slot;u++)
            //        {
            for (int i = 0; i < input_channels; ++i)
            {
                for (int j = 0; j < output_height; j ++)
                {
                    //            int i=slot/output_height, j=slot%output_height;
                    float *p = output + i * output_height * output_width + j * output_width;
                    // initialize the output row: 0 for AVE, -FLT_MAX for MAX
                    for (int l = 0; l < output_width; l++) p[l] = (this->method != PoolingParameter_::PoolMethod_MAX_ ? 0 : -1 * std::numeric_limits<float>::max()) ;
                    // vertical window [x_min, x_max) clipped to the input
                    int tmp_pos = j * (int)stride_height - (int)pad_height;
                    int x_min = MAX(tmp_pos, 0);
                    int x_max = MIN((int)(tmp_pos + kernel_height), (int) input_height);
                    for (int x = x_min; x < x_max; ++x)
                    {
                        int xpos = i * input_height * input_width + x * input_width;
                        for (int k = 0; k < output_width; k ++)
                        {
                            float total = (this->method != PoolingParameter_::PoolMethod_MAX_ ? 0 : -1 * std::numeric_limits<float>::max());
                            int counter = 0;
                            // horizontal window [y_min, y_max) clipped to the input
                            int local_pos = k * (int)stride_width - (int)pad_width;
                            int y_min = MAX(local_pos, 0);
                            int y_max = MIN((int)(local_pos + kernel_width), (int) input_width);
                            for (int y = y_min; y < y_max; ++y)
                            {
                                float value = input[xpos + y];
                                if (this->method != PoolingParameter_::PoolMethod_MAX_)  total += value, counter++;
                                else total = total > value ? total : value;
                            }
                            // NOTE(review): AVE normalizes by the clipped window
                            // width (counter) but by the full kernel_height for
                            // rows, so border outputs mix clipped/unclipped
                            // normalization — confirm this is intended.
                            if (this->method != PoolingParameter_::PoolMethod_MAX_)
                                p[k] += total / (counter) / kernel_height;
                            else p[k] = (p[k] > total) ? p[k] : total;
                        }
                    }
                }
            }
            /*
            #if 0
                    f(0)
            #else
                    if(this->method == PoolingParameter_::PoolMethod_MAX_)
            #endif
                    {
                        float f_minimal = std::numeric_limits<float>::max();
                        f_minimal = -f_minimal;
                        //printf("minimal float %f\n", f_minimal);
                        //Init output
                        for(int i = 0; i < output_channels * output_height * output_width; ++i)
                        {
                            output[i] = f_minimal;
                        }
                        const size_t img_size = input_height * input_width;
                        #pragma omp parallel for num_threads(num_threads) collapse(3)
                        for (size_t i = 0; i < output_channels; ++i)
                        {
                            for (size_t j = 0; j < output_height; ++j)
                            {
                                for(size_t u = 0; u < kernel_height; ++u)
                                {
                                    int row = j * stride_height + u - pad_height;
                                    if(row < 0 || row >= input_height)
                                        continue;
                                    for (size_t k = 0; k < output_width; ++k)
                                    {
                                        float* out_ptr = output + i * output_height * output_width + j * output_width + k;
                                        float max = *out_ptr;
                                        for(size_t v = 0; v < kernel_width; ++v)
                                        {
                                            int col = k * stride_height + v - pad_width;
                                            if(col < 0 || col >= input_width)
                                                continue;
                                            const float* in_ptr = input + i * img_size + row * input_width + col;
                                            float data = *in_ptr;
                                            max = (max > data) ? max : data;
                                        }
                                        *out_ptr = max;
                                    }
                                }
                            }
                        }
                    }
                    else
                    {
                        for (size_t i = 0; i < output_channels; ++i)
                        {
                            for (size_t j = 0; j < output_height; ++j)
                            {
                                for (size_t k = 0; k < output_width; ++k)
                                {
            #if 0
                                    float total = 0.0;
                                    for (size_t m = 0; m != kernel_height; ++m)
                                    {
                                        for (size_t n = 0; n != kernel_width; ++n)
                                        {
                                            size_t pos = i * input_height* input_width + (j + m)* input_width + k + n;
                                            total += input[pos];
                                        }
                                    }
                                    *p++ = total / (kernel_height * kernel_width);
            #else
                                    size_t border_h = input_height - j * stride_height + pad_height;
                                    size_t border_w = input_width - k * stride_width + pad_width;
                                    size_t kernel_h = (kernel_height < border_h) ? kernel_height : border_h;
                                    size_t kernel_w = (kernel_width < border_w) ? kernel_width : border_w;
                                    //printf("pool shape %ld %ld %ld %ld %ld %ld %d %d\n", kernel_h, kernel_w, output_height, output_width, border_h, border_w, j, k);
                                    int row = j * stride_height - pad_height;
                                    int col = k * stride_width - pad_width;
                                    if(row < 0)
                                    {
                                        kernel_h = kernel_height + row;
                                        row = 0;
                                    }
                                    if(col < 0)
                                    {
                                        kernel_w = kernel_width + col;
                                        col = 0;
                                    }
                                    size_t pos = i * input_height * input_width + row * input_width + col;
                                    _pool_inner_kernel(p, input + pos, input_width, kernel_h, kernel_w);
                                    ++p;
            #endif
                                }
                            }
                        }
                    }
            */
            return 0;
        }

        // Sizes and allocates the single top blob from the single bottom
        // blob. Global pooling collapses the spatial dims to 1x1; otherwise
        // the Caffe-style ceil formula is used. Returns 0 on success.
        int GenerateTopBlobs()
        {
            //Only accept a single bottom blob.
            const Blob<float> *bottom_blob = _bottom_blobs[_bottom[0]];
            input_height = bottom_blob->height();
            input_width = bottom_blob->width();
            input_channels = bottom_blob->channels();
            //printf("layer %s\n", _name.c_str());
            //printf("input %lu %lu %lu\n", input_channels, input_height, input_width);
            if (global_pooling)
            {
                // kernel covers the whole feature map; one output per channel
                kernel_height = input_height;
                kernel_width = input_width;
                output_height = 1;
                output_width = 1;
                output_channels = input_channels;
            }
            else
            {
                //General pooling.
                output_channels = input_channels;
                output_height = static_cast<int>(ceil(static_cast<float>(input_height + 2 * pad_height - kernel_height) / stride_height)) + 1;
                output_width = static_cast<int>(ceil(static_cast<float>(input_width + 2 * pad_width - kernel_width) / stride_width)) + 1;
            }
            _top_blobs[_top[0]] = new Blob<float>(1, output_channels, output_height, output_width);
            _top_blobs[_top[0]]->Alloc();
            //_top_blobs[_top[0]]->PrintBlobInfo();
            return 0;
        }

    private:
        // input/output geometry, filled in by GenerateTopBlobs()
        size_t input_height;
        size_t input_width;
        size_t input_channels;
        size_t output_height;
        size_t output_width;
        size_t output_channels;
        // pooling hyper-parameters from the model file
        size_t pad_height;
        size_t pad_width;
        size_t kernel_height;
        size_t kernel_width;
        size_t stride_height;
        size_t stride_width;
        bool global_pooling;

        PoolingParameter_::PoolMethod method;
        // inner kernel selected in the constructor (max or average);
        // only exercised by the commented-out Forward() path above
        void (*_pool_inner_kernel)(float* out, const float* in, const size_t ldin, const size_t kernel_h, const size_t kernel_w);
};
};
|
convolution_sgemm_int8.h | // BUG1989 is pleased to support the open source community by supporting ncnn available.
//
// Copyright (C) 2019 BUG1989. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#if __aarch64__
#if 1
#include "gemm_symm_int8.h"
// Pack the convolution weights for the symmetric int8 GEMM: view them as an
// (outch) x (inch * kernel_size) row-major matrix and let reorder_a lay it
// out in the blocked format int8kernel expects.
static void conv_im2col_sgemm_transform_kernel_int8_neon(const Mat& _kernel, Mat& kernel_tm, int inch, int outch, int kernel_size)
{
    const int rows = outch;
    const int cols = inch * kernel_size;

    kernel_tm.create(rows * cols, (size_t)1u);

    const int8_t *src = _kernel;
    int8_t *packed = kernel_tm;
    reorder_a((int8_t*)src, packed, rows, cols, cols);
}
// int8 convolution as im2col + packed GEMM:
//   1. unfold each kernel-sized patch of bottom_blob into bottom_im2col
//      (one kernel_h*kernel_w slab per input channel),
//   2. reorder that matrix for the micro-kernel with reorder_b,
//   3. compute top = kernel_tm * bottom_tm with int8kernel
//      (M = outch, N = outw*outh, K = inch*kernel_w*kernel_h).
static void conv_im2col_sgemm_int8_neon(const Mat &bottom_blob, Mat &top_blob, const Mat &kernel_tm, \
        const int kernel_w, const int kernel_h, const int stride_w, const int stride_h, const Option& opt)
{
    const int w = bottom_blob.w;
    const int inch = bottom_blob.c;

    const int outw = top_blob.w;
    const int outh = top_blob.h;
    const int outc = top_blob.c;

    // step 1: im2col (channels are independent, so parallelize over them)
    Mat bottom_im2col(outw*outh, kernel_h*kernel_w*inch, 1UL, opt.workspace_allocator);
    {
        const int channel_stride = kernel_h*kernel_w*outw*outh;
        signed char* unfolded = (signed char*)bottom_im2col;

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int c=0; c<inch; c++)
        {
            const signed char* src = bottom_blob.channel(c);
            signed char* dst = unfolded + channel_stride * c;
            for (int ky=0; ky<kernel_h; ky++)
            {
                for (int kx=0; kx<kernel_w; kx++)
                {
                    for (int oy=0; oy<outh; oy++)
                    {
                        // row (ky + oy*stride_h), starting at column kx
                        const signed char* line = src + (ky + oy * stride_h) * w + kx;
                        for (int ox=0; ox<outw; ox++)
                        {
                            *dst++ = line[ox * stride_w];
                        }
                    }
                }
            }
        }
    }

    const int m = outc;
    const int n = outw * outh;
    const int k = inch * kernel_w * kernel_h;

    // step 2: reorder the unfolded data for the GEMM micro-kernel
    ncnn::Mat bottom_tm(k * n, (size_t)1u, opt.workspace_allocator);
    {
        const int8_t *unpacked = bottom_im2col;
        int8_t *packed = bottom_tm;
        reorder_b(unpacked, packed, k, n, n);
    }

    // step 3: GEMM
    int32_t *dst = top_blob;
    const int8_t *lhs = kernel_tm;
    int8_t *rhs = bottom_tm;
    const size_t ldc = top_blob.cstep;
    int8kernel((void*)dst, lhs, rhs, m, k, n, ldc, nullptr, nullptr, opt);
}
#else
// Pack convolution weights for the 4x4 int8 GEMM micro-kernel.
// Weights are viewed as an (outch) x (inch*kernel_size) row-major matrix;
// full groups of four output channels are interleaved two coefficients at a
// time so the GEMM inner loop can load them with a single contiguous read,
// and leftover channels (outch % 4) are packed one per channel slot.
static void conv_im2col_sgemm_transform_kernel_int8_neon(const Mat& _kernel, Mat& kernel_tm, int inch, int outch, int kernel_size)
{
    const signed char* kernel = _kernel;

    // kernel memory packed 4 x 4
    kernel_tm.create(4*kernel_size, inch, outch/4 + outch%4, (size_t)1u);

    int nn_outch = 0;
    int remain_outch_start = 0;

    nn_outch = outch >> 2;               // number of full groups of 4 output channels
    remain_outch_start = nn_outch << 2;  // first channel handled by the tail loop

    for (int pp=0; pp<nn_outch; pp++)
    {
        int p = pp * 4;

        const signed char* k0 = kernel + (p+0)*inch*kernel_size;
        const signed char* k1 = kernel + (p+1)*inch*kernel_size;
        const signed char* k2 = kernel + (p+2)*inch*kernel_size;
        const signed char* k3 = kernel + (p+3)*inch*kernel_size;

        signed char* ktmp = kernel_tm.channel(p/4);

        int q=0;
        // interleave coefficient pairs from the four channels:
        // k0[0] k0[1] k1[0] k1[1] k2[0] k2[1] k3[0] k3[1] ...
        for (; q+1<inch*kernel_size; q+=2)
        {
            ktmp[0] = k0[0];
            ktmp[1] = k0[1];
            ktmp[2] = k1[0];
            ktmp[3] = k1[1];
            ktmp[4] = k2[0];
            ktmp[5] = k2[1];
            ktmp[6] = k3[0];
            ktmp[7] = k3[1];

            ktmp += 8;
            k0 += 2;
            k1 += 2;
            k2 += 2;
            k3 += 2;
        }
        // odd trailing coefficient: one value from each of the four channels
        for (; q<inch*kernel_size; q++)
        {
            ktmp[0] = k0[0];
            ktmp[1] = k1[0];
            ktmp[2] = k2[0];
            ktmp[3] = k3[0];

            ktmp += 4;
            k0 += 1;
            k1 += 1;
            k2 += 1;
            k3 += 1;
        }
    }

    // leftover output channels (outch % 4): each gets its own channel slot,
    // coefficients copied pairwise then singly
    for (int p=remain_outch_start; p<outch; p++)
    {
        const signed char* k0 = kernel + (p+0)*inch*kernel_size;

        signed char* ktmp = kernel_tm.channel(p/4 + p%4);

        int q=0;
        for (; q+1<inch*kernel_size; q=q+2)
        {
            ktmp[0] = k0[0];
            ktmp[1] = k0[1];
            ktmp += 2;
            k0 += 2;
        }
        for (; q<inch*kernel_size; q++)
        {
            ktmp[0] = k0[0];
            ktmp++;
            k0++;
        }
    }
}
// int8 convolution via im2row + packed 4x4 GEMM.
// Pipeline: (1) unfold input patches row-major into bottom_im2row,
// (2) repack groups of four output positions (and the remainder) into
// bottom_tm to match the packed kernel layout, (3) multiply kernel_tm by
// bottom_tm four output channels at a time, with an aarch64 inline-asm
// micro-kernel and a scalar C fallback.
// NOTE(review): the im2row step does no bounds clipping — it assumes the
// caller already applied padding to bottom_blob; confirm against callers.
static void conv_im2col_sgemm_int8_neon(const Mat &bottom_blob, Mat &top_blob, const Mat &kernel_tm, \
        const int kernel_w, const int kernel_h, const int stride_w, const int stride_h, const Option& opt)
{
    int w = bottom_blob.w;
    int inch = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    // im2row: one row per output position, laid out channel-major then
    // kernel-row/col within the row
    Mat bottom_im2row(kernel_h*kernel_w*inch, outw*outh, 1UL, opt.workspace_allocator);
    {
        int out_stride = kernel_h*kernel_w*inch*outw;
        signed char* ret = (signed char*)bottom_im2row;

        // #pragma omp parallel for num_threads(opt.num_threads)
        for (int i=0; i<outh; i++)
        {
            int retID = out_stride * i;
            for (int j=0; j<outw; j++)
            {
                for (int p=0; p<inch; p++)
                {
                    const signed char* input = bottom_blob.channel(p);
                    for (int u=0; u<kernel_h; u++)
                    {
                        for (int v=0; v<kernel_w; v++)
                        {
                            // top-left of the patch is (i*stride_h, j*stride_w)
                            int row = u + i * stride_h;
                            int col = v + j * stride_w;
                            int index = row * w + col;
                            ret[retID] = input[index];
                            retID++;
                        }
                    }
                }
            }
        }
    }

    int kernel_size = kernel_w * kernel_h;
    int out_size = outw * outh;

    // int M = outch;                    // outch
    int N = outw * outh;                 // outsize or out stride
    int K = kernel_w * kernel_h * inch;  // ksize * inch

    // bottom_im2row memory packed 4 x 4: interleave four consecutive output
    // positions two coefficients at a time (mirrors the kernel packing)
    Mat bottom_tm(4*kernel_size, inch, out_size/4 + out_size%4, (size_t)1u, opt.workspace_allocator);
    {
        int nn_size = out_size >> 2;
        int remain_size_start = nn_size << 2;

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int ii=0; ii<nn_size; ii++)
        {
            int i = ii * 4;

            const signed char* img0 = bottom_im2row.row<signed char>(i);
            const signed char* img1 = bottom_im2row.row<signed char>(i+1);
            const signed char* img2 = bottom_im2row.row<signed char>(i+2);
            const signed char* img3 = bottom_im2row.row<signed char>(i+3);

            signed char* tmpptr = bottom_tm.channel(i/4);

            int q = 0;
            for (; q+1<inch*kernel_size; q=q+2)
            {
                tmpptr[0] = img0[0];
                tmpptr[1] = img0[1];
                tmpptr[2] = img1[0];
                tmpptr[3] = img1[1];
                tmpptr[4] = img2[0];
                tmpptr[5] = img2[1];
                tmpptr[6] = img3[0];
                tmpptr[7] = img3[1];

                tmpptr += 8;
                img0 += 2;
                img1 += 2;
                img2 += 2;
                img3 += 2;
            }
            for (; q<inch*kernel_size; q++)
            {
                tmpptr[0] = img0[0];
                tmpptr[1] = img1[0];
                tmpptr[2] = img2[0];
                tmpptr[3] = img3[0];

                tmpptr += 4;
                img0 += 1;
                img1 += 1;
                img2 += 1;
                img3 += 1;
            }
        }

        // remaining output positions (out_size % 4), one per channel slot
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int i=remain_size_start; i<out_size; i++)
        {
            const signed char* img0 = bottom_im2row.row<signed char>(i);

            signed char* tmpptr = bottom_tm.channel(i/4 + i%4);

            int q=0;
            for (; q+1<inch*kernel_size; q=q+2)
            {
                tmpptr[0] = img0[0];
                tmpptr[1] = img0[1];

                tmpptr += 2;
                img0 += 2;
            }
            for (; q<inch*kernel_size; q++)
            {
                tmpptr[0] = img0[0];

                tmpptr += 1;
                img0 += 1;
            }
        }
    }

    // 4x4 GEMM: process four output channels x four output positions per
    // iteration, then handle the channel/position remainders
    // sgemm(int M, int N, int K, float* A, float* B, float* C)
    {
        // int M = outch;                    // outch
        // int N = outw * outh;              // outsize or out stride
        // int L = kernel_w * kernel_h * inch; // ksize * inch

        int nn_outch = 0;
        int remain_outch_start = 0;

        nn_outch = outch >> 2;
        remain_outch_start = nn_outch << 2;

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int pp=0; pp<nn_outch; pp++)
        {
            int i = pp * 4;

            int* output0 = top_blob.channel(i);
            int* output1 = top_blob.channel(i+1);
            int* output2 = top_blob.channel(i+2);
            int* output3 = top_blob.channel(i+3);

            int j=0;
            for (; j+3<N; j=j+4)
            {
                const signed char* vb = bottom_tm.channel(j/4);
                const signed char* va = kernel_tm.channel(i/4);

#if __ARM_NEON
                // 4x4 micro-kernel: accumulates int8 products into four
                // int32x4 sums via smull/sadalp, permuting the input vector
                // (rev32/rev64) so one multiply covers all channel/position
                // pairings; label 3 de-interleaves the accumulators.
                // NOTE(review): in the tail loop at label 4 the branch is
                // "bne 2b" where "4b" looks intended; since the tail count is
                // K & 1 (at most one iteration) the branch is never taken —
                // confirm before reusing this loop with a larger count.
                asm volatile(
                    "prfm   pldl1keep, [%4, #128]       \n"
                    "prfm   pldl1keep, [%5, #128]       \n"
                    "eor    v16.16b, v16.16b, v16.16b   \n" // sum0
                    "eor    v17.16b, v17.16b, v17.16b   \n" // sum1
                    "eor    v18.16b, v18.16b, v18.16b   \n" // sum2
                    "eor    v19.16b, v19.16b, v19.16b   \n" // sum3
                    "lsr    w4, %w12, #2                \n"// r4 = nn = L >> 2
                    "cmp    w4, #0                      \n"
                    "beq    1f                          \n"

                    "0:                                 \n"// for (; k+3<L; k=k+4)
                    "ld1    {v0.16b}, [%4]              \n"// i0, i1, i2, i3
                    "ld1    {v4.16b}, [%5]              \n"// k0, k1, k2, k3

                    "add    %4, %4, #16                 \n"
                    "add    %5, %5, #16                 \n"

                    "rev32  v1.8h, v0.8h                \n"// i1, i0, i3, i2
                    "rev64  v2.4s, v0.4s                \n"// i2, i3, i0, i1
                    "rev64  v3.8h, v0.8h                \n"// i3, i2, i1, i0

                    "smull  v8.8h, v4.8b, v0.8b         \n"
                    "smull  v9.8h, v4.8b, v1.8b         \n"
                    "smull  v10.8h, v4.8b, v2.8b        \n"
                    "smull  v11.8h, v4.8b, v3.8b        \n"

                    "prfm   pldl1keep, [%4, #128]       \n"
                    "prfm   pldl1keep, [%5, #128]       \n"

                    "smlal2 v8.8h, v4.16b, v0.16b       \n"
                    "smlal2 v9.8h, v4.16b, v1.16b       \n"
                    "smlal2 v10.8h, v4.16b, v2.16b      \n"
                    "smlal2 v11.8h, v4.16b, v3.16b      \n"

                    "sadalp v16.4s, v8.8h               \n"// i0k0, i1k1, i2k2, i3k3
                    "sadalp v17.4s, v9.8h               \n"// i1k0, i0k1, i3k2, i2k3
                    "sadalp v18.4s, v10.8h              \n"// i2k0, i3k1, i0k2, i1k3
                    "sadalp v19.4s, v11.8h              \n"// i3k0, i2k1, i1k2, i0k3

                    "subs   w4, w4, #1                  \n"
                    "bne    0b                          \n"

                    "1:                                 \n"// for (; k+1<L; k=k+2)

                    // remain loop
                    "and    w4, %w12, #3                \n"// w4 = remain = K & 3;
                    "cmp    w4, #0                      \n"
                    "beq    3f                          \n"

                    "lsr    w4, w4, #1                  \n"// r4 = nn = L >> 1
                    "cmp    w4, #0                      \n"
                    "beq    3f                          \n"

                    "2:                                 \n"// for (; k+1<L; k=k+2)
                    "ld1    {v0.8b}, [%4]               \n"// i0, i1, i2, i3
                    "ld1    {v4.8b}, [%5]               \n"// k0, k1, k2, k3

                    "add    %4, %4, #8                  \n"
                    "add    %5, %5, #8                  \n"

                    "rev32  v1.4h, v0.4h                \n"// i2, i3, i0, i1
                    "rev64  v2.2s, v0.2s                \n"// i1, i0, i3, i2
                    "rev64  v3.4h, v0.4h                \n"// i0, i1, i2, i3

                    "smull  v8.8h, v4.8b, v0.8b         \n"
                    "smull  v9.8h, v4.8b, v1.8b         \n"
                    "smull  v10.8h, v4.8b, v2.8b        \n"
                    "smull  v11.8h, v4.8b, v3.8b        \n"

                    "sadalp v16.4s, v8.8h               \n"
                    "sadalp v17.4s, v9.8h               \n"
                    "sadalp v18.4s,v10.8h               \n"
                    "sadalp v19.4s,v11.8h               \n"

                    "subs   w4, w4, #1                  \n"
                    "bne    2b                          \n"

                    "3:                                 \n"// realloc
                    "mov    v20.s[0], v16.s[0]          \n"
                    "mov    v20.s[1], v17.s[0]          \n"
                    "mov    v20.s[2], v18.s[0]          \n"
                    "mov    v20.s[3], v19.s[0]          \n"

                    "mov    v21.s[0], v17.s[1]          \n"
                    "mov    v21.s[1], v16.s[1]          \n"
                    "mov    v21.s[2], v19.s[1]          \n"
                    "mov    v21.s[3], v18.s[1]          \n"

                    "mov    v22.s[0], v18.s[2]          \n"
                    "mov    v22.s[1], v19.s[2]          \n"
                    "mov    v22.s[2], v16.s[2]          \n"
                    "mov    v22.s[3], v17.s[2]          \n"

                    "mov    v23.s[0], v19.s[3]          \n"
                    "mov    v23.s[1], v18.s[3]          \n"
                    "mov    v23.s[2], v17.s[3]          \n"
                    "mov    v23.s[3], v16.s[3]          \n"

                    "and    w4, %w12, #1                \n"// w4 = remain = K & 1;
                    "cmp    w4, #0                      \n"
                    "beq    5f                          \n"

                    "4:                                 \n"
                    "ld1    {v0.8b}, [%4]               \n"
                    "ld1    {v1.8b}, [%5]               \n"

                    "add    %4, %4, #4                  \n"
                    "add    %5, %5, #4                  \n"

                    "sshll  v0.8h, v0.8b, #0            \n"// i0[0], i1[0], i2[0], i3[0]
                    "sshll  v1.8h, v1.8b, #0            \n"// k0[0], k1[0], k2[0], k3[0]

                    "smlal  v20.4s, v0.4h, v1.h[0]      \n"// i0k0, i1k0, i2k0, i3k0
                    "smlal  v21.4s, v0.4h, v1.h[1]      \n"// i0k1, i1k1, i2k1, i3k1
                    "smlal  v22.4s, v0.4h, v1.h[2]      \n"// i0k2, i1k2, i2k2, i3k2
                    "smlal  v23.4s, v0.4h, v1.h[3]      \n"// i0k3, i1k3, i2k3, i3k3

                    "subs   w4, w4, #1                  \n"
                    "bne    2b                          \n"

                    "5:                                 \n"
                    "st1    {v20.4s}, [%0]              \n"
                    "st1    {v21.4s}, [%1]              \n"
                    "st1    {v22.4s}, [%2]              \n"
                    "st1    {v23.4s}, [%3]              \n"

                    : "=r"(output0), // %0
                      "=r"(output1), // %1
                      "=r"(output2), // %2
                      "=r"(output3), // %3
                      "=r"(vb),      // %4
                      "=r"(va)       // %5
                    : "0"(output0),
                      "1"(output1),
                      "2"(output2),
                      "3"(output3),
                      "4"(vb),
                      "5"(va),
                      "r"(K)         // %12
                    : "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23"
                );
#else
                // scalar fallback for the 4x4 tile; mirrors the packed layout
                // (two interleaved coefficients per step, then singles)
                int sum0[4] = {0};
                int sum1[4] = {0};
                int sum2[4] = {0};
                int sum3[4] = {0};

                int k=0;
                for (; k+1<K; k=k+2)
                {
                    for (int n=0; n<4; n++)
                    {
                        sum0[n] += (int)va[0] * vb[2*n];   // k0
                        sum0[n] += (int)va[1] * vb[2*n+1];

                        sum1[n] += (int)va[2] * vb[2*n];   // k1
                        sum1[n] += (int)va[3] * vb[2*n+1];

                        sum2[n] += (int)va[4] * vb[2*n];   // k2
                        sum2[n] += (int)va[5] * vb[2*n+1];

                        sum3[n] += (int)va[6] * vb[2*n];   // k3
                        sum3[n] += (int)va[7] * vb[2*n+1];
                    }
                    va += 8;
                    vb += 8;
                }

                for (; k<K; k++)
                {
                    for (int n=0; n<4; n++)
                    {
                        sum0[n] += (int)va[0] * vb[n];
                        sum1[n] += (int)va[1] * vb[n];
                        sum2[n] += (int)va[2] * vb[n];
                        sum3[n] += (int)va[3] * vb[n];
                    }
                    va += 4;
                    vb += 4;
                }

                for (int n=0; n<4; n++)
                {
                    output0[n] = sum0[n];
                    output1[n] = sum1[n];
                    output2[n] = sum2[n];
                    output3[n] = sum3[n];
                }
#endif
                output0 += 4;
                output1 += 4;
                output2 += 4;
                output3 += 4;
            }

            // remaining output positions for this group of 4 channels
            for (; j<N; j++)
            {
                const signed char* vb = bottom_tm.channel(j/4 + j%4);
                const signed char* va = kernel_tm.channel(i/4);

#if 0//__ARM_NEON
                int32x4_t _sum = vdupq_n_s32(0);

                int k=0;
                for (; k+3<K; k=k+4)
                {
                    int8x8_t _r0 = vld1_s8(vb);     // i0[0-3]
                    int8x8x2_t _k = vld2_s8(va);    // k0[0-1], k1[0-1], k2[0-1], k3[0-1];k0[2-3], k1[2-3], k2[2-3], k3[2-3]

                    int16x8_t _r0_s16 = vmovl_s8(_r0);          // i0[0],i0[1],i0[2],i0[3]
                    int16x8_t _k02_s16 = vmovl_s8(_k.val[0]);   // k0[0],k1[0],k2[0],k3[0],k0[2],k1[2],k2[2],k3[2]
                    int16x8_t _k13_s16 = vmovl_s8(_k.val[1]);   // k0[1],k1[1],k2[1],k3[1],k0[3],k1[3],k2[3],k3[3]

                    _sum = vmlal_lane_s16(_sum, vget_low_s16(_k02_s16), vget_low_s16(_r0_s16), 0);  // i0[0]*k[0-3][0]
                    _sum = vmlal_lane_s16(_sum, vget_low_s16(_k13_s16), vget_low_s16(_r0_s16), 1);  // i0[1]*k[0-3][1]
                    _sum = vmlal_lane_s16(_sum, vget_high_s16(_k02_s16), vget_low_s16(_r0_s16), 2); // i0[2]*k[0-3][2]
                    _sum = vmlal_lane_s16(_sum, vget_high_s16(_k13_s16), vget_low_s16(_r0_s16), 3); // i0[3]*k[0-3][3]

                    va += 16;
                    vb += 4;
                }

                for (; k+1<K; k=k+2)
                {
                    int8x8_t _r0 = vld1_s8(vb);     // i0[0-3]
                    int8x8_t _k = vld1_s8(va);      // k0[0-1], k1[0-1], k2[0-1], k3[0-1]
                    _r0[2] = _r0[0];
                    _r0[3] = _r0[1];
                    _r0[4] = _r0[0];
                    _r0[5] = _r0[1];
                    _r0[6] = _r0[0];
                    _r0[7] = _r0[1];

                    int16x8_t _tp0 = vmull_s8(_k, _r0);
                    _sum = vpadalq_s16(_sum, _tp0);

                    va += 8;
                    vb += 2;
                }

                for (; k<K; k++)
                {
                    int8x8_t _r0 = vld1_s8(vb);     // i0[0-3]
                    int8x8_t _k = vld1_s8(va);      // k[0-3][0]

                    int16x8_t _tp0 = vmull_s8(_k, _r0);
                    _sum = vaddw_s16(_sum, vget_low_s16(_tp0));

                    va += 4;
                    vb += 1;
                }

                vst1q_lane_s32(output0, _sum, 0);
                vst1q_lane_s32(output1, _sum, 1);
                vst1q_lane_s32(output2, _sum, 2);
                vst1q_lane_s32(output3, _sum, 3);
#else
                int sum0 = 0;
                int sum1 = 0;
                int sum2 = 0;
                int sum3 = 0;

                int k=0;
                for (; k+1<K; k=k+2)
                {
                    sum0 += (int)va[0] * vb[0];
                    sum0 += (int)va[1] * vb[1];

                    sum1 += (int)va[2] * vb[0];
                    sum1 += (int)va[3] * vb[1];

                    sum2 += (int)va[4] * vb[0];
                    sum2 += (int)va[5] * vb[1];

                    sum3 += (int)va[6] * vb[0];
                    sum3 += (int)va[7] * vb[1];

                    va += 8;
                    vb += 2;
                }

                for (; k<K; k++)
                {
                    sum0 += (int)va[0] * vb[0];
                    sum1 += (int)va[1] * vb[0];
                    sum2 += (int)va[2] * vb[0];
                    sum3 += (int)va[3] * vb[0];

                    va += 4;
                    vb += 1;
                }

                output0[0] = sum0;
                output1[0] = sum1;
                output2[0] = sum2;
                output3[0] = sum3;
#endif
                output0++;
                output1++;
                output2++;
                output3++;
            }
        }

        // remaining output channels (outch % 4), one at a time
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int i=remain_outch_start; i<outch; i++)
        {
            int* output = top_blob.channel(i);

            int j=0;
            for (; j+3<N; j=j+4)
            {
                const signed char* vb = bottom_tm.channel(j/4);
                const signed char* va = kernel_tm.channel(i/4 + i%4);

#if __ARM_NEON
                // 1x4 tile with NEON intrinsics: broadcast the kernel pair
                // across the vector, widen-multiply and pairwise-accumulate
                int32x4_t _sum = vdupq_n_s32(0);

                int k=0;
                for (; k+1<K; k=k+2)
                {
                    int8x8_t _r0 = vld1_s8(vb);     // i0[0-1], i1[0-1], i2[0-1], i3[0-1]
                    int8x8_t _k = vld1_s8(va);      // k0[0-1]
                    _k[2] = _k[0];
                    _k[3] = _k[1];
                    _k[4] = _k[0];
                    _k[5] = _k[1];
                    _k[6] = _k[0];
                    _k[7] = _k[1];

                    int16x8_t _tp0 = vmull_s8(_k, _r0);
                    _sum = vpadalq_s16(_sum, _tp0);

                    va += 2;
                    vb += 8;
                }

                for (; k<K; k++)
                {
                    int8x8_t _r0 = vld1_s8(vb);     // i0[0], i1[0], i2[0], i3[0]
                    int8x8_t _k = vld1_s8(va);      // k[0][0]

                    int16x8_t _r0_s16 = vmovl_s8(_r0);
                    int16x8_t _k_s16 = vmovl_s8(_k);

                    _sum = vmlal_lane_s16(_sum, vget_low_s16(_r0_s16), vget_low_s16(_k_s16), 0); // i0k0, i1k0, i2k0, i3k0

                    va += 1;
                    vb += 4;
                }

                vst1q_s32(output, _sum);
#else
                int sum[4] = {0};

                int k=0;
                for (; k+1<K; k=k+2)
                {
                    for (int n=0; n<4; n++)
                    {
                        sum[n] += (int)va[0] * vb[2*n];
                        sum[n] += (int)va[1] * vb[2*n+1];
                    }
                    va += 2;
                    vb += 8;
                }

                for (; k<K; k++)
                {
                    for (int n=0; n<4; n++)
                    {
                        sum[n] += (int)va[0] * vb[n];
                    }
                    va += 1;
                    vb += 4;
                }

                for (int n=0; n<4; n++)
                {
                    output[n] = sum[n];
                }
#endif
                output += 4;
            }

            // scalar 1x1 tail: plain dot product over K
            for (; j<N; j++)
            {
                int sum = 0;

                const signed char* vb = bottom_tm.channel(j/4 + j%4);
                const signed char* va = kernel_tm.channel(i/4 + i%4);

                for (int k=0; k<K; k++)
                {
                    sum += (int)va[0] * vb[0];

                    va += 1;
                    vb += 1;
                }

                output[0] = sum;
                output++;
            }
        }
    }

    // // sgemm(int M, int N, int K, float* A, float* B, float* C)
    // {
    //     for (int i=0; i<M; i++)
    //     {
    //         int* output = top_blob.channel(i);
    //         for (int j=0; j<N; j++)
    //         {
    //             int sum = 0;
    //             signed char* vb = (signed char*)bottom_im2row + K * j;
    //             const signed char* va = kernel + K * i;
    //             for (int k=0; k<K; k++)
    //             {
    //                 sum += (int)va[0] * vb[0];
    //                 va += 1;
    //                 vb += 1;
    //             }
    //             output[0] = sum;
    //             output++;
    //         }
    //     }
    // }
}
#endif
#else
// Repack the int8 convolution kernel into the interleaved layout consumed by
// the im2col-sgemm compute path.
//
// On aarch64 the output channels are grouped 8-at-a-time, then 4-at-a-time,
// then singly; on armv7 only 4-at-a-time then singly. Within a group the
// per-channel kernel coefficients are interleaved element-by-element so the
// sgemm inner loop can load one contiguous vector per k-step.
//
// _kernel:     flat [outch][inch*kernel_size] int8 weights
// kernel_tm:   destination Mat, allocated here (1-byte elements)
// inch/outch:  input/output channel counts
// kernel_size: kernel_w * kernel_h
static void conv_im2col_sgemm_transform_kernel_int8_neon(const Mat& _kernel, Mat& kernel_tm, int inch, int outch, int kernel_size)
{
    const signed char* kernel = _kernel;
#if __ARM_NEON && __aarch64__
    // kernel memory packed 8 x 8: one channel per group of 8 outch, plus
    // one per group of 4, plus one per leftover single channel
    kernel_tm.create(8*kernel_size, inch, outch/8 + (outch%8)/4 + outch%4, (size_t)1u);
#else
    // kernel memory packed 4 x 8
    kernel_tm.create(4*kernel_size, inch, outch/4 + outch%4, (size_t)1u);
#endif
    const int kstep = inch * kernel_size; // coefficients per output channel
    int nn_outch = 0;
    int remain_outch_start = 0;
#if __ARM_NEON && __aarch64__
    // ---- groups of 8 output channels ----
    nn_outch = outch >> 3;
    remain_outch_start = nn_outch << 3;
    for (int pp = 0; pp < nn_outch; pp++)
    {
        const int p = pp * 8;
        const signed char* src[8];
        for (int a = 0; a < 8; a++)
        {
            src[a] = kernel + (p + a) * kstep;
        }
        signed char* dst = kernel_tm.channel(p/8);
        // interleave: dst = {k0[q], k1[q], ..., k7[q]} for each q
        for (int q = 0; q < kstep; q++)
        {
            for (int a = 0; a < 8; a++)
            {
                *dst++ = *src[a]++;
            }
        }
    }
#endif
    // ---- groups of 4 output channels ----
    nn_outch = (outch - remain_outch_start) >> 2;
    for (int pp = 0; pp < nn_outch; pp++)
    {
        const int p = remain_outch_start + pp * 4;
        const signed char* src[4];
        for (int a = 0; a < 4; a++)
        {
            src[a] = kernel + (p + a) * kstep;
        }
#if __ARM_NEON && __aarch64__
        // channel index skips past the 8-wide groups laid out above
        signed char* dst = kernel_tm.channel(p/8 + (p%8)/4);
#else
        signed char* dst = kernel_tm.channel(p/4);
#endif // __ARM_NEON && __aarch64__
        // interleave: dst = {k0[q], k1[q], k2[q], k3[q]} for each q
        for (int q = 0; q < kstep; q++)
        {
            for (int a = 0; a < 4; a++)
            {
                *dst++ = *src[a]++;
            }
        }
    }
    remain_outch_start += nn_outch << 2;
    // ---- leftover single output channels: straight copy ----
    for (int p = remain_outch_start; p < outch; p++)
    {
        const signed char* k0 = kernel + p * kstep;
#if __ARM_NEON && __aarch64__
        // skip past 8-wide and 4-wide groups before this channel
        signed char* dst = kernel_tm.channel(p/8 + (p%8)/4 + p%4);
#else
        signed char* dst = kernel_tm.channel(p/4 + p%4);
#endif // __ARM_NEON && __aarch64__
        for (int q = 0; q < kstep; q++)
        {
            dst[q] = k0[q];
        }
    }
}
static void conv_im2col_sgemm_int8_neon(const Mat &bottom_blob, Mat &top_blob, const Mat & kernel_tm, \
const int kernel_w, const int kernel_h, const int stride_w, const int stride_h, const Option& opt)
{
int w = bottom_blob.w;
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
// im2col
Mat bottom_im2col(outw*outh, kernel_h*kernel_w*inch, 1UL, opt.workspace_allocator);
{
const int stride = kernel_h*kernel_w*outw*outh;
signed char* ret = (signed char*)bottom_im2col;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p=0; p<inch; p++)
{
const signed char* input = bottom_blob.channel(p);
int retID = stride * p;
for (int u=0; u<kernel_h; u++)
{
for (int v=0; v<kernel_w; v++)
{
for (int i=0; i<outh; i++)
{
for (int j=0; j<outw; j++)
{
int row = u + i * stride_h;
int col = v + j * stride_w;
int index = row * w + col;
ret[retID] = input[index];
retID++;
}
}
}
}
}
}
int kernel_size = kernel_w * kernel_h;
int out_size = outw * outh;
// bottom_im2col memory packed 8 x 8
Mat bottom_tm(8*kernel_size, inch, out_size/8 + out_size%8, (size_t)1u, opt.workspace_allocator);
{
int nn_size = out_size >> 3;
int remain_size_start = nn_size << 3;
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii=0; ii<nn_size; ii++)
{
int i = ii * 8;
const signed char* img0 = bottom_im2col.channel(0);
img0 += i;
signed char* tmpptr = bottom_tm.channel(i/8);
for (int q=0; q<inch*kernel_size; q++)
{
#if __ARM_NEON
#if __aarch64__
asm volatile(
"prfm pldl1keep, [%0, #64] \n"
"ld1 {v0.8b}, [%0] \n"
"st1 {v0.8b}, [%1] \n"
: "=r"(img0), // %0
"=r"(tmpptr) // %1
: "0"(img0),
"1"(tmpptr)
: "cc", "memory", "v0"
);
#else
asm volatile(
"pld [%0, #64] \n"
"vld1.s8 {d0}, [%0] \n"
"vst1.s8 {d0}, [%1] \n"
: "=r"(img0), // %0
"=r"(tmpptr) // %1
: "0"(img0),
"1"(tmpptr)
: "cc", "memory", "d0"
);
#endif // __aarch64__
#else
tmpptr[0] = img0[0];
tmpptr[1] = img0[1];
tmpptr[2] = img0[2];
tmpptr[3] = img0[3];
tmpptr[4] = img0[4];
tmpptr[5] = img0[5];
tmpptr[6] = img0[6];
tmpptr[7] = img0[7];
#endif // __ARM_NEON
tmpptr += 8;
img0 += out_size;
}
}
#pragma omp parallel for num_threads(opt.num_threads)
for (int i=remain_size_start; i<out_size; i++)
{
const signed char* img0 = bottom_im2col.channel(0);
img0 += i;
signed char* tmpptr = bottom_tm.channel(i/8 + i%8);
for (int q=0; q<inch*kernel_size; q++)
{
tmpptr[0] = img0[0];
tmpptr += 1;
img0 += out_size;
}
}
}
// sgemm(int M, int N, int L, float* A, float* B, float* C)
{
//int M = outch; // outch
int N = outw * outh; // outsize or out stride
int L = kernel_w * kernel_h * inch; // ksize * inch
int nn_outch = 0;
int remain_outch_start = 0;
#if __ARM_NEON && __aarch64__
nn_outch = outch >> 3;
remain_outch_start = nn_outch << 3;
#pragma omp parallel for num_threads(opt.num_threads)
for (int pp=0; pp<nn_outch; pp++)
{
int i = pp * 8;
int* output0 = top_blob.channel(i);
int* output1 = top_blob.channel(i+1);
int* output2 = top_blob.channel(i+2);
int* output3 = top_blob.channel(i+3);
int* output4 = top_blob.channel(i+4);
int* output5 = top_blob.channel(i+5);
int* output6 = top_blob.channel(i+6);
int* output7 = top_blob.channel(i+7);
int j=0;
for (; j+7<N; j=j+8)
{
signed char* vb = bottom_tm.channel(j/8);
const signed char* va = kernel_tm.channel(i/8);
#if __aarch64__
asm volatile(
"eor v16.16b, v16.16b, v16.16b \n" // sum0
"eor v17.16b, v17.16b, v17.16b \n" // sum0n
"eor v18.16b, v18.16b, v18.16b \n" // sum1
"eor v19.16b, v19.16b, v19.16b \n" // sum1n
"eor v20.16b, v20.16b, v20.16b \n" // sum2
"eor v21.16b, v21.16b, v21.16b \n" // sum2n
"eor v22.16b, v22.16b, v22.16b \n" // sum3
"eor v23.16b, v23.16b, v23.16b \n" // sum3n
"eor v24.16b, v24.16b, v24.16b \n" // sum4
"eor v25.16b, v25.16b, v25.16b \n" // sum4n
"eor v26.16b, v26.16b, v26.16b \n" // sum5
"eor v27.16b, v27.16b, v27.16b \n" // sum5n
"eor v28.16b, v28.16b, v28.16b \n" // sum6
"eor v29.16b, v29.16b, v29.16b \n" // sum6n
"eor v30.16b, v30.16b, v30.16b \n" // sum7
"eor v31.16b, v31.16b, v31.16b \n" // sum7n
"lsr w4, %w20, #2 \n"// r4 = nn = L >> 2
"cmp w4, #0 \n"
"beq 1f \n"
"0: \n"// for (; k+3<L; k=k+4)
"prfm pldl1keep, [%9, #128] \n"
"ld1 {v0.8b, v1.8b, v2.8b, v3.8b}, [%9], #32 \n"
"prfm pldl1keep, [%8, #128] \n"
"ld1 {v8.8b, v9.8b, v10.8b, v11.8b}, [%8], #32 \n"
"sshll v0.8h, v0.8b, #0 \n" // k00 - k70
"sshll v1.8h, v1.8b, #0 \n" // k01 - k71
"sshll v2.8h, v2.8b, #0 \n" // k02 - k72
"sshll v3.8h, v3.8b, #0 \n" // k03 - k73
"sshll v8.8h, v8.8b, #0 \n" // a00 - a70
"sshll v9.8h, v9.8b, #0 \n" // a01 - a71
"sshll v10.8h, v10.8b, #0 \n" // a02 - a72
"sshll v11.8h, v11.8b, #0 \n" // a03 - a73
// k0
"smlal v16.4s, v8.4h, v0.h[0] \n"// sum0 += (a00-a70) * k00
"smlal2 v17.4s, v8.8h, v0.h[0] \n"//
"smlal v18.4s, v8.4h, v0.h[1] \n"// sum1 += (a00-a70) * k10
"smlal2 v19.4s, v8.8h, v0.h[1] \n"//
"smlal v20.4s, v8.4h, v0.h[2] \n"// sum2 += (a00-a70) * k20
"smlal2 v21.4s, v8.8h, v0.h[2] \n"//
"smlal v22.4s, v8.4h, v0.h[3] \n"// sum3 += (a00-a70) * k30
"smlal2 v23.4s, v8.8h, v0.h[3] \n"//
"smlal v24.4s, v8.4h, v0.h[4] \n"// sum4 += (a00-a70) * k40
"smlal2 v25.4s, v8.8h, v0.h[4] \n"//
"smlal v26.4s, v8.4h, v0.h[5] \n"// sum5 += (a00-a70) * k50
"smlal2 v27.4s, v8.8h, v0.h[5] \n"//
"smlal v28.4s, v8.4h, v0.h[6] \n"// sum6 += (a00-a70) * k60
"smlal2 v29.4s, v8.8h, v0.h[6] \n"//
"smlal v30.4s, v8.4h, v0.h[7] \n"// sum7 += (a00-a70) * k70
"smlal2 v31.4s, v8.8h, v0.h[7] \n"//
// k1
"smlal v16.4s, v9.4h, v1.h[0] \n"// sum0 += (a01-a71) * k01
"smlal2 v17.4s, v9.8h, v1.h[0] \n"//
"smlal v18.4s, v9.4h, v1.h[1] \n"// sum1 += (a01-a71) * k11
"smlal2 v19.4s, v9.8h, v1.h[1] \n"//
"smlal v20.4s, v9.4h, v1.h[2] \n"// sum2 += (a01-a71) * k21
"smlal2 v21.4s, v9.8h, v1.h[2] \n"//
"smlal v22.4s, v9.4h, v1.h[3] \n"// sum3 += (a01-a71) * k31
"smlal2 v23.4s, v9.8h, v1.h[3] \n"//
"smlal v24.4s, v9.4h, v1.h[4] \n"// sum4 += (a01-a71) * k41
"smlal2 v25.4s, v9.8h, v1.h[4] \n"//
"smlal v26.4s, v9.4h, v1.h[5] \n"// sum5 += (a01-a71) * k51
"smlal2 v27.4s, v9.8h, v1.h[5] \n"//
"smlal v28.4s, v9.4h, v1.h[6] \n"// sum6 += (a01-a71) * k61
"smlal2 v29.4s, v9.8h, v1.h[6] \n"//
"smlal v30.4s, v9.4h, v1.h[7] \n"// sum7 += (a01-a71) * k71
"smlal2 v31.4s, v9.8h, v1.h[7] \n"//
// k2
"smlal v16.4s, v10.4h, v2.h[0] \n"// sum0 += (a02-a72) * k02
"smlal2 v17.4s, v10.8h, v2.h[0] \n"//
"smlal v18.4s, v10.4h, v2.h[1] \n"// sum1 += (a02-a72) * k12
"smlal2 v19.4s, v10.8h, v2.h[1] \n"//
"smlal v20.4s, v10.4h, v2.h[2] \n"// sum2 += (a02-a72) * k22
"smlal2 v21.4s, v10.8h, v2.h[2] \n"//
"smlal v22.4s, v10.4h, v2.h[3] \n"// sum3 += (a02-a72) * k32
"smlal2 v23.4s, v10.8h, v2.h[3] \n"//
"smlal v24.4s, v10.4h, v2.h[4] \n"// sum4 += (a02-a72) * k42
"smlal2 v25.4s, v10.8h, v2.h[4] \n"//
"smlal v26.4s, v10.4h, v2.h[5] \n"// sum5 += (a02-a72) * k52
"smlal2 v27.4s, v10.8h, v2.h[5] \n"//
"smlal v28.4s, v10.4h, v2.h[6] \n"// sum6 += (a02-a72) * k62
"smlal2 v29.4s, v10.8h, v2.h[6] \n"//
"smlal v30.4s, v10.4h, v2.h[7] \n"// sum7 += (a02-a72) * k72
"smlal2 v31.4s, v10.8h, v2.h[7] \n"//
// k3
"smlal v16.4s, v11.4h, v3.h[0] \n"// sum0 += (a03-a73) * k03
"smlal2 v17.4s, v11.8h, v3.h[0] \n"//
"smlal v18.4s, v11.4h, v3.h[1] \n"// sum1 += (a03-a73) * k13
"smlal2 v19.4s, v11.8h, v3.h[1] \n"//
"smlal v20.4s, v11.4h, v3.h[2] \n"// sum2 += (a03-a73) * k23
"smlal2 v21.4s, v11.8h, v3.h[2] \n"//
"smlal v22.4s, v11.4h, v3.h[3] \n"// sum3 += (a03-a73) * k33
"smlal2 v23.4s, v11.8h, v3.h[3] \n"//
"smlal v24.4s, v11.4h, v3.h[4] \n"// sum4 += (a03-a73) * k43
"smlal2 v25.4s, v11.8h, v3.h[4] \n"//
"smlal v26.4s, v11.4h, v3.h[5] \n"// sum5 += (a03-a73) * k53
"smlal2 v27.4s, v11.8h, v3.h[5] \n"//
"smlal v28.4s, v11.4h, v3.h[6] \n"// sum6 += (a03-a73) * k63
"smlal2 v29.4s, v11.8h, v3.h[6] \n"//
"smlal v30.4s, v11.4h, v3.h[7] \n"// sum7 += (a03-a73) * k73
"smlal2 v31.4s, v11.8h, v3.h[7] \n"//
"subs w4, w4, #1 \n"
"bne 0b \n"
"1: \n"
// remain loop
"and w4, %w20, #3 \n"// w4 = remain = inch & 3;
"cmp w4, #0 \n"
"beq 3f \n"
"2: \n"
"prfm pldl1keep, [%9, #128] \n"
"ld1 {v0.8b}, [%9], #8 \n"
"prfm pldl1keep, [%8, #128] \n"
"ld1 {v8.8b}, [%8], #8 \n"
"sshll v0.8h, v0.8b, #0 \n" // k00 - k70
"sshll v8.8h, v8.8b, #0 \n" // a00 - a70
// k0
"smlal v16.4s, v8.4h, v0.h[0] \n"// sum0 += (a00-a70) * k00
"smlal2 v17.4s, v8.8h, v0.h[0] \n"//
"smlal v18.4s, v8.4h, v0.h[1] \n"// sum1 += (a00-a70) * k10
"smlal2 v19.4s, v8.8h, v0.h[1] \n"//
"smlal v20.4s, v8.4h, v0.h[2] \n"// sum2 += (a00-a70) * k20
"smlal2 v21.4s, v8.8h, v0.h[2] \n"//
"smlal v22.4s, v8.4h, v0.h[3] \n"// sum3 += (a00-a70) * k30
"smlal2 v23.4s, v8.8h, v0.h[3] \n"//
"smlal v24.4s, v8.4h, v0.h[4] \n"// sum4 += (a00-a70) * k40
"smlal2 v25.4s, v8.8h, v0.h[4] \n"//
"smlal v26.4s, v8.4h, v0.h[5] \n"// sum5 += (a00-a70) * k50
"smlal2 v27.4s, v8.8h, v0.h[5] \n"//
"smlal v28.4s, v8.4h, v0.h[6] \n"// sum6 += (a00-a70) * k60
"smlal2 v29.4s, v8.8h, v0.h[6] \n"//
"smlal v30.4s, v8.4h, v0.h[7] \n"// sum7 += (a00-a70) * k70
"smlal2 v31.4s, v8.8h, v0.h[7] \n"//
"subs w4, w4, #1 \n"
"bne 2b \n"
"3: \n"
"st1 {v16.4s, v17.4s}, [%0] \n"
"st1 {v18.4s, v19.4s}, [%1] \n"
"st1 {v20.4s, v21.4s}, [%2] \n"
"st1 {v22.4s, v23.4s}, [%3] \n"
"st1 {v24.4s, v25.4s}, [%4] \n"
"st1 {v26.4s, v27.4s}, [%5] \n"
"st1 {v28.4s, v29.4s}, [%6] \n"
"st1 {v30.4s, v31.4s}, [%7] \n"
: "=r"(output0), // %0
"=r"(output1), // %1
"=r"(output2), // %2
"=r"(output3), // %3
"=r"(output4), // %4
"=r"(output5), // %5
"=r"(output6), // %6
"=r"(output7), // %7
"=r"(vb), // %8
"=r"(va) // %9
: "0"(output0),
"1"(output1),
"2"(output2),
"3"(output3),
"4"(output4),
"5"(output5),
"6"(output6),
"7"(output7),
"8"(vb),
"9"(va),
"r"(L) // %20
: "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"
);
#else
int sum0[8] = {0};
int sum1[8] = {0};
int sum2[8] = {0};
int sum3[8] = {0};
int sum4[8] = {0};
int sum5[8] = {0};
int sum6[8] = {0};
int sum7[8] = {0};
int k=0;
for (; k+7<L; k=k+8)
{
for (int n=0; n<8; n++)
{
sum0[n] += (int)va[0] * vb[n];
sum1[n] += (int)va[1] * vb[n];
sum2[n] += (int)va[2] * vb[n];
sum3[n] += (int)va[3] * vb[n];
sum4[n] += (int)va[4] * vb[n];
sum5[n] += (int)va[5] * vb[n];
sum6[n] += (int)va[6] * vb[n];
sum7[n] += (int)va[7] * vb[n];
va += 8;
sum0[n] += (int)va[0] * vb[n+8];
sum1[n] += (int)va[1] * vb[n+8];
sum2[n] += (int)va[2] * vb[n+8];
sum3[n] += (int)va[3] * vb[n+8];
sum4[n] += (int)va[4] * vb[n+8];
sum5[n] += (int)va[5] * vb[n+8];
sum6[n] += (int)va[6] * vb[n+8];
sum7[n] += (int)va[7] * vb[n+8];
va += 8;
sum0[n] += (int)va[0] * vb[n+16];
sum1[n] += (int)va[1] * vb[n+16];
sum2[n] += (int)va[2] * vb[n+16];
sum3[n] += (int)va[3] * vb[n+16];
sum4[n] += (int)va[4] * vb[n+16];
sum5[n] += (int)va[5] * vb[n+16];
sum6[n] += (int)va[6] * vb[n+16];
sum7[n] += (int)va[7] * vb[n+16];
va += 8;
sum0[n] += (int)va[0] * vb[n+24];
sum1[n] += (int)va[1] * vb[n+24];
sum2[n] += (int)va[2] * vb[n+24];
sum3[n] += (int)va[3] * vb[n+24];
sum4[n] += (int)va[4] * vb[n+24];
sum5[n] += (int)va[5] * vb[n+24];
sum6[n] += (int)va[6] * vb[n+24];
sum7[n] += (int)va[7] * vb[n+24];
va += 8;
sum0[n] += (int)va[0] * vb[n+32];
sum1[n] += (int)va[1] * vb[n+32];
sum2[n] += (int)va[2] * vb[n+32];
sum3[n] += (int)va[3] * vb[n+32];
sum4[n] += (int)va[4] * vb[n+32];
sum5[n] += (int)va[5] * vb[n+32];
sum6[n] += (int)va[6] * vb[n+32];
sum7[n] += (int)va[7] * vb[n+32];
va += 8;
sum0[n] += (int)va[0] * vb[n+40];
sum1[n] += (int)va[1] * vb[n+40];
sum2[n] += (int)va[2] * vb[n+40];
sum3[n] += (int)va[3] * vb[n+40];
sum4[n] += (int)va[4] * vb[n+40];
sum5[n] += (int)va[5] * vb[n+40];
sum6[n] += (int)va[6] * vb[n+40];
sum7[n] += (int)va[7] * vb[n+40];
va += 8;
sum0[n] += (int)va[0] * vb[n+48];
sum1[n] += (int)va[1] * vb[n+48];
sum2[n] += (int)va[2] * vb[n+48];
sum3[n] += (int)va[3] * vb[n+48];
sum4[n] += (int)va[4] * vb[n+48];
sum5[n] += (int)va[5] * vb[n+48];
sum6[n] += (int)va[6] * vb[n+48];
sum7[n] += (int)va[7] * vb[n+48];
va += 8;
sum0[n] += (int)va[0] * vb[n+56];
sum1[n] += (int)va[1] * vb[n+56];
sum2[n] += (int)va[2] * vb[n+56];
sum3[n] += (int)va[3] * vb[n+56];
sum4[n] += (int)va[4] * vb[n+56];
sum5[n] += (int)va[5] * vb[n+56];
sum6[n] += (int)va[6] * vb[n+56];
sum7[n] += (int)va[7] * vb[n+56];
va -= 56;
}
va += 64;
vb += 64;
}
for (; k<L; k++)
{
for (int n=0; n<8; n++)
{
sum0[n] += (int)va[0] * vb[n];
sum1[n] += (int)va[1] * vb[n];
sum2[n] += (int)va[2] * vb[n];
sum3[n] += (int)va[3] * vb[n];
sum4[n] += (int)va[4] * vb[n];
sum5[n] += (int)va[5] * vb[n];
sum6[n] += (int)va[6] * vb[n];
sum7[n] += (int)va[7] * vb[n];
}
va += 8;
vb += 8;
}
for (int n=0; n<8; n++)
{
output0[n] = sum0[n];
output1[n] = sum1[n];
output2[n] = sum2[n];
output3[n] = sum3[n];
output4[n] = sum4[n];
output5[n] = sum5[n];
output6[n] = sum6[n];
output7[n] = sum7[n];
}
#endif // __aarch64__
output0 += 8;
output1 += 8;
output2 += 8;
output3 += 8;
output4 += 8;
output5 += 8;
output6 += 8;
output7 += 8;
}
for (; j<N; j++)
{
signed char* vb = bottom_tm.channel(j/8 + j%8);
const signed char* va = kernel_tm.channel(i/8);
#if __aarch64__
asm volatile(
"eor v14.16b, v14.16b, v14.16b \n" // sum0_3
"eor v15.16b, v15.16b, v15.16b \n" // sum4_7
"eor v16.16b, v16.16b, v16.16b \n" // sum0
"eor v17.16b, v17.16b, v17.16b \n" // sum1
"eor v18.16b, v18.16b, v18.16b \n" // sum2
"eor v19.16b, v19.16b, v19.16b \n" // sum3
"eor v20.16b, v20.16b, v20.16b \n" // sum4
"eor v21.16b, v21.16b, v21.16b \n" // sum5
"eor v22.16b, v22.16b, v22.16b \n" // sum6
"eor v23.16b, v23.16b, v23.16b \n" // sum7
"lsr w4, %w20, #2 \n"// r4 = nn = L >> 2
"cmp w4, #0 \n"
"beq 1f \n"
"0: \n"// for (; k+3<L; k=k+4)
"prfm pldl1keep, [%9, #128] \n"
"ld1 {v0.8b, v1.8b, v2.8b, v3.8b}, [%9], #32 \n" // k
//"prfm pldl1keep, [%8, #128] \n"
"ld1 {v4.8b}, [%8] \n" // d
"add %8, %8, #4 \n"
"sshll v0.8h, v0.8b, #0 \n" // k00 - k70
"sshll v1.8h, v1.8b, #0 \n" // k01 - k71
"sshll v2.8h, v2.8b, #0 \n" // k02 - k72
"sshll v3.8h, v3.8b, #0 \n" // k03 - k73
"sshll v4.8h, v4.8b, #0 \n" // a00 - a30
// k0
"smlal v16.4s, v0.4h, v4.h[0] \n"// sum0 += (k00-k70) * a00
"smlal2 v17.4s, v0.8h, v4.h[0] \n"//
"smlal v18.4s, v1.4h, v4.h[1] \n"// sum1 += (k01-k71) * a10
"smlal2 v19.4s, v1.8h, v4.h[1] \n"//
"smlal v20.4s, v2.4h, v4.h[2] \n"// sum2 += (k02-k72) * a20
"smlal2 v21.4s, v2.8h, v4.h[2] \n"//
"smlal v22.4s, v3.4h, v4.h[3] \n"// sum3 += (k03-k73) * a30
"smlal2 v23.4s, v3.8h, v4.h[3] \n"//
"subs w4, w4, #1 \n"
"bne 0b \n"
"add v16.4s, v16.4s, v18.4s \n"
"add v17.4s, v17.4s, v19.4s \n"
"add v20.4s, v20.4s, v22.4s \n"
"add v21.4s, v21.4s, v23.4s \n"
"add v14.4s, v16.4s, v20.4s \n"
"add v15.4s, v17.4s, v21.4s \n"
"1: \n"
// remain loop
"and w4, %w20, #3 \n"// w4 = remain = inch & 3;
"cmp w4, #0 \n"
"beq 3f \n"
"2: \n"
//"prfm pldl1keep, [%9, #128] \n"
"ld1 {v0.8b}, [%9], #8 \n"
//"prfm pldl1keep, [%8, #128] \n"
"ld1 {v4.8b}, [%8] \n"
"add %8, %8, #1 \n"
"sshll v0.8h, v0.8b, #0 \n" // k00 - k70
"sshll v4.8h, v4.8b, #0 \n" // a00
// k0
"smlal v14.4s, v0.4h, v4.h[0] \n"// sum0 += (k00-k70) * a00
"smlal2 v15.4s, v0.8h, v4.h[0] \n"//
"subs w4, w4, #1 \n"
"bne 2b \n"
"3: \n"
"st1 {v14.s}[0], [%0] \n"
"st1 {v14.s}[1], [%1] \n"
"st1 {v14.s}[2], [%2] \n"
"st1 {v14.s}[3], [%3] \n"
"st1 {v15.s}[0], [%4] \n"
"st1 {v15.s}[1], [%5] \n"
"st1 {v15.s}[2], [%6] \n"
"st1 {v15.s}[3], [%7] \n"
: "=r"(output0), // %0
"=r"(output1), // %1
"=r"(output2), // %2
"=r"(output3), // %3
"=r"(output4), // %4
"=r"(output5), // %5
"=r"(output6), // %6
"=r"(output7), // %7
"=r"(vb), // %8
"=r"(va) // %9
: "0"(output0),
"1"(output1),
"2"(output2),
"3"(output3),
"4"(output4),
"5"(output5),
"6"(output6),
"7"(output7),
"8"(vb),
"9"(va),
"r"(L) // %20
: "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23"
);
#else
int sum0 = 0;
int sum1 = 0;
int sum2 = 0;
int sum3 = 0;
int sum4 = 0;
int sum5 = 0;
int sum6 = 0;
int sum7 = 0;
for (int k=0; k<L; k++)
{
sum0 += (int)va[0] * vb[0];
sum1 += (int)va[1] * vb[0];
sum2 += (int)va[2] * vb[0];
sum3 += (int)va[3] * vb[0];
sum4 += (int)va[4] * vb[0];
sum5 += (int)va[5] * vb[0];
sum6 += (int)va[6] * vb[0];
sum7 += (int)va[7] * vb[0];
va += 8;
vb += 1;
}
output0[0] = sum0;
output1[0] = sum1;
output2[0] = sum2;
output3[0] = sum3;
output4[0] = sum4;
output5[0] = sum5;
output6[0] = sum6;
output7[0] = sum7;
#endif // __aarch64__
output0++;
output1++;
output2++;
output3++;
output4++;
output5++;
output6++;
output7++;
}
}
#endif // __ARM_NEON && __aarch64__
nn_outch = (outch - remain_outch_start) >> 2;
#pragma omp parallel for num_threads(opt.num_threads)
for (int pp=0; pp<nn_outch; pp++)
{
int i = remain_outch_start + pp * 4;
int* output0 = top_blob.channel(i);
int* output1 = top_blob.channel(i+1);
int* output2 = top_blob.channel(i+2);
int* output3 = top_blob.channel(i+3);
int j=0;
for (; j+7<N; j=j+8)
{
signed char* vb = bottom_tm.channel(j/8);
#if __ARM_NEON && __aarch64__
const signed char* va = kernel_tm.channel(i/8 + (i%8)/4);
#else
const signed char* va = kernel_tm.channel(i/4);
#endif // __ARM_NEON && __aarch64__
#if __ARM_NEON
#if __aarch64__
asm volatile(
"eor v16.16b, v16.16b, v16.16b \n" // sum0
"eor v17.16b, v17.16b, v17.16b \n" // sum0n
"eor v18.16b, v18.16b, v18.16b \n" // sum1
"eor v19.16b, v19.16b, v19.16b \n" // sum1n
"eor v20.16b, v20.16b, v20.16b \n" // sum2
"eor v21.16b, v21.16b, v21.16b \n" // sum2n
"eor v22.16b, v22.16b, v22.16b \n" // sum3
"eor v23.16b, v23.16b, v23.16b \n" // sum3n
"lsr w4, %w12, #2 \n"// r4 = nn = L >> 2
"cmp w4, #0 \n"
"beq 1f \n"
"0: \n"// for (; k+3<L; k=k+4)
"prfm pldl1keep, [%5, #128] \n"
"ld1 {v0.8b, v1.8b}, [%5], #16 \n"
"prfm pldl1keep, [%4, #128] \n"
"ld1 {v8.8b, v9.8b, v10.8b, v11.8b}, [%4], #32 \n"
"sshll v0.8h, v0.8b, #0 \n" // k00 - k30,k01 - k31
"sshll v1.8h, v1.8b, #0 \n" // k02 - k32,k03 - k33
"sshll v8.8h, v8.8b, #0 \n" // a00 - a70
"sshll v9.8h, v9.8b, #0 \n" // a01 - a71
"sshll v10.8h, v10.8b, #0 \n" // a02 - a72
"sshll v11.8h, v11.8b, #0 \n" // a03 - a73
// k0
"smlal v16.4s, v8.4h, v0.h[0] \n"// sum0 += (a00-a70) * k00
"smlal2 v17.4s, v8.8h, v0.h[0] \n"//
"smlal v18.4s, v8.4h, v0.h[1] \n"// sum1 += (a00-a70) * k10
"smlal2 v19.4s, v8.8h, v0.h[1] \n"//
"smlal v20.4s, v8.4h, v0.h[2] \n"// sum2 += (a00-a70) * k20
"smlal2 v21.4s, v8.8h, v0.h[2] \n"//
"smlal v22.4s, v8.4h, v0.h[3] \n"// sum3 += (a00-a70) * k30
"smlal2 v23.4s, v8.8h, v0.h[3] \n"//
// k1
"smlal v16.4s, v9.4h, v0.h[4] \n"// sum0 += (a01-a71) * k01
"smlal2 v17.4s, v9.8h, v0.h[4] \n"//
"smlal v18.4s, v9.4h, v0.h[5] \n"// sum1 += (a01-a71) * k11
"smlal2 v19.4s, v9.8h, v0.h[5] \n"//
"smlal v20.4s, v9.4h, v0.h[6] \n"// sum2 += (a01-a71) * k21
"smlal2 v21.4s, v9.8h, v0.h[6] \n"//
"smlal v22.4s, v9.4h, v0.h[7] \n"// sum3 += (a01-a71) * k31
"smlal2 v23.4s, v9.8h, v0.h[7] \n"//
// k2
"smlal v16.4s, v10.4h, v1.h[0] \n"// sum0 += (a02-a72) * k02
"smlal2 v17.4s, v10.8h, v1.h[0] \n"//
"smlal v18.4s, v10.4h, v1.h[1] \n"// sum1 += (a02-a72) * k12
"smlal2 v19.4s, v10.8h, v1.h[1] \n"//
"smlal v20.4s, v10.4h, v1.h[2] \n"// sum2 += (a02-a72) * k22
"smlal2 v21.4s, v10.8h, v1.h[2] \n"//
"smlal v22.4s, v10.4h, v1.h[3] \n"// sum3 += (a02-a72) * k32
"smlal2 v23.4s, v10.8h, v1.h[3] \n"//
// k3
"smlal v16.4s, v11.4h, v1.h[4] \n"// sum0 += (a03-a73) * k03
"smlal2 v17.4s, v11.8h, v1.h[4] \n"//
"smlal v18.4s, v11.4h, v1.h[5] \n"// sum1 += (a03-a73) * k13
"smlal2 v19.4s, v11.8h, v1.h[5] \n"//
"smlal v20.4s, v11.4h, v1.h[6] \n"// sum2 += (a03-a73) * k23
"smlal2 v21.4s, v11.8h, v1.h[6] \n"//
"smlal v22.4s, v11.4h, v1.h[7] \n"// sum3 += (a03-a73) * k33
"smlal2 v23.4s, v11.8h, v1.h[7] \n"//
"subs w4, w4, #1 \n"
"bne 0b \n"
"1: \n"
// remain loop
"and w4, %w12, #3 \n"// w4 = remain = inch & 3;
"cmp w4, #0 \n"
"beq 3f \n"
"2: \n"
//"prfm pldl1keep, [%5, #128] \n"
"ld1 {v0.8b}, [%5] \n"
//"prfm pldl1keep, [%4, #128] \n"
"ld1 {v8.8b}, [%4], #8 \n"
"add %5, %5, #4 \n"
"sshll v0.8h, v0.8b, #0 \n" // k00 - k30
"sshll v8.8h, v8.8b, #0 \n" // a00 - a70
// k0
"smlal v16.4s, v8.4h, v0.h[0] \n"// sum0 += (a00-a70) * k00
"smlal2 v17.4s, v8.8h, v0.h[0] \n"//
"smlal v18.4s, v8.4h, v0.h[1] \n"// sum1 += (a00-a70) * k10
"smlal2 v19.4s, v8.8h, v0.h[1] \n"//
"smlal v20.4s, v8.4h, v0.h[2] \n"// sum2 += (a00-a70) * k20
"smlal2 v21.4s, v8.8h, v0.h[2] \n"//
"smlal v22.4s, v8.4h, v0.h[3] \n"// sum3 += (a00-a70) * k30
"smlal2 v23.4s, v8.8h, v0.h[3] \n"//
"subs w4, w4, #1 \n"
"bne 2b \n"
"3: \n"
"st1 {v16.4s, v17.4s}, [%0] \n"
"st1 {v18.4s, v19.4s}, [%1] \n"
"st1 {v20.4s, v21.4s}, [%2] \n"
"st1 {v22.4s, v23.4s}, [%3] \n"
: "=r"(output0), // %0
"=r"(output1), // %1
"=r"(output2), // %2
"=r"(output3), // %3
"=r"(vb), // %4
"=r"(va) // %5
: "0"(output0),
"1"(output1),
"2"(output2),
"3"(output3),
"4"(vb),
"5"(va),
"r"(L) // %12
: "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23"
);
#else
asm volatile(
// K loop
"vmov.s32 q8, #0 \n"
"vmov.s32 q9, #0 \n"
"vmov.s32 q10, #0 \n"
"vmov.s32 q11, #0 \n"
"vmov.s32 q12, #0 \n"
"vmov.s32 q13, #0 \n"
"vmov.s32 q14, #0 \n"
"vmov.s32 q15, #0 \n"
"lsr r4, %12, #3 \n"// r4 = nn = L >> 3
"cmp r4, #0 \n"
"beq 1f \n"
"0: \n"// for(; nn != 0; nn--)
"pld [%4, #128] \n"
"vld1.s8 {d8-d11}, [%4]! \n"// tmpr a00-a07,a10-a17,a20-a27,a30-a37 a(inch)(data)
"vmovl.s8 q7, d11 \n"// a30-a37
"vmovl.s8 q6, d10 \n"// a20-a27
"vmovl.s8 q5, d9 \n"// a10-a17
"vmovl.s8 q4, d8 \n"// a00-a07
"pld [%5, #128] \n"
"vld1.s8 {d0-d3}, [%5]! \n"// kptr k00-k30,k01-k31, k02-k32,k03-k33, k04-k34,k05-k35, k06-k36,k07-k37 k(outch)(inch)
"vmovl.s8 q3, d3 \n"// k06-k36,k07-k37
"vmovl.s8 q2, d2 \n"// k04-k34,k05-k35
"vmovl.s8 q1, d1 \n"// k02-k32,k03-k33
"vmovl.s8 q0, d0 \n"// k00-k30,k01-k31
"vmlal.s16 q8, d8, d0[0] \n"// sum0 = (a00-a07) * k00
"vmlal.s16 q9, d9, d0[0] \n"
"vmlal.s16 q10, d8, d0[1] \n"// sum1 = (a00-a07) * k10
"vmlal.s16 q11, d9, d0[1] \n"
"vmlal.s16 q12, d8, d0[2] \n"// sum2 = (a00-a07) * k20
"vmlal.s16 q13, d9, d0[2] \n"
"vmlal.s16 q14, d8, d0[3] \n"// sum3 = (a00-a07) * k30
"vmlal.s16 q15, d9, d0[3] \n"
"vmlal.s16 q8, d10, d1[0] \n"// sum0 += (a10-a17) * k01
"vmlal.s16 q9, d11, d1[0] \n"
"vmlal.s16 q10, d10, d1[1] \n"// sum1 += (a10-a17) * k11
"vmlal.s16 q11, d11, d1[1] \n"
"vmlal.s16 q12, d10, d1[2] \n"// sum2 += (a10-a17) * k21
"vmlal.s16 q13, d11, d1[2] \n"
"vmlal.s16 q14, d10, d1[3] \n"// sum3 += (a10-a17) * k31
"vmlal.s16 q15, d11, d1[3] \n"
"pld [%4, #128] \n"
"vld1.s8 {d8-d9}, [%4]! \n"// tmpr a00-a07,a10-a17,a20-a27,a30-a37 a(inch)(data)
"vmovl.s8 q5, d9 \n"// a10-a17
"vmovl.s8 q4, d8 \n"// a00-a07
"vmlal.s16 q8, d12, d2[0] \n"// sum0 += (a20-a27) * k02
"vmlal.s16 q9, d13, d2[0] \n"
"vmlal.s16 q10, d12, d2[1] \n"// sum1 += (a20-a27) * k12
"vmlal.s16 q11, d13, d2[1] \n"
"vmlal.s16 q12, d12, d2[2] \n"// sum2 += (a20-a27) * k22
"vmlal.s16 q13, d13, d2[2] \n"
"vmlal.s16 q14, d12, d2[3] \n"// sum3 += (a20-a27) * k32
"vmlal.s16 q15, d13, d2[3] \n"
"vmlal.s16 q8, d14, d3[0] \n"// sum0 += (a30-a37) * k03
"vmlal.s16 q9, d15, d3[0] \n"
"vmlal.s16 q10, d14, d3[1] \n"// sum1 += (a30-a37) * k13
"vmlal.s16 q11, d15, d3[1] \n"
"vmlal.s16 q12, d14, d3[2] \n"// sum2 += (a30-a37) * k23
"vmlal.s16 q13, d15, d3[2] \n"
"vmlal.s16 q14, d14, d3[3] \n"// sum3 += (a30-a37) * k33
"vmlal.s16 q15, d15, d3[3] \n"
"pld [%4, #128] \n"
"vld1.s8 {d0-d1}, [%4]! \n"// tmpr a00-a07,a10-a17,a20-a27,a30-a37 a(inch)(data)
"vmovl.s8 q1, d1 \n"// a10-a17
"vmovl.s8 q0, d0 \n"// a00-a07
"vmlal.s16 q8, d8, d4[0] \n"// sum0 += (a40-a47) * k04
"vmlal.s16 q9, d9, d4[0] \n"
"vmlal.s16 q10, d8, d4[1] \n"// sum1 += (a40-a47) * k14
"vmlal.s16 q11, d9, d4[1] \n"
"vmlal.s16 q12, d8, d4[2] \n"// sum2 += (a40-a47) * k24
"vmlal.s16 q13, d9, d4[2] \n"
"vmlal.s16 q14, d8, d4[3] \n"// sum3 += (a40-a47) * k34
"vmlal.s16 q15, d9, d4[3] \n"
"vmlal.s16 q8, d10, d5[0] \n"// sum0 += (a50-a57) * k05
"vmlal.s16 q9, d11, d5[0] \n"
"vmlal.s16 q10, d10, d5[1] \n"// sum1 += (a50-a57) * k15
"vmlal.s16 q11, d11, d5[1] \n"
"vmlal.s16 q12, d10, d5[2] \n"// sum2 += (a50-a57) * k25
"vmlal.s16 q13, d11, d5[2] \n"
"vmlal.s16 q14, d10, d5[3] \n"// sum3 += (a50-a57) * k35
"vmlal.s16 q15, d11, d5[3] \n"
"vmlal.s16 q8, d0, d6[0] \n"// sum0 += (a60-a67) * k06
"vmlal.s16 q9, d1, d6[0] \n"
"vmlal.s16 q10, d0, d6[1] \n"// sum1 += (a60-a67) * k16
"vmlal.s16 q11, d1, d6[1] \n"
"vmlal.s16 q12, d0, d6[2] \n"// sum2 += (a60-a67) * k26
"vmlal.s16 q13, d1, d6[2] \n"
"vmlal.s16 q14, d0, d6[3] \n"// sum3 += (a60-a67) * k36
"vmlal.s16 q15, d1, d6[3] \n"
"vmlal.s16 q8, d2, d7[0] \n"// sum0 += (a70-a77) * k07
"vmlal.s16 q9, d3, d7[0] \n"
"vmlal.s16 q10, d2, d7[1] \n"// sum1 += (a70-a77) * k17
"vmlal.s16 q11, d3, d7[1] \n"
"vmlal.s16 q12, d2, d7[2] \n"// sum2 += (a70-a77) * k27
"vmlal.s16 q13, d3, d7[2] \n"
"vmlal.s16 q14, d2, d7[3] \n"// sum3 += (a70-a77) * k37
"vmlal.s16 q15, d3, d7[3] \n"
"subs r4, r4, #1 \n"
"bne 0b \n"// end for
"1: \n"
// remain loop
"and r4, %12, #7 \n"// r4 = remain = inch & 7
"cmp r4, #0 \n"
"beq 3f \n"
"2: \n"// for(; remain != 0; remain--)
"vld1.s8 {d2}, [%4]! \n"// tmpr a00-a70 a(inch)(data)
"vld1.s8 {d0}, [%5] \n"// kptr k00-k30 k(outch)(inch)
"vmovl.s8 q1, d2 \n"
"vmovl.s8 q0, d0 \n"
"add %5, #4 \n"
"vmlal.s16 q8, d2, d0[0] \n"// sum0 += (a00-a70) * k00
"vmlal.s16 q9, d3, d0[0] \n"
"vmlal.s16 q10, d2, d0[1] \n"// sum1 += (a00-a70) * k10
"vmlal.s16 q11, d3, d0[1] \n"
"vmlal.s16 q12, d2, d0[2] \n"// sum2 += (a00-a70) * k20
"vmlal.s16 q13, d3, d0[2] \n"
"vmlal.s16 q14, d2, d0[3] \n"// sum3 += (a00-a70) * k30
"vmlal.s16 q15, d3, d0[3] \n"
"subs r4, r4, #1 \n"
"bne 2b \n"
"3: \n"// store the result to memory
"vst1.s32 {d16-d19}, [%0] \n"
"vst1.s32 {d20-d23}, [%1] \n"
"vst1.s32 {d24-d27}, [%2] \n"
"vst1.s32 {d28-d31}, [%3] \n"
: "=r"(output0), // %0
"=r"(output1), // %1
"=r"(output2), // %2
"=r"(output3), // %3
"=r"(vb), // %4
"=r"(va) // %5
: "0"(output0),
"1"(output1),
"2"(output2),
"3"(output3),
"4"(vb),
"5"(va),
"r"(L) // %12
: "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
);
#endif // __aarch64__
#else
int sum0[8] = {0};
int sum1[8] = {0};
int sum2[8] = {0};
int sum3[8] = {0};
int k=0;
for (; k+7<L; k=k+8)
{
for (int n=0; n<8; n++)
{
sum0[n] += (int)va[0] * vb[n];
sum1[n] += (int)va[1] * vb[n];
sum2[n] += (int)va[2] * vb[n];
sum3[n] += (int)va[3] * vb[n];
va += 4;
sum0[n] += (int)va[0] * vb[n+8];
sum1[n] += (int)va[1] * vb[n+8];
sum2[n] += (int)va[2] * vb[n+8];
sum3[n] += (int)va[3] * vb[n+8];
va += 4;
sum0[n] += (int)va[0] * vb[n+16];
sum1[n] += (int)va[1] * vb[n+16];
sum2[n] += (int)va[2] * vb[n+16];
sum3[n] += (int)va[3] * vb[n+16];
va += 4;
sum0[n] += (int)va[0] * vb[n+24];
sum1[n] += (int)va[1] * vb[n+24];
sum2[n] += (int)va[2] * vb[n+24];
sum3[n] += (int)va[3] * vb[n+24];
va += 4;
sum0[n] += (int)va[0] * vb[n+32];
sum1[n] += (int)va[1] * vb[n+32];
sum2[n] += (int)va[2] * vb[n+32];
sum3[n] += (int)va[3] * vb[n+32];
va += 4;
sum0[n] += (int)va[0] * vb[n+40];
sum1[n] += (int)va[1] * vb[n+40];
sum2[n] += (int)va[2] * vb[n+40];
sum3[n] += (int)va[3] * vb[n+40];
va += 4;
sum0[n] += (int)va[0] * vb[n+48];
sum1[n] += (int)va[1] * vb[n+48];
sum2[n] += (int)va[2] * vb[n+48];
sum3[n] += (int)va[3] * vb[n+48];
va += 4;
sum0[n] += (int)va[0] * vb[n+56];
sum1[n] += (int)va[1] * vb[n+56];
sum2[n] += (int)va[2] * vb[n+56];
sum3[n] += (int)va[3] * vb[n+56];
va -= 28;
}
va += 32;
vb += 64;
}
for (; k<L; k++)
{
for (int n=0; n<8; n++)
{
sum0[n] += (int)va[0] * vb[n];
sum1[n] += (int)va[1] * vb[n];
sum2[n] += (int)va[2] * vb[n];
sum3[n] += (int)va[3] * vb[n];
}
va += 4;
vb += 8;
}
for (int n=0; n<8; n++)
{
output0[n] = sum0[n];
output1[n] = sum1[n];
output2[n] = sum2[n];
output3[n] = sum3[n];
}
#endif // __ARM_NEON
output0 += 8;
output1 += 8;
output2 += 8;
output3 += 8;
}
for (; j<N; j++)
{
signed char* vb = bottom_tm.channel(j/8 + j%8);
#if __ARM_NEON && __aarch64__
const signed char* va = kernel_tm.channel(i/8 + (i%8)/4);
#else
const signed char* va = kernel_tm.channel(i/4);
#endif // __ARM_NEON && __aarch64__
#if __ARM_NEON
#if __aarch64__
asm volatile(
"eor v14.16b, v14.16b, v14.16b \n" // sum0_3
"eor v16.16b, v16.16b, v16.16b \n" // sum0
"eor v17.16b, v17.16b, v17.16b \n" // sum1
"eor v18.16b, v18.16b, v18.16b \n" // sum2
"eor v19.16b, v19.16b, v19.16b \n" // sum3
"lsr w4, %w12, #2 \n"// r4 = nn = L >> 2
"cmp w4, #0 \n"
"beq 1f \n"
"0: \n"// for (; k+3<L; k=k+4)
"prfm pldl1keep, [%5, #128] \n"
"ld1 {v0.8b, v1.8b}, [%5], #16 \n" // k
//"prfm pldl1keep, [%4, #128] \n"
"ld1 {v4.8b}, [%4] \n" // d
"add %4, %4, #4 \n"
"sshll v0.8h, v0.8b, #0 \n" // k00 - k30,k01 - k31
"sshll v1.8h, v1.8b, #0 \n" // k02 - k32,k03 - k33
"sshll v4.8h, v4.8b, #0 \n" // a00 - a30
"subs w4, w4, #1 \n"
// k0
"smlal v16.4s, v0.4h, v4.h[0] \n"// sum0 += (k00-k30) * a00
"smlal2 v17.4s, v0.8h, v4.h[0] \n"// sum1 += (k01-k31) * a10
"smlal v18.4s, v1.4h, v4.h[1] \n"// sum2 += (k02-k32) * a20
"smlal2 v19.4s, v1.8h, v4.h[1] \n"// sum3 += (k03-k33) * a30
"bne 0b \n"
"add v16.4s, v16.4s, v18.4s \n"
"add v17.4s, v17.4s, v19.4s \n"
"add v14.4s, v16.4s, v17.4s \n"
"1: \n"
// remain loop
"and w4, %w12, #3 \n"// w4 = remain = inch & 3;
"cmp w4, #0 \n"
"beq 3f \n"
"2: \n"
//"prfm pldl1keep, [%5, #128] \n"
"ld1 {v0.8b}, [%5] \n"
//"prfm pldl1keep, [4, #128] \n"
"ld1 {v4.8b}, [%4] \n"
"add %4, %4, #1 \n"
"add %5, %5, #4 \n"
"subs w4, w4, #1 \n"
"sshll v0.8h, v0.8b, #0 \n" // k00 - k30
"sshll v4.8h, v4.8b, #0 \n" // a00
// k0
"smlal v14.4s, v0.4h, v4.h[0] \n"// sum0 += (k00-k30) * a00
"bne 2b \n"
"3: \n"
"st1 {v14.s}[0], [%0] \n"
"st1 {v14.s}[1], [%1] \n"
"st1 {v14.s}[2], [%2] \n"
"st1 {v14.s}[3], [%3] \n"
: "=r"(output0), // %0
"=r"(output1), // %1
"=r"(output2), // %2
"=r"(output3), // %3
"=r"(vb), // %4
"=r"(va) // %5
: "0"(output0),
"1"(output1),
"2"(output2),
"3"(output3),
"4"(vb),
"5"(va),
"r"(L) // %12
: "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19"
);
#else
asm volatile(
// inch loop
"veor q6, q6, q6 \n"
"veor q7, q7, q7 \n"
"veor q8, q8, q8 \n"
"veor q9, q9, q9 \n"
"veor q10, q10, q10 \n"
"veor q11, q11, q11 \n"
"veor q12, q12, q12 \n"
"veor q13, q13, q13 \n"
"vmov.s32 q14, #0 \n"
"lsr r4, %12, #3 \n"// r4 = nn = L >> 2
"cmp r4, #0 \n"
"beq 1f \n"
"0: \n"// for(; nn != 0; nn--)
"pld [%4, #128] \n"
"vld1.s8 {d0}, [%4]! \n"// tmpr a00,a10,a20,a30 a(inch)(data)
"vmovl.s8 q0, d0 \n"// a00-a07
"pld [%5, #128] \n"
"vld1.s8 {d2-d5}, [%5]! \n"// kptr k00-k30,k01-k31, k02-k32,k03-k33, k04-k34,k05-k35, k06-k36,k07-k37 k(outch)(inch)
"vmovl.s8 q4, d5 \n"// k06-k36,k07-k37
"vmovl.s8 q3, d4 \n"// k04-k34,k05-k35
"vmovl.s8 q2, d3 \n"// k02-k32,k03-k33
"vmovl.s8 q1, d2 \n"// k00-k30,k01-k31
"vmlal.s16 q6, d2, d0[0] \n"// (k00-k30) * a00
"vmlal.s16 q7, d3, d0[1] \n"// (k01-k31) * a01
"vmlal.s16 q8, d4, d0[2] \n"// (k02-k32) * a02
"vmlal.s16 q9, d5, d0[3] \n"// (k03-k33) * a03
"vmlal.s16 q10, d6, d1[0] \n"// (k04-k34) * a04
"vmlal.s16 q11, d7, d1[1] \n"// (k05-k35) * a05
"vmlal.s16 q12, d8, d1[2] \n"// (k06-k36) * a06
"vmlal.s16 q13, d9, d1[3] \n"// (k07-k37) * a07
"subs r4, r4, #1 \n"
"bne 0b \n"// end for
"vadd.s32 q6, q6, q7 \n"
"vadd.s32 q9, q9, q8 \n"
"vadd.s32 q11, q11, q10 \n"
"vadd.s32 q13, q13, q12 \n"
"vadd.s32 q9, q9, q6 \n"
"vadd.s32 q13, q13, q11 \n"
"vadd.s32 q14, q13, q9 \n"
"1: \n"
// remain loop
"and r4, %12, #7 \n"// r4 = remain = inch & 3
"cmp r4, #0 \n"
"beq 3f \n"
"2: \n"// for(; remain != 0; remain--)
"vld1.s8 {d2}, [%4] \n"// tmpr a00 a(inch)(data)
"vld1.s8 {d0}, [%5] \n"// kptr k00-k30 k(outch)(inch)
"vmovl.s8 q1, d2 \n"
"vmovl.s8 q0, d0 \n"
"add %4, #1 \n"
"add %5, #4 \n"
"vmlal.s16 q14, d0, d2[0] \n"
"subs r4, r4, #1 \n"
"bne 2b \n"
"3: \n"// store the result to memory
"vst1.s32 {d28[0]}, [%0] \n"
"vst1.s32 {d28[1]}, [%1] \n"
"vst1.s32 {d29[0]}, [%2] \n"
"vst1.s32 {d29[1]}, [%3] \n"
: "=r"(output0), // %0
"=r"(output1), // %1
"=r"(output2), // %2
"=r"(output3), // %3
"=r"(vb), // %4
"=r"(va) // %5
: "0"(output0),
"1"(output1),
"2"(output2),
"3"(output3),
"4"(vb),
"5"(va),
"r"(L) // %12
: "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14"
);
#endif // __aarch64__
#else
int sum0 = 0;
int sum1 = 0;
int sum2 = 0;
int sum3 = 0;
for (int k=0; k<L; k++)
{
sum0 += (int)va[0] * vb[0];
sum1 += (int)va[1] * vb[0];
sum2 += (int)va[2] * vb[0];
sum3 += (int)va[3] * vb[0];
va += 4;
vb += 1;
}
output0[0] = sum0;
output1[0] = sum1;
output2[0] = sum2;
output3[0] = sum3;
#endif // __ARM_NEON
output0++;
output1++;
output2++;
output3++;
}
}
remain_outch_start += nn_outch << 2;
#pragma omp parallel for num_threads(opt.num_threads)
for (int i=remain_outch_start; i<outch; i++)
{
int* output = top_blob.channel(i);
int j=0;
for (; j+7<N; j=j+8)
{
signed char* vb = bottom_tm.channel(j/8);
#if __ARM_NEON && __aarch64__
const signed char* va = kernel_tm.channel(i/8 + (i%8)/4 + i%4);
#else
const signed char* va = kernel_tm.channel(i/4 + i%4);
#endif // __ARM_NEON && __aarch64__
#if __ARM_NEON
#if __aarch64__
asm volatile(
"eor v16.16b, v16.16b, v16.16b \n" // sum0
"eor v17.16b, v17.16b, v17.16b \n" // sum0n
"lsr w4, %w6, #2 \n"// r4 = nn = L >> 2
"cmp w4, #0 \n"
"beq 1f \n"
"0: \n"// for (; k+3<L; k=k+4)
"prfm pldl1keep, [%2, #128] \n"
"ld1 {v0.8b}, [%2] \n"
"prfm pldl1keep, [%1, #128] \n"
"ld1 {v8.8b, v9.8b, v10.8b, v11.8b}, [%1], #32 \n"
"add %2, %2, #4 \n"
"sshll v0.8h, v0.8b, #0 \n" // k00 - k03
"sshll v8.8h, v8.8b, #0 \n" // a00 - a70
"sshll v9.8h, v9.8b, #0 \n" // a01 - a71
"sshll v10.8h, v10.8b, #0 \n" // a02 - a72
"sshll v11.8h, v11.8b, #0 \n" // a03 - a73
// k0
"smlal v16.4s, v8.4h, v0.h[0] \n"// sum0 += (a00-a70) * k00
"smlal2 v17.4s, v8.8h, v0.h[0] \n"//
// k1
"smlal v16.4s, v9.4h, v0.h[1] \n"// sum0 += (a01-a71) * k01
"smlal2 v17.4s, v9.8h, v0.h[1] \n"//
// k2
"smlal v16.4s, v10.4h, v0.h[2] \n"// sum0 += (a02-a72) * k02
"smlal2 v17.4s, v10.8h, v0.h[2] \n"//
// k3
"smlal v16.4s, v11.4h, v0.h[3] \n"// sum0 += (a03-a73) * k03
"smlal2 v17.4s, v11.8h, v0.h[3] \n"//
"subs w4, w4, #1 \n"
"bne 0b \n"
"1: \n"
// remain loop
"and w4, %w6, #3 \n"// w4 = remain = inch & 3;
"cmp w4, #0 \n"
"beq 3f \n"
"2: \n"
//"prfm pldl1keep, [%2, #128] \n"
"ld1 {v0.8b}, [%2] \n"
//"prfm pldl1keep, [%1, #128] \n"
"ld1 {v8.8b}, [%1], #8 \n"
"add %2, %2, #1 \n"
"sshll v0.8h, v0.8b, #0 \n" // k00 - k30
"sshll v8.8h, v8.8b, #0 \n" // a00 - a70
// k0
"smlal v16.4s, v8.4h, v0.h[0] \n"// sum0 += (a00-a70) * k00
"smlal2 v17.4s, v8.8h, v0.h[0] \n"//
"subs w4, w4, #1 \n"
"bne 2b \n"
"3: \n"
"st1 {v16.4s, v17.4s}, [%0] \n"
: "=r"(output), // %0
"=r"(vb), // %1
"=r"(va) // %2
: "0"(output),
"1"(vb),
"2"(va),
"r"(L) // %6
: "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17"
);
#else
asm volatile(
// inch loop
"vmov.s32 q6, #0 \n"
"vmov.s32 q7, #0 \n"
"lsr r4, %6, #3 \n"// r4 = nn = inch >> 3
"cmp r4, #0 \n"
"beq 1f \n"
"0: \n"// for(; nn != 0; nn--)
"pld [%1, #128] \n"
"vld1.s8 {d4-d7}, [%1]! \n"// tmpr a00-a07,a10-a17,a20-a27,a30-a37 a(inch)(data)
"vmovl.s8 q5, d7 \n"// a30-a37
"vmovl.s8 q4, d6 \n"// a20-a27
"vmovl.s8 q3, d5 \n"// a10-a17
"vmovl.s8 q2, d4 \n"// a00-a07
"pld [%2, #128] \n"
"vld1.s8 {d0}, [%2]! \n"// kptr k00-k07 k(outch)(inch)
"vmovl.s8 q1, d1 \n"// k04,k05,k06,k07
"vmovl.s8 q0, d0 \n"// k00,k01,k02,k03
"vmlal.s16 q6, d4, d0[0] \n"// (a00-a07) * k00
"vmlal.s16 q7, d5, d0[0] \n"
"vmlal.s16 q6, d6, d0[1] \n"// (a10-a17) * k01
"vmlal.s16 q7, d7, d0[1] \n"
"vmlal.s16 q6, d8, d0[2] \n"// (a20-a27) * k02
"vmlal.s16 q7, d9, d0[2] \n"
"vmlal.s16 q6, d10, d0[3] \n"// (a30-a37) * k03
"vmlal.s16 q7, d11, d0[3] \n"
"pld [%1, #128] \n"
"vld1.s8 {d4-d7}, [%1]! \n"// tmpr a40-a47,a50-a57,a60-a67,a70-a77 a(inch)(data)
"vmovl.s8 q5, d7 \n"// a70-a77
"vmovl.s8 q4, d6 \n"// a60-a67
"vmovl.s8 q3, d5 \n"// a50-a57
"vmovl.s8 q2, d4 \n"// a40-a47
"vmlal.s16 q6, d4, d1[0] \n"// (a00-a07) * k00
"vmlal.s16 q7, d5, d1[0] \n"
"vmlal.s16 q6, d6, d1[1] \n"// (a10-a17) * k01
"vmlal.s16 q7, d7, d1[1] \n"
"vmlal.s16 q6, d8, d1[2] \n"// (a20-a27) * k02
"vmlal.s16 q7, d9, d1[2] \n"
"vmlal.s16 q6, d10, d1[3] \n"// (a30-a37) * k03
"vmlal.s16 q7, d11, d1[3] \n"
"subs r4, r4, #1 \n"
"bne 0b \n"// end for
"1: \n"
// remain loop
"and r4, %6, #7 \n"// r4 = remain = inch & 7
"cmp r4, #0 \n"
"beq 3f \n"
"2: \n"// for(; remain != 0; remain--)
"vld1.s8 {d2}, [%1]! \n"// tmpr a00-a07 a(inch)(data)
"vld1.s8 {d0}, [%2] \n"// kptr k00 k(outch)(inch)
"vmovl.s8 q1, d2 \n"
"vmovl.s8 q0, d0 \n"
"add %2, #1 \n"
"vmlal.s16 q6, d2, d0[0] \n"// (a00-a07) * k00
"vmlal.s16 q7, d3, d0[0] \n"
"subs r4, r4, #1 \n"
"bne 2b \n"
"3: \n"// store the result to memory
"vst1.s32 {d12-d15}, [%0] \n"
: "=r"(output), // %0
"=r"(vb), // %1
"=r"(va) // %2
: "0"(output),
"1"(vb),
"2"(va),
"r"(L) // %6
: "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7"
);
#endif // __aarch64__
#else
int sum[8] = {0};
int k=0;
for (; k+7<L; k=k+8)
{
for (int n=0; n<8; n++)
{
sum[n] += (int)va[0] * vb[n];
sum[n] += (int)va[1] * vb[n+8];
sum[n] += (int)va[2] * vb[n+16];
sum[n] += (int)va[3] * vb[n+24];
sum[n] += (int)va[4] * vb[n+32];
sum[n] += (int)va[5] * vb[n+40];
sum[n] += (int)va[6] * vb[n+48];
sum[n] += (int)va[7] * vb[n+56];
}
va += 8;
vb += 64;
}
for (; k<L; k++)
{
for (int n=0; n<8; n++)
{
sum[n] += (int)va[0] * vb[n];
}
va += 1;
vb += 8;
}
for (int n=0; n<8; n++)
{
output[n] = sum[n];
}
#endif // __ARM_NEON
output += 8;
}
for (; j<N; j++)
{
int sum = 0;
signed char* vb = bottom_tm.channel(j/8 + j%8);
#if __ARM_NEON && __aarch64__
const signed char* va = kernel_tm.channel(i/8 + (i%8)/4 + i%4);
#else
const signed char* va = kernel_tm.channel(i/4 + i%4);
#endif // __ARM_NEON && __aarch64__
for (int k=0; k<L; k++)
{
sum += (int)va[0] * vb[0];
va += 1;
vb += 1;
}
output[0] = sum;
output++;
}
}
}
}
#endif
|
ddcMalloc.c | // $Id$
#define _XOPEN_SOURCE 600
#include "ddcMalloc.h"
#include "mpiUtils.h"
#include <stdlib.h>
#include <stdio.h>
#include <assert.h>
#include <libgen.h>
#include <string.h>
#ifndef __APPLE__
#include <malloc.h>
#endif
#ifdef WITH_PIO
#include "pio.h"
#endif
static int addBlock(void* ptr, size_t size, char* location);
static int updateBlock(void* old_ptr, void* new_ptr, size_t size, char* location);
static int freeBlock(void* ptr);
static int findBlock(void* ptr);
static void printHeapInfo(FILE* file);
#ifdef WITH_PIO
static void printHeapInfo_pio(PFILE* file);
#endif
static int _verboseTask = -1;
typedef struct memBlock_st
{
size_t size;
void* ptr;
char location[40];
} MEMBLOCK;
#define MAX_BLOCK 32000
static MEMBLOCK _block[MAX_BLOCK];
static int _freeBlock[MAX_BLOCK];
static int _nextBlock = MAX_BLOCK +1;
static size_t _memUsed = 0;
static size_t _peakUsed = 0;
static int _blocksUsed = 0;
const double b2mb=1024*1024;
/** Select which MPI task emits per-allocation trace messages.
 *  Pass the rank that should log; the initial value of -1 (set at file
 *  scope) disables tracing on all tasks. */
void ddcMemSetVerbose(int task)
{
   _verboseTask = task;
}
void ddcMemInit(void)
{
for (unsigned ii=0; ii<MAX_BLOCK; ++ii)
{
_freeBlock[ii] = ii;
_block[ii].size = 0;
_block[ii].ptr = NULL;
_block[ii].location[0] = '\0';
}
_nextBlock = 0;
}
/** Print a one-line memory summary (peak, current, live block count)
 *  for this task to the given stream. */
void ddcMemSummary(FILE* file)
{
   const double peakMB = _peakUsed/b2mb;
   const double currentMB = _memUsed/b2mb;
   fprintf(file,
           "ddcMem task %d: peak=%7.2fMB current=%7.2fMB in %d blocks\n",
           getRank(0), peakMB, currentMB, _blocksUsed);
}
#ifdef WITH_PIO
/** Parallel-I/O variant of ddcMemSummary: same one-line summary, written
 *  through a pio PFILE stream instead of a FILE. */
void ddcMemSummary_pio(PFILE* file)
{
   Pprintf(file,
           "ddcMem task %d: peak=%7.2fMB current=%7.2fMB in %d blocks\n",
           getRank(0), _peakUsed/b2mb, _memUsed/b2mb, _blocksUsed);
}
#endif
void ddcMemReport(FILE* file)
{
size_t totalSize = 0;
fprintf(file, "ddcMem report for task %d\n\n", getRank(0));
fprintf(file, "Block ptr size location\n");
fprintf(file, "=======================================================================\n");
for (unsigned ii=0; ii<MAX_BLOCK; ++ii)
{
if (_block[ii].ptr == NULL)
continue;
fprintf(file, "%5d: %10p %12zuk %s\n", ii, _block[ii].ptr,
_block[ii].size/1024, _block[ii].location);
totalSize += _block[ii].size;
}
fprintf(file, "\nTotal size = %f MB\n", totalSize/b2mb);
fprintf(file, "Peak size = %f MB\n\n", _peakUsed/b2mb);
printHeapInfo(file);
}
#ifdef WITH_PIO
/** Parallel-I/O variant of ddcMemReport: full table of live blocks plus
 *  totals, written through a pio PFILE stream. */
void ddcMemReport_pio(PFILE* file)
{
   size_t totalSize = 0;
   Pprintf(file, "ddcMem report for task %d\n\n", getRank(0));
   Pprintf(file, "Block ptr size location\n");
   Pprintf(file, "=======================================================================\n");
   for (unsigned ii=0; ii<MAX_BLOCK; ++ii)
   {
      if (_block[ii].ptr == NULL)
         continue;
      /* Fixed to match ddcMemReport: the old "0x%08x" truncated 64-bit
         pointers and "%12i" mismatched the size_t argument.
         NOTE(review): assumes Pprintf accepts %p/%zu like printf --
         it already takes %d/%f here; confirm against pio.c. */
      Pprintf(file, "%5d: %10p %12zuk %s\n", ii, _block[ii].ptr,
              _block[ii].size/1024, _block[ii].location);
      totalSize += _block[ii].size;
   }
   Pprintf(file, "\nTotal size = %f MB\n", totalSize/b2mb);
   Pprintf(file, "Peak size = %f MB\n\n", _peakUsed/b2mb);
   printHeapInfo_pio(file);
}
#endif
/** Implementation Note: Some implementations of malloc (such as Purple's)
* return a null pointer when called with a size of zero. This is
* allowed by the POSIX standard. It is entirely likely that we will
* call malloc with zero size since some tasks may logically have zero
* of some items such as fftChannels.
*
* We don't want malloc to return NULL for two reasons: First, we want
* to use a NULL return as a sign that malloc has failed. Second, we
* would like a unique pointer for each call to malloc so that we can
* add it to the block table. If two entries in the block table have
* the same pointer they become indistinguishable when we try to free
* the pointer (since only the pointer is passed to ddcFree).
*
* To work around this problem we check for a zero size and always
* allocate at least sizeof(void*). This way a NULL return is a true
* error and we get a unique pointer value to add to the block table.
*/
/** Tracked wrapper around malloc().  A zero size is promoted to
 *  sizeof(void*) so the return is non-NULL and unique (see the
 *  Implementation Note above).  On failure prints diagnostics and
 *  returns NULL; on success records the block in the tracking table
 *  (and traces it when this task is the verbose task). */
void* _ddcMalloc(size_t size, char* location)
{
   if (size == 0)
      size = sizeof(void*);
   void* ptr = malloc(size);
   if (ptr == NULL)
   {
      printf("mem: ddcMalloc failed on task %d (%zu bytes at %s)\n"
             " memUsed=%8.2f\n",
             getRank(0), size, location, _memUsed/b2mb);
      printHeapInfo(stdout);
      return NULL;
   }
   int b = addBlock(ptr, size, location);
   if (_verboseTask == getRank(0))
   {
      printf("mem: task %d block %d malloc %10p (%zu bytes at %s) total %8.2f\n",
             getRank(0), b, ptr, size, location, _memUsed/b2mb);
      printHeapInfo(stdout);
   }
   return ptr;
}
/** For the same reasons as explained above _ddcMalloc we check for zero
* size allocations and ensure at least a little memory is allocated. */
/** Tracked wrapper around calloc().  Zero count or size is promoted to
 *  a minimal allocation so the return is non-NULL and unique (see the
 *  Implementation Note above _ddcMalloc).  On failure prints diagnostics
 *  and returns NULL; on success records the block and returns the
 *  zero-initialized memory. */
void* _ddcCalloc(size_t count, size_t size, char* location)
{
   if (count == 0)
      count = 1;
   if (size == 0)
      size = sizeof(void*);
   void* ptr = calloc(count, size);
   if (!ptr)
   {
      /* fixed: error message was missing the closing ')' */
      printf("mem: ddcCalloc failed on task %d (%zu bytes at %s)\n"
             " memUsed=%8.2f\n",
             getRank(0), size*count, location, _memUsed/b2mb);
      printHeapInfo(stdout);
   }
   else
   {
      int b = addBlock(ptr, size*count, location);
      if (_verboseTask == getRank(0))
         printf("mem: task %d block %d calloc %10p (%zu bytes at %s) total %8.2f\n",
                getRank(0), b, ptr, size*count, location, _memUsed/b2mb);
   }
   return ptr;
}
/** For the same reasons as explained above _ddcMalloc we check for zero
* size allocations and ensure at least a little memory is allocated.
* By POSIX, if size is zero and ptr is non-null the object is freed.*/
/** Tracked wrapper around realloc().  POSIX corner cases are handled
 *  explicitly (see the Implementation Note above _ddcMalloc):
 *  - size==0 and ptr==NULL: behave like a minimal malloc so the caller
 *    gets a unique, non-NULL pointer;
 *  - size==0 and ptr!=NULL: the object is freed (through _ddcFree so
 *    the block table stays consistent) and NULL is returned.
 *  On success the block-table entry for the old pointer is moved to the
 *  new one via updateBlock.  Returns the new pointer, or NULL if
 *  realloc failed (in which case the old block is untouched). */
void* _ddcRealloc(void* ptr, size_t size, char* location)
{
   if (size == 0 && ptr == NULL)
      size = sizeof(void*);
   if (size == 0 && ptr != NULL)
   {
      _ddcFree(ptr, location);
      return NULL;
   }
   void* old_ptr = ptr;
   void* new_ptr = realloc(ptr, size);
   if (!new_ptr)
   {
      printf("mem: ddcRealloc failed on task %d (%zu bytes at %s)\n"
             " ptr=%10p\n"
             " memUsed=%8.2f\n",
             getRank(0), size, location, ptr, _memUsed/b2mb);
      printHeapInfo(stdout);
   }
   else
   {
      int b = updateBlock(old_ptr, new_ptr, size, location);
      if (_verboseTask == getRank(0))
         /* NOTE(review): this trace prints the pre-realloc pointer value
            (ptr), not new_ptr -- looks like an oversight; confirm before
            relying on the logged address. */
         printf("mem: task %d block %d realloc %10p (%zu bytes at %s) total %8.2f\n",
                getRank(0), b, ptr, size, location, _memUsed/b2mb);
   }
   return new_ptr;
}
/** For the same reasons as explained above _ddcMalloc we check for zero
* size allocations and ensure at least a little memory is allocated. */
/** Tracked wrapper around _mallocAligned (posix_memalign semantics).
 *  Zero size is promoted to sizeof(void*) (see the Implementation Note
 *  above _ddcMalloc).  *ptr receives the allocation; the return value
 *  is the status code from _mallocAligned (0 on success).
 *
 *  Fixed: the block table used to record the address of the caller's
 *  pointer variable (ptr) instead of the allocation itself (*ptr), so
 *  aligned blocks could never be matched later by freeBlock/updateBlock;
 *  the verbose trace also printed that wrong address. */
int _ddcMallocAligned(void** ptr, size_t alignment, size_t size, char* location)
{
   if (size == 0)
      size = sizeof(void*);
   int retVal = _mallocAligned(ptr, alignment, size);
   if (!*ptr)
   {
      printf("mem: ddcMallocAligned failed on task %d (%zu bytes at %s)\n"
             " memUsed=%8.2f\n",
             getRank(0), size, location, _memUsed/b2mb);
      printHeapInfo(stdout);
   }
   else
   {
      int b = addBlock(*ptr, size, location);
      if (_verboseTask == getRank(0))
      {
         printf("mem: task %d block %d mallocAligned %10p (%zu bytes at %s) total %8.2f\n",
                getRank(0), b, *ptr, size, location, _memUsed/b2mb);
         printHeapInfo(stdout);
      }
   }
   return retVal;
}
/** Tracked counterpart of free().  Releases the memory, then removes
 *  the pointer from the block table.  freeBlock only compares the
 *  pointer value (it never dereferences), so calling it after free()
 *  is safe.  NULL is accepted: free(NULL) is a no-op and freeBlock
 *  returns -2 for it. */
void _ddcFree(void* ptr, const char* location)
{
   free(ptr);
   int b = freeBlock(ptr);
   if (_verboseTask == getRank(0))
      printf("mem: task %d block %d free %10p at %s total %8.2f\n",
             getRank(0), b, ptr, location, _memUsed/b2mb);
}
/** Format a "file:line" source-location string for the ddcMalloc macros.
 *  Returns a pointer to a static buffer, so the result is overwritten by
 *  the next call and is not thread-safe -- callers must copy it if they
 *  need it to persist (addBlock/updateBlock do copy it).
 *  Fixed: use snprintf so a pathologically long file name cannot
 *  overflow the 256-byte buffer. */
char* _ddcLine(const char* file, int lineNum)
{
   static char buffer[256];
   snprintf(buffer, sizeof(buffer), "%s:%d", file, lineNum);
   return buffer;
}
/** Record a newly allocated block in the tracking table and return the
 *  slot index used.  The slot comes from the _freeBlock free-list; the
 *  global byte/block counters and the peak are updated.  The table body
 *  is guarded by an OpenMP critical section.  Initialization is lazy:
 *  _nextBlock keeps its MAX_BLOCK+1 sentinel until the first call, which
 *  triggers ddcMemInit().  If the table fills up the program exits with
 *  status 3. */
int addBlock(void* ptr, size_t size, char* location)
{
   assert(ptr != NULL);
   int here;
#pragma omp critical (ddcMalloc_addBlock)
   {
      if (_nextBlock == MAX_BLOCK+1)
         ddcMemInit();
      here = _freeBlock[_nextBlock];
      _block[here].ptr = ptr;
      _block[here].size = size;
      /* store only the basename of "file:line", truncated to fit the
         40-char location field */
      _block[here].location[0] = '\0';
      strncat(_block[here].location, basename(location),39);
      ++_blocksUsed;
      _memUsed += size;
      if (_memUsed > _peakUsed) _peakUsed = _memUsed;
      ++_nextBlock;
      /* exhaustion is checked after the increment, so the final slot
         (index MAX_BLOCK-1) is filled and then the program aborts */
      if (_nextBlock == MAX_BLOCK)
      {
         printf("Block storage exhausted on task %d in addBlock.\n"
                "%s\n"
                "Try increasing MAX_BLOCK\n", getRank(0),location);
         exit(3);
      }
   }
   return here;
}
/** Move the block-table entry for old_ptr to new_ptr with its new size,
 *  adjusting _memUsed by the size delta.  Falls back to addBlock when
 *  old_ptr is NULL (realloc's plain-malloc case) or when old_ptr is not
 *  found in the table (silently -- see the commented-out diagnostic).
 *  Returns the slot index updated or newly added. */
int updateBlock(void* old_ptr, void* new_ptr, size_t size, char* location)
{
   if (old_ptr == NULL)
      return addBlock(new_ptr, size, location);
   int here = findBlock(old_ptr);
   if (here < 0)
   {
/* printf("Error in updateBlock on task %d\n" */
/* " old_ptr=%08x not found in block table.\n" */
/* " new_ptr=%08x\n" */
/* " %d bytes at %s\n", */
/* getRank(0), old_ptr, new_ptr, size, location); */
      return addBlock(new_ptr, size, location);
   }
#pragma omp critical (ddcMalloc_updateBlock)
   {
      _memUsed += (size - _block[here].size);
      if (_memUsed > _peakUsed) _peakUsed = _memUsed;
      _block[here].ptr = new_ptr;
      _block[here].size = size;
      /* refresh the recorded allocation site, truncated to the field */
      _block[here].location[0] = '\0';
      strncat(_block[here].location, basename(location),39);
   }
   return here;
}
/** Remove ptr's entry from the block table and return its slot to the
 *  free-list, decrementing the live counters.  Returns the slot index,
 *  -1 if ptr was not found (silently ignored -- see the commented-out
 *  diagnostic), or -2 for a NULL pointer. */
int freeBlock(void* ptr)
{
   if (ptr == NULL)
      return -2;
   int here = findBlock(ptr);
#pragma omp critical (ddcMalloc_freeBlock)
   {
      if (here >= 0)
      {
         --_blocksUsed;
         _memUsed -= _block[here].size;
         _block[here].ptr = NULL;
         _block[here].size = 0;
         _block[here].location[0] = '\0';
         /* push the slot back on the free-list */
         --_nextBlock;
         assert (_nextBlock >= 0);
         _freeBlock[_nextBlock] = here;
      }
/* else */
/* printf("mem: Error on Task %d. Request to free ptr 0x%08x.\n" */
/* " Pointer cannot be found in block list.\n", */
/* getRank(0), ptr); */
   }
   return here;
}
/** Linear scan of the block table for ptr.  Returns the index of the
 *  first matching slot, or -1 if ptr is not being tracked. */
int findBlock(void*ptr)
{
   int result = -1;
   for (unsigned ii = 0; ii < MAX_BLOCK && result < 0; ++ii)
   {
      if (_block[ii].ptr == ptr)
         result = ii;
   }
   return result;
}
/** Print system allocator statistics (arena size, used bytes, free
 *  bytes) from mallinfo().  mallinfo is unavailable on OS X, so that
 *  build prints an apology instead.
 *  NOTE(review): mallinfo's int fields can overflow for heaps >2GB;
 *  glibc's mallinfo2 avoids this -- confirm the target glibc before
 *  switching. */
void printHeapInfo(FILE* file)
{
#ifdef __APPLE__
   fprintf(file, "In routine printHeapInfo no mallinfo on OS X. Sorry.\n");
#else
   struct mallinfo minfo;
   minfo = mallinfo();
   fprintf(file, "mem: task %d system heap=%fMB used=%fMB unused=%fMB\n",
           getRank(0),
           minfo.arena/(1024*1024.),
           minfo.uordblks/(1024*1024.),
           minfo.fordblks/(1024*1024.));
#endif
}
#ifdef WITH_PIO
/** Parallel-I/O variant of printHeapInfo: same mallinfo() statistics,
 *  written through a pio PFILE stream (no-op apology on OS X). */
void printHeapInfo_pio(PFILE* file)
{
#ifdef __APPLE__
   Pprintf(file, "No mallinfo on OS X. Sorry.\n");
#else
   struct mallinfo minfo;
   minfo = mallinfo();
   Pprintf(file, "mem: task %d system heap=%fMB used=%fMB unused=%fMB\n",
           getRank(0),
           minfo.arena/(1024*1024.),
           minfo.uordblks/(1024*1024.),
           minfo.fordblks/(1024*1024.));
#endif
}
#endif // ifdef WITH_PIO
int _mallocAligned(void** ptr, size_t alignment, size_t size)
{
#ifndef __APPLE__
return posix_memalign(ptr, alignment, size);
#else
*ptr = malloc(size);
if (*ptr == NULL)
return 1;
return 0;
#endif
}
/** Free *ptr (if non-NULL) and set it to NULL so the caller cannot
 *  double-free or use a dangling pointer afterwards. */
void freeNull(void **ptr)
{
   if (*ptr == NULL)
      return;
   free(*ptr);
   *ptr = NULL;
}
|
SoaDistanceTableAB.h | //////////////////////////////////////////////////////////////////////////////////////
// This file is distributed under the University of Illinois/NCSA Open Source License.
// See LICENSE file in top directory for details.
//
// Copyright (c) 2016 Jeongnim Kim and QMCPACK developers.
//
// File developed by: Jeongnim Kim, jeongnim.kim@intel.com, Intel Corp.
// Amrita Mathuriya, amrita.mathuriya@intel.com, Intel Corp.
//
// File created by: Jeongnim Kim, jeongnim.kim@intel.com, Intel Corp.
//////////////////////////////////////////////////////////////////////////////////////
// -*- C++ -*-
#ifndef QMCPLUSPLUS_DTDIMPL_AB_H
#define QMCPLUSPLUS_DTDIMPL_AB_H
#include "Utilities/FairDivide.h"
#include "Message/OpenMP.h"
namespace qmcplusplus
{
/**@ingroup nnlist
* @brief A derived class from DistanceTableData, specialized for AB using a transposed form
*/
template<typename T, unsigned D, int SC>
struct SoaDistanceTableAB : public DTD_BConds<T, D, SC>, public DistanceTableData
{
  /// build the table for all source/target pairs of the two particle sets
  SoaDistanceTableAB(const ParticleSet& source, ParticleSet& target)
      : DTD_BConds<T, D, SC>(source.Lattice), DistanceTableData(source, target)
  {
    resize(source.getTotalNum(), target.getTotalNum());
  }

  /** allocate per-target rows for ns sources and nt targets.
   *  The source (inner) index of each row is padded to the aligned size
   *  for T; a zero-sized table allocates nothing. */
  void resize(int ns, int nt)
  {
    N_sources = ns;
    N_targets = nt;
    if (N_sources * N_targets == 0)
      return;

    // initialize memory containers and views
    const int Nsources_padded = getAlignedSize<T>(N_sources);
    distances_.resize(N_targets);
    displacements_.resize(N_targets);
    for (int i = 0; i < N_targets; ++i)
    {
      distances_[i].resize(Nsources_padded);
      displacements_[i].resize(Nsources_padded);
    }

    // The padding of temp_r_ and temp_dr_ is necessary for the memory copy in the update function
    // temp_r_ is padded explicitly while temp_dr_ is padded internally
    temp_r_.resize(Nsources_padded);
    temp_dr_.resize(N_sources);
  }

  SoaDistanceTableAB() = delete;
  SoaDistanceTableAB(const SoaDistanceTableAB&) = delete;

  /** evaluate the full table: for every target particle, distances and
   *  displacements to all sources.  The source range is split across
   *  OpenMP threads at aligned boundaries. */
  inline void evaluate(ParticleSet& P)
  {
#pragma omp parallel
    {
      int first, last;
      FairDivideAligned(N_sources, getAlignment<T>(), omp_get_num_threads(), omp_get_thread_num(), first, last);

      //be aware of the sign of Displacement
      for (int iat = 0; iat < N_targets; ++iat)
        DTD_BConds<T, D, SC>::computeDistances(P.R[iat], Origin->getCoordinates().getAllParticlePos(),
                                               distances_[iat].data(), displacements_[iat], first, last);
    }
  }

  ///evaluate the temporary pair relations for a proposed position rnew of target iat
  inline void move(const ParticleSet& P, const PosType& rnew, const IndexType iat, bool prepare_old)
  {
    DTD_BConds<T, D, SC>::computeDistances(rnew, Origin->getCoordinates().getAllParticlePos(), temp_r_.data(), temp_dr_,
                                           0, N_sources);
    // If the full table is not ready all the time, overwrite the current value.
    // If this step is missing, DT values can be undefined in case a move is rejected.
    if (!need_full_table_)
      DTD_BConds<T, D, SC>::computeDistances(P.R[iat], Origin->getCoordinates().getAllParticlePos(),
                                             distances_[iat].data(), displacements_[iat], 0, N_sources);
  }

  ///update the stripe for jat-th particle: accept the move by copying the
  ///temporaries into row iat of the table (padded rows allow bulk copies)
  inline void update(IndexType iat, bool partial_update)
  {
    std::copy_n(temp_r_.data(), N_sources, distances_[iat].data());
    for (int idim = 0; idim < D; ++idim)
      std::copy_n(temp_dr_.data(idim), N_sources, displacements_[iat].data(idim));
  }

  /** build a compact list of targets within rcut of source iat.
   *  Writes target indices, distances, and sign-flipped displacements
   *  into the caller's arrays; returns the number of neighbors found.
   *  Note the transposed access distances_[jat][iat]: iat indexes a
   *  source here, jat a target. */
  size_t get_neighbors(int iat,
                       RealType rcut,
                       int* restrict jid,
                       RealType* restrict dist,
                       PosType* restrict displ) const
  {
    constexpr T cminus(-1);
    size_t nn = 0;
    for (int jat = 0; jat < N_targets; ++jat)
    {
      const RealType rij = distances_[jat][iat];
      if (rij < rcut)
      { //make the compact list
        jid[nn] = jat;
        dist[nn] = rij;
        displ[nn] = cminus * displacements_[jat][iat];
        nn++;
      }
    }
    return nn;
  }

  /** find the closest source to target iat.  With newpos=true the
   *  temporaries from the last move() are searched; otherwise row iat of
   *  the stored table.  r/dr receive the minimum distance and its
   *  displacement; returns the source index, or -1 if none was closer
   *  than numeric_limits<RealType>::max(). */
  int get_first_neighbor(IndexType iat, RealType& r, PosType& dr, bool newpos) const
  {
    RealType min_dist = std::numeric_limits<RealType>::max();
    int index = -1;
    if (newpos)
    {
      for (int jat = 0; jat < N_sources; ++jat)
        if (temp_r_[jat] < min_dist)
        {
          min_dist = temp_r_[jat];
          index = jat;
        }
      if (index >= 0)
      {
        r = min_dist;
        dr = temp_dr_[index];
      }
    }
    else
    {
      for (int jat = 0; jat < N_sources; ++jat)
        if (distances_[iat][jat] < min_dist)
        {
          min_dist = distances_[iat][jat];
          index = jat;
        }
      if (index >= 0)
      {
        r = min_dist;
        dr = displacements_[iat][index];
      }
    }
    return index;
  }

  /** distances-only variant of get_neighbors: fill dist with the
   *  distances of all targets within rcut of source iat and return the
   *  count (transposed access, as above). */
  size_t get_neighbors(int iat, RealType rcut, RealType* restrict dist) const
  {
    size_t nn = 0;
    for (int jat = 0; jat < N_targets; ++jat)
    {
      const RealType rij = distances_[jat][iat];
      if (rij < rcut)
      { //make the compact list
        dist[nn] = rij;
        nn++;
      }
    }
    return nn;
  }
};
} // namespace qmcplusplus
#endif
|
GB_subref_template.c | //------------------------------------------------------------------------------
// GB_subref_template: C = A(I,J)
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// GB_subref_template extracts a submatrix, C = A(I,J). The method is done in
// two phases. Phase 1 just counts the entries in C, and phase 2 constructs
// the pattern and values of C. There are 3 kinds of subref:
//
// symbolic: C(i,j) is the position of A(I(i),J(j)) in the matrix A
// iso: C = A(I,J), extracting the pattern only, not the values
// numeric: C = A(I,J), extracting the pattern and values
#if defined ( GB_SYMBOLIC )
// symbolic method must tolerate zombies
#define GB_Ai(p) GBI_UNFLIP (Ai, p, avlen)
#else
// iso and non-iso numeric methods will not see any zombies
#define GB_Ai(p) GBI (Ai, p, avlen)
#endif
// to iterate across all entries in a bucket:
#define GB_for_each_index_in_bucket(inew,i) \
for (int64_t inew = Mark [i] - 1 ; inew >= 0 ; inew = Inext [inew])
//------------------------------------------------------------------------------
{
//--------------------------------------------------------------------------
// get A and I
//--------------------------------------------------------------------------
const int64_t *restrict Ai = A->i ;
const int64_t avlen = A->vlen ;
// these values are ignored if Ikind == GB_LIST
int64_t ibegin = Icolon [GxB_BEGIN] ;
int64_t iinc = Icolon [GxB_INC ] ;
int64_t inc = (iinc < 0) ? (-iinc) : iinc ;
#ifdef GB_DEBUG
int64_t iend = Icolon [GxB_END ] ;
#endif
//--------------------------------------------------------------------------
// phase1: count entries in each C(:,kC); phase2: compute C
//--------------------------------------------------------------------------
int taskid ;
#pragma omp parallel for num_threads(nthreads) schedule(dynamic,1)
for (taskid = 0 ; taskid < ntasks ; taskid++)
{
//----------------------------------------------------------------------
// get the task descriptor
//----------------------------------------------------------------------
int64_t kfirst = TaskList [taskid].kfirst ;
int64_t klast = TaskList [taskid].klast ;
bool fine_task = (klast < 0) ;
if (fine_task)
{
// a fine task operates on a slice of a single vector
klast = kfirst ;
}
// a coarse task accesses all of I for all its vectors
int64_t pI = 0 ;
int64_t pI_end = nI ;
int64_t ilen = nI ;
ASSERT (0 <= kfirst && kfirst <= klast && klast < Cnvec) ;
//----------------------------------------------------------------------
// compute all vectors C(:,kfirst:klast) for this task
//----------------------------------------------------------------------
for (int64_t kC = kfirst ; kC <= klast ; kC++)
{
//------------------------------------------------------------------
// get C(:,kC)
//------------------------------------------------------------------
#if defined ( GB_ANALYSIS_PHASE )
// phase1 simply counts the # of entries in C(*,kC).
int64_t clen = 0 ;
#else
// This task computes all or part of C(:,kC), which are the entries
// in Ci,Cx [pC:pC_end-1].
int64_t pC, pC_end ;
if (fine_task)
{
// A fine task computes a slice of C(:,kC)
pC = TaskList [taskid ].pC ;
pC_end = TaskList [taskid+1].pC ;
ASSERT (Cp [kC] <= pC && pC <= pC_end && pC_end <= Cp [kC+1]) ;
}
else
{
// The vectors of C are never sliced for a coarse task, so this
// task computes all of C(:,kC).
pC = Cp [kC] ;
pC_end = Cp [kC+1] ;
}
int64_t clen = pC_end - pC ;
if (clen == 0) continue ;
#endif
//------------------------------------------------------------------
// get A(:,kA)
//------------------------------------------------------------------
int64_t pA, pA_end ;
if (fine_task)
{
// a fine task computes a slice of a single vector C(:,kC).
// The task accesses Ai,Ax [pA:pA_end-1], which holds either
// the entire vector A(imin:imax,kA) for method 6, the entire
// dense A(:,kA) for methods 1 and 2, or a slice of the
// A(imin:max,kA) vector for all other methods.
pA = TaskList [taskid].pA ;
pA_end = TaskList [taskid].pA_end ;
}
else
{
// a coarse task computes the entire vector C(:,kC). The task
// accesses all of A(imin:imax,kA), for most methods, or all of
// A(:,kA) for methods 1 and 2. The vector A(*,kA) appears in
// Ai,Ax [pA:pA_end-1].
pA = Ap_start [kC] ;
pA_end = Ap_end [kC] ;
}
int64_t alen = pA_end - pA ;
if (alen == 0) continue ;
//------------------------------------------------------------------
// get I
//------------------------------------------------------------------
if (fine_task)
{
// A fine task accesses I [pI:pI_end-1]. For methods 2 and 6,
// pI:pI_end is a subset of the entire 0:nI-1 list. For all
// other methods, pI = 0 and pI_end = nI, and the task can
// access all of I.
pI = TaskList [taskid].pB ;
pI_end = TaskList [taskid].pB_end ;
ilen = pI_end - pI ;
}
//------------------------------------------------------------------
// determine the method to use
//------------------------------------------------------------------
int method ;
if (fine_task)
{
// The method that the fine task uses for its slice of A(*,kA)
// and C(*,kC) has already been determined by GB_subref_slice.
method = (int) (-TaskList [taskid].klast) ;
}
else
{
// determine the method based on A(*,kA) and I
method = GB_subref_method (NULL, NULL, alen, avlen, Ikind, nI,
(Mark != NULL), need_qsort, iinc, nduplicates) ;
}
//------------------------------------------------------------------
// extract C (:,kC) = A (I,kA): consider all cases
//------------------------------------------------------------------
switch (method)
{
//--------------------------------------------------------------
case 1 : // C(:,kC) = A(:,kA) where A(:,kA) is dense
//--------------------------------------------------------------
// A (:,kA) has not been sliced
ASSERT (Ikind == GB_ALL) ;
ASSERT (pA == Ap_start [kC]) ;
ASSERT (pA_end == Ap_end [kC]) ;
// copy the entire vector and construct indices
#if defined ( GB_ANALYSIS_PHASE )
clen = ilen ;
#else
for (int64_t k = 0 ; k < ilen ; k++)
{
int64_t inew = k + pI ;
ASSERT (inew == GB_ijlist (I, inew, Ikind, Icolon)) ;
ASSERT (inew == GB_Ai (pA + inew)) ;
Ci [pC + k] = inew ;
}
GB_COPY_RANGE (pC, pA + pI, ilen) ;
#endif
break ;
//--------------------------------------------------------------
case 2 : // C(:,kC) = A(I,kA) where A(I,kA) is dense
//--------------------------------------------------------------
// This method handles any kind of list I, but A(:,kA)
// must be dense. A(:,kA) has not been sliced.
ASSERT (pA == Ap_start [kC]) ;
ASSERT (pA_end == Ap_end [kC]) ;
// scan I and get the entry in A(:,kA) via direct lookup
#if defined ( GB_ANALYSIS_PHASE )
clen = ilen ;
#else
for (int64_t k = 0 ; k < ilen ; k++)
{
// C(inew,kC) = A(i,kA), and it always exists.
int64_t inew = k + pI ;
int64_t i = GB_ijlist (I, inew, Ikind, Icolon) ;
ASSERT (i == GB_Ai (pA + i)) ;
Ci [pC + k] = inew ;
GB_COPY_ENTRY (pC + k, pA + i) ;
}
#endif
break ;
//--------------------------------------------------------------
case 3 : // the list I has a single index, ibegin
//--------------------------------------------------------------
// binary search in GB_subref_phase0 has already found it.
// This can be any Ikind with nI=1: GB_ALL with A->vlen=1,
// GB_RANGE with ibegin==iend, GB_STRIDE such as 0:-1:0
// (with length 1), or a GB_LIST with ni=1.
// Time: 50x faster
ASSERT (!fine_task) ;
ASSERT (alen == 1) ;
ASSERT (nI == 1) ;
ASSERT (GB_Ai (pA) == GB_ijlist (I, 0, Ikind, Icolon)) ;
#if defined ( GB_ANALYSIS_PHASE )
clen = 1 ;
#else
Ci [pC] = 0 ;
GB_COPY_ENTRY (pC, pA) ;
#endif
break ;
//--------------------------------------------------------------
case 4 : // Ikind is ":", thus C(:,kC) = A (:,kA)
//--------------------------------------------------------------
// Time: 1x faster but low speedup on the Mac. Why?
// Probably memory bound since it is just memcpy's.
ASSERT (Ikind == GB_ALL && ibegin == 0) ;
#if defined ( GB_ANALYSIS_PHASE )
clen = alen ;
#else
#if defined ( GB_SYMBOLIC )
if (nzombies == 0)
{
memcpy (Ci + pC, Ai + pA, alen * sizeof (int64_t)) ;
}
else
{
// with zombies
for (int64_t k = 0 ; k < alen ; k++)
{
// symbolic C(:,kC) = A(:,kA) where A has zombies
int64_t i = GB_Ai (pA + k) ;
ASSERT (i == GB_ijlist (I, i, Ikind, Icolon)) ;
Ci [pC + k] = i ;
}
}
#else
memcpy (Ci + pC, Ai + pA, alen * sizeof (int64_t)) ;
#endif
GB_COPY_RANGE (pC, pA, alen) ;
#endif
break ;
//--------------------------------------------------------------
case 5 : // Ikind is GB_RANGE = ibegin:iend
//--------------------------------------------------------------
// Time: much faster. Good speedup too.
ASSERT (Ikind == GB_RANGE) ;
#if defined ( GB_ANALYSIS_PHASE )
clen = alen ;
#else
for (int64_t k = 0 ; k < alen ; k++)
{
int64_t i = GB_Ai (pA + k) ;
int64_t inew = i - ibegin ;
ASSERT (i == GB_ijlist (I, inew, Ikind, Icolon)) ;
Ci [pC + k] = inew ;
}
GB_COPY_RANGE (pC, pA, alen) ;
#endif
break ;
//--------------------------------------------------------------
case 6 : // I is short vs nnz (A (:,kA)), use binary search
//--------------------------------------------------------------
// Time: very slow unless I is very short and A(:,kA) is
// very long.
// This case can handle any kind of I, and A(:,kA) of any
// properties. For a fine task, A(:,kA) has not been
// sliced; I has been sliced instead.
// If the I bucket inverse has not been created, this
// method is the only option. Alternatively, if nI =
// length (I) is << nnz (A (:,kA)), then scanning I and
// doing a binary search of A (:,kA) is faster than doing a
// linear-time search of A(:,kA) and a lookup into the I
// bucket inverse.
// The vector of C is constructed in sorted order, so no
// sort is needed.
// A(:,kA) has not been sliced.
ASSERT (pA == Ap_start [kC]) ;
ASSERT (pA_end == Ap_end [kC]) ;
// scan I, in order, and search for the entry in A(:,kA)
for (int64_t k = 0 ; k < ilen ; k++)
{
// C(inew,kC) = A (i,kA), if it exists.
// i = I [inew] ; or from a colon expression
int64_t inew = k + pI ;
int64_t i = GB_ijlist (I, inew, Ikind, Icolon) ;
bool found ;
int64_t pleft = pA ;
int64_t pright = pA_end - 1 ;
#if defined ( GB_SYMBOLIC )
bool is_zombie ;
GB_BINARY_SEARCH_ZOMBIE (i, Ai, pleft, pright, found,
nzombies, is_zombie) ;
#else
GB_BINARY_SEARCH (i, Ai, pleft, pright, found) ;
#endif
if (found)
{
ASSERT (i == GB_Ai (pleft)) ;
#if defined ( GB_ANALYSIS_PHASE )
clen++ ;
#else
ASSERT (pC < pC_end) ;
Ci [pC] = inew ;
GB_COPY_ENTRY (pC, pleft) ;
pC++ ;
#endif
}
}
#if defined ( GB_PHASE_2_OF_2 )
ASSERT (pC == pC_end) ;
#endif
break ;
//--------------------------------------------------------------
case 7 : // I is ibegin:iinc:iend with iinc > 1
//--------------------------------------------------------------
// Time: 1 thread: C=A(1:2:n,:) is 3x slower
// but has good speedup. About as fast with
// enough threads.
ASSERT (Ikind == GB_STRIDE && iinc > 1) ;
for (int64_t k = 0 ; k < alen ; k++)
{
// A(i,kA) present; see if it is in ibegin:iinc:iend
int64_t i = GB_Ai (pA + k) ;
ASSERT (ibegin <= i && i <= iend) ;
i = i - ibegin ;
if (i % iinc == 0)
{
// i is in the sequence ibegin:iinc:iend
#if defined ( GB_ANALYSIS_PHASE )
clen++ ;
#else
int64_t inew = i / iinc ;
ASSERT (pC < pC_end) ;
Ci [pC] = inew ;
GB_COPY_ENTRY (pC, pA + k) ;
pC++ ;
#endif
}
}
#if defined ( GB_PHASE_2_OF_2 )
ASSERT (pC == pC_end) ;
#endif
break ;
//----------------------------------------------------------
case 8 : // I = ibegin:(-iinc):iend, with iinc < -1
//----------------------------------------------------------
// Time: 2x slower for iinc = -2 or -8.
// Good speedup though. Faster for
// large values (iinc = -128).
ASSERT (Ikind == GB_STRIDE && iinc < -1) ;
for (int64_t k = alen - 1 ; k >= 0 ; k--)
{
// A(i,kA) present; see if it is in ibegin:iinc:iend
int64_t i = GB_Ai (pA + k) ;
ASSERT (iend <= i && i <= ibegin) ;
i = ibegin - i ;
if (i % inc == 0)
{
// i is in the sequence ibegin:iinc:iend
#if defined ( GB_ANALYSIS_PHASE )
clen++ ;
#else
int64_t inew = i / inc ;
ASSERT (pC < pC_end) ;
Ci [pC] = inew ;
GB_COPY_ENTRY (pC, pA + k) ;
pC++ ;
#endif
}
}
#if defined ( GB_PHASE_2_OF_2 )
ASSERT (pC == pC_end) ;
#endif
break ;
//----------------------------------------------------------
case 9 : // I = ibegin:(-1):iend
//----------------------------------------------------------
// Time: much faster. Good speedup.
ASSERT (Ikind == GB_STRIDE && iinc == -1) ;
#if defined ( GB_ANALYSIS_PHASE )
clen = alen ;
#else
for (int64_t k = alen - 1 ; k >= 0 ; k--)
{
// A(i,kA) is present
int64_t i = GB_Ai (pA + k) ;
int64_t inew = (ibegin - i) ;
ASSERT (i == GB_ijlist (I, inew, Ikind, Icolon)) ;
Ci [pC] = inew ;
GB_COPY_ENTRY (pC, pA + k) ;
pC++ ;
}
#endif
break ;
//--------------------------------------------------------------
case 10 : // I unsorted, and C needs qsort, duplicates OK
//--------------------------------------------------------------
// Time: with one thread: 2x slower, probably
// because of the qsort. Good speedup however. This used
// if qsort is needed but ndupl == 0. Try a method that
// needs qsort, but no duplicates?
// Case 10 works well when I has many entries and A(:,kA)
// has few entries. C(:,kC) must be sorted after this pass.
ASSERT (Ikind == GB_LIST) ;
for (int64_t k = 0 ; k < alen ; k++)
{
// A(i,kA) present, look it up in the I inverse buckets
int64_t i = GB_Ai (pA + k) ;
// traverse bucket i for all indices inew where
// i == I [inew] or where i is from a colon expression
GB_for_each_index_in_bucket (inew, i)
{
ASSERT (inew >= 0 && inew < nI) ;
ASSERT (i == GB_ijlist (I, inew, Ikind, Icolon)) ;
#if defined ( GB_ANALYSIS_PHASE )
clen++ ;
#else
Ci [pC] = inew ;
GB_COPY_ENTRY (pC, pA + k) ;
pC++ ;
#endif
}
}
// TODO: skip the sort if C is allowed to be jumbled on
// output. Flag C as jumbled instead.
#if defined ( GB_PHASE_2_OF_2 )
ASSERT (pC == pC_end) ;
if (!fine_task)
{
// a coarse task owns this entire C(:,kC) vector, so
// the sort can be done now. The sort for vectors
// handled by multiple fine tasks must wait until all
// task are completed, below in the post sort.
pC = Cp [kC] ;
#if defined ( GB_ISO_SUBREF )
// iso numeric subref C=A(I,J)
// just sort the pattern of C(:,kC)
GB_qsort_1 (Ci + pC, clen) ;
#else
// sort the pattern of C(:,kC), and the values
GB_qsort_1b (Ci + pC, (GB_void *) (Cx + pC*GB_CSIZE1),
GB_CSIZE2, clen) ;
#endif
}
#endif
break ;
//--------------------------------------------------------------
case 11 : // I not contiguous, with duplicates. No qsort needed
//--------------------------------------------------------------
// Case 11 works well when I has many entries and A(:,kA)
// has few entries. It requires that I be sorted on input,
// so that no sort is required for C(:,kC). It is
// otherwise identical to Case 10.
ASSERT (Ikind == GB_LIST) ;
for (int64_t k = 0 ; k < alen ; k++)
{
// A(i,kA) present, look it up in the I inverse buckets
int64_t i = GB_Ai (pA + k) ;
// traverse bucket i for all indices inew where
// i == I [inew] or where i is from a colon expression
GB_for_each_index_in_bucket (inew, i)
{
ASSERT (inew >= 0 && inew < nI) ;
ASSERT (i == GB_ijlist (I, inew, Ikind, Icolon)) ;
#if defined ( GB_ANALYSIS_PHASE )
clen++ ;
#else
Ci [pC] = inew ;
GB_COPY_ENTRY (pC, pA + k) ;
pC++ ;
#endif
}
}
#if defined ( GB_PHASE_2_OF_2 )
ASSERT (pC == pC_end) ;
#endif
break ;
//--------------------------------------------------------------
case 12 : // I not contiguous, no duplicates. No qsort needed.
//--------------------------------------------------------------
// Identical to Case 11, except GB_for_each_index_in_bucket
// just needs to iterate 0 or 1 times. Works well when I
// has many entries and A(:,kA) has few entries.
ASSERT (Ikind == GB_LIST && nduplicates == 0) ;
for (int64_t k = 0 ; k < alen ; k++)
{
// A(i,kA) present, look it up in the I inverse buckets
int64_t i = GB_Ai (pA + k) ;
// bucket i has at most one index inew such that
// i == I [inew]
int64_t inew = Mark [i] - 1 ;
if (inew >= 0)
{
ASSERT (inew >= 0 && inew < nI) ;
ASSERT (i == GB_ijlist (I, inew, Ikind, Icolon)) ;
#if defined ( GB_ANALYSIS_PHASE )
clen++ ;
#else
Ci [pC] = inew ;
GB_COPY_ENTRY (pC, pA + k) ;
pC++ ;
#endif
}
}
#if defined ( GB_PHASE_2_OF_2 )
ASSERT (pC == pC_end) ;
#endif
break ;
//--------------------------------------------------------------
default: ;
//--------------------------------------------------------------
}
//------------------------------------------------------------------
// final count of nnz (C (:,j))
//------------------------------------------------------------------
#if defined ( GB_ANALYSIS_PHASE )
if (fine_task)
{
TaskList [taskid].pC = clen ;
}
else
{
Cp [kC] = clen ;
}
#endif
}
}
//--------------------------------------------------------------------------
// phase2: post sort for any vectors handled by fine tasks with method 10
//--------------------------------------------------------------------------
#if defined ( GB_PHASE_2_OF_2 )
{
if (post_sort)
{
int taskid ;
#pragma omp parallel for num_threads(nthreads) schedule(dynamic,1)
for (taskid = 0 ; taskid < ntasks ; taskid++)
{
int64_t kC = TaskList [taskid].kfirst ;
bool do_post_sort = (TaskList [taskid].len != 0) ;
if (do_post_sort)
{
// This is the first fine task with method 10 for C(:,kC).
// The vector C(:,kC) must be sorted, since method 10 left
// it with unsorted indices.
int64_t pC = Cp [kC] ;
int64_t clen = Cp [kC+1] - pC ;
#if defined ( GB_ISO_SUBREF )
{
// iso numeric subref C=A(I,J)
// just sort the pattern of C(:,kC)
GB_qsort_1 (Ci + pC, clen) ;
}
#else
{
// sort the pattern of C(:,kC), and the values
GB_qsort_1b (Ci + pC, (GB_void *) (Cx + pC*GB_CSIZE1),
GB_CSIZE2, clen) ;
}
#endif
}
}
}
}
#endif
}
#undef GB_Ai
#undef GB_for_each_index_in_bucket
#undef GB_COPY_RANGE
#undef GB_COPY_ENTRY
#undef GB_CSIZE1
#undef GB_CSIZE2
#undef GB_SYMBOLIC
#undef GB_ISO_SUBREF
|
GB_unop__identity_fc32_uint64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_fc32_uint64)
// op(A') function: GB (_unop_tran__identity_fc32_uint64)
// C type: GxB_FC32_t
// A type: uint64_t
// cast: GxB_FC32_t cij = GxB_CMPLXF ((float) (aij), 0)
// unaryop: cij = aij
#define GB_ATYPE \
uint64_t
#define GB_CTYPE \
GxB_FC32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
GxB_FC32_t z = GxB_CMPLXF ((float) (aij), 0) ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
uint64_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
GxB_FC32_t z = GxB_CMPLXF ((float) (aij), 0) ; \
Cx [pC] = z ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_FC32 || GxB_NO_UINT64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply the IDENTITY operator entrywise: Cx [p] = (GxB_FC32_t) Ax [p],
// casting uint64_t to single-precision complex with a zero imaginary part.
// Handles both the dense case (Ab == NULL) and the bitmap case.
// Returns GrB_NO_VALUE if this kernel is disabled at compile time.
GrB_Info GB (_unop_apply__identity_fc32_uint64)
(
    GxB_FC32_t *Cx,             // Cx and Ax may be aliased
    const uint64_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,                // number of entries (or bitmap slots) to process
    int nthreads                // # of OpenMP threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    // TODO: if OP is ONE and uniform-valued matrices are exploited, then
    // do this in O(1) time
    if (Ab == NULL)
    {
        // A is not bitmap: all anz entries are present
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        // identity op with no typecast: a bulk memcpy suffices
        GB_memcpy (Cx, Ax, anz * sizeof (uint64_t), nthreads) ;
        #else
        // typecast each entry (uint64_t -> GxB_FC32_t, imaginary part 0)
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            uint64_t aij = Ax [p] ;
            GxB_FC32_t z = GxB_CMPLXF ((float) (aij), 0) ;
            Cx [p] = z ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;     // skip slots not present in the bitmap
            uint64_t aij = Ax [p] ;
            GxB_FC32_t z = GxB_CMPLXF ((float) (aij), 0) ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose A, typecast uint64_t -> GxB_FC32_t, and
// apply the IDENTITY operator. The actual kernel body is the shared
// template GB_unop_transpose.c, specialized via the GB_GETA / GB_CAST /
// GB_OP macros defined earlier in this file.
// Returns GrB_NO_VALUE if this kernel is disabled at compile time.
GrB_Info GB (_unop_tran__identity_fc32_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,      // per-thread workspaces for the transpose
    const int64_t *restrict A_slice,    // how A is partitioned across threads
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
remarks_parallel_in_multiple_target_state_machines.c | // RUN: %clang_cc1 -verify=host -Rpass=openmp -fopenmp -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=nvptx64-nvidia-cuda -emit-llvm-bc %s -o %t-ppc-host.bc
// RUN: %clang_cc1 -verify=all,safe -Rpass=openmp -fopenmp -O2 -x c++ -triple nvptx64-unknown-unknown -fopenmp-targets=nvptx64-nvidia-cuda -emit-llvm %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o %t.out
// RUN: %clang_cc1 -fexperimental-new-pass-manager -verify=all,safe -Rpass=openmp -fopenmp -O2 -x c++ -triple nvptx64-unknown-unknown -fopenmp-targets=nvptx64-nvidia-cuda -emit-llvm %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o %t.out
// host-no-diagnostics
// bar1 has external linkage, so the optimizer cannot prove its parallel
// region is reached from only one kernel; the verify comments below pin
// the exact diagnostics expected from the OpenMP optimization pass.
void bar1(void) { // all-remark {{[OMP100] Potentially unknown OpenMP target region caller}}
#pragma omp parallel // #0
  // all-remark@#0 {{Found a parallel region that is called in a target region but not part of a combined target construct nor nested inside a target construct without intermediate code. This can lead to excessive register usage for unrelated target regions in the same translation unit due to spurious call edges assumed by ptxas.}}
  // safe-remark@#0 {{Parallel region is not known to be called from a unique single target region, maybe the surrounding function has external linkage?; will not attempt to rewrite the state machine use.}}
  // force-remark@#0 {{[UNSAFE] Parallel region is not known to be called from a unique single target region, maybe the surrounding function has external linkage?; will rewrite the state machine use due to command line flag, this can lead to undefined behavior if the parallel region is called from a target region outside this translation unit.}}
  // force-remark@#0 {{Specialize parallel region that is only reached from a single target region to avoid spurious call edges and excessive register usage in other target regions. (parallel region ID: __omp_outlined__2_wrapper, kernel ID: <NONE>}}
  {
  }
}
// Same situation as bar1: external linkage prevents attributing the
// parallel region to a single target region.
void bar2(void) { // all-remark {{[OMP100] Potentially unknown OpenMP target region caller}}
#pragma omp parallel // #1
  // all-remark@#1 {{Found a parallel region that is called in a target region but not part of a combined target construct nor nested inside a target construct without intermediate code. This can lead to excessive register usage for unrelated target regions in the same translation unit due to spurious call edges assumed by ptxas.}}
  // safe-remark@#1 {{Parallel region is not known to be called from a unique single target region, maybe the surrounding function has external linkage?; will not attempt to rewrite the state machine use.}}
  // force-remark@#1 {{[UNSAFE] Parallel region is not known to be called from a unique single target region, maybe the surrounding function has external linkage?; will rewrite the state machine use due to command line flag, this can lead to undefined behavior if the parallel region is called from a target region outside this translation unit.}}
  // force-remark@#1 {{Specialize parallel region that is only reached from a single target region to avoid spurious call edges and excessive register usage in other target regions. (parallel region ID: __omp_outlined__6_wrapper, kernel ID: <NONE>}}
  {
  }
}
// Target region with two directly nested parallel regions: both can be
// attributed to this single kernel, so both are expected to be specialized.
void foo1(void) {
#pragma omp target teams // #2
  // all-remark@#2 {{Target region containing the parallel region that is specialized. (parallel region ID: __omp_outlined__1_wrapper, kernel ID: __omp_offloading}}
  // all-remark@#2 {{Target region containing the parallel region that is specialized. (parallel region ID: __omp_outlined__3_wrapper, kernel ID: __omp_offloading}}
  {
#pragma omp parallel // #3
    // all-remark@#3 {{Found a parallel region that is called in a target region but not part of a combined target construct nor nested inside a target construct without intermediate code. This can lead to excessive register usage for unrelated target regions in the same translation unit due to spurious call edges assumed by ptxas.}}
    // all-remark@#3 {{Specialize parallel region that is only reached from a single target region to avoid spurious call edges and excessive register usage in other target regions. (parallel region ID: __omp_outlined__1_wrapper, kernel ID: __omp_offloading}}
    {
    }
    bar1();
#pragma omp parallel // #4
    // all-remark@#4 {{Found a parallel region that is called in a target region but not part of a combined target construct nor nested inside a target construct without intermediate code. This can lead to excessive register usage for unrelated target regions in the same translation unit due to spurious call edges assumed by ptxas.}}
    // all-remark@#4 {{Specialize parallel region that is only reached from a single target region to avoid spurious call edges and excessive register usage in other target regions. (parallel region ID: __omp_outlined__3_wrapper, kernel ID: __omp_offloading}}
    {
    }
  }
}
// Like foo1, but additionally calls bar1/bar2 (external-linkage callers);
// only the directly nested parallel regions are attributed and specialized.
void foo2(void) {
#pragma omp target teams // #5
  // all-remark@#5 {{Target region containing the parallel region that is specialized. (parallel region ID: __omp_outlined__5_wrapper, kernel ID: __omp_offloading}}
  // all-remark@#5 {{Target region containing the parallel region that is specialized. (parallel region ID: __omp_outlined__7_wrapper, kernel ID: __omp_offloading}}
  {
#pragma omp parallel // #6
    // all-remark@#6 {{Found a parallel region that is called in a target region but not part of a combined target construct nor nested inside a target construct without intermediate code. This can lead to excessive register usage for unrelated target regions in the same translation unit due to spurious call edges assumed by ptxas.}}
    // all-remark@#6 {{Specialize parallel region that is only reached from a single target region to avoid spurious call edges and excessive register usage in other target regions. (parallel region ID: __omp_outlined__5_wrapper, kernel ID: __omp_offloading}}
    {
    }
    bar1();
    bar2();
#pragma omp parallel // #7
    // all-remark@#7 {{Found a parallel region that is called in a target region but not part of a combined target construct nor nested inside a target construct without intermediate code. This can lead to excessive register usage for unrelated target regions in the same translation unit due to spurious call edges assumed by ptxas.}}
    // all-remark@#7 {{Specialize parallel region that is only reached from a single target region to avoid spurious call edges and excessive register usage in other target regions. (parallel region ID: __omp_outlined__7_wrapper, kernel ID: __omp_offloading}}
    {
    }
    bar1();
    bar2();
  }
}
// Third target region with the same structure as foo2, exercising the
// specialization remarks for yet another pair of nested parallel regions.
void foo3(void) {
#pragma omp target teams // #8
  // all-remark@#8 {{Target region containing the parallel region that is specialized. (parallel region ID: __omp_outlined__9_wrapper, kernel ID: __omp_offloading}}
  // all-remark@#8 {{Target region containing the parallel region that is specialized. (parallel region ID: __omp_outlined__10_wrapper, kernel ID: __omp_offloading}}
  {
#pragma omp parallel // #9
    // all-remark@#9 {{Found a parallel region that is called in a target region but not part of a combined target construct nor nested inside a target construct without intermediate code. This can lead to excessive register usage for unrelated target regions in the same translation unit due to spurious call edges assumed by ptxas.}}
    // all-remark@#9 {{Specialize parallel region that is only reached from a single target region to avoid spurious call edges and excessive register usage in other target regions. (parallel region ID: __omp_outlined__9_wrapper, kernel ID: __omp_offloading}}
    {
    }
    bar1();
    bar2();
#pragma omp parallel // #10
    // all-remark@#10 {{Found a parallel region that is called in a target region but not part of a combined target construct nor nested inside a target construct without intermediate code. This can lead to excessive register usage for unrelated target regions in the same translation unit due to spurious call edges assumed by ptxas.}}
    // all-remark@#10 {{Specialize parallel region that is only reached from a single target region to avoid spurious call edges and excessive register usage in other target regions. (parallel region ID: __omp_outlined__10_wrapper, kernel ID: __omp_offloading}}
    {
    }
    bar1();
    bar2();
  }
}
// Combined / directly nested constructs compile in SPMD mode, which needs
// no state machine, so none of the remarks above should fire here.
void spmd(void) {
  // Verify we do not emit the remarks above for "SPMD" regions.
#pragma omp target teams
#pragma omp parallel
  {
  }

#pragma omp target teams distribute parallel for
  for (int i = 0; i < 100; ++i) {
  }
}
// all-remark@* 5 {{OpenMP runtime call __kmpc_global_thread_num moved to}}
// all-remark@* 12 {{OpenMP runtime call __kmpc_global_thread_num deduplicated}}
|
nzz.c | // nzz: compute two-dimensional per-bin redshift distribution
// ---
// author: Nicolas Tessore <nicolas.tessore@manchester.ac.uk>
// date: 28 May 2019
#define _XOPEN_SOURCE 600
#include <stdlib.h>
#include <stdio.h>
#include <stdbool.h>
#include <math.h>
#include <time.h>
#include <signal.h>
#include <unistd.h>
#ifdef _OPENMP
#include <omp.h>
#endif
static const int RW = 6;
/* qsort comparator for catalog rows (RW doubles per row): orders rows by
 * grid-cell index (column 5) first, then by z, y, x (columns 2, 1, 0).
 * Returns -1, 0, +1 for less-than, equal, greater-than. */
int mapsort(const void* a, const void* b)
{
    static const int key[] = {5, 2, 1, 0};
    const double* x = (const double*) a;
    const double* y = (const double*) b;
    for(int k = 0; k < 4; ++k)
    {
        const int c = key[k];
        if(x[c] < y[c])
            return -1;
        if(x[c] > y[c])
            return +1;
    }
    return 0;
}
/* Flattened grid-cell index for a point (x,y,z) with cell size s on a
 * grid that is w cells wide (x) and h cells high (y): cells are numbered
 * x-fastest, then y, then z. Coordinates are assumed non-negative. */
static inline int index(double x, double y, double z, double s, int w, int h)
{
    const int cx = (int)(x/s);
    const int cy = (int)(y/s);
    const int cz = (int)(z/s);
    return (cz*h + cy)*w + cx;
}
/* Collect the catalog-row ranges covering the (2*gr+1)^3 neighbourhood of
 * grid cell q. ma[] maps a cell index to the first sorted row belonging to
 * that cell (cumulative counts, ma[ng] = total rows), so each run of cells
 * becomes a half-open row range. Ranges that touch (previous end == next
 * start) are merged. On return, *c holds the number of ranges and
 * v[2m], v[2m+1] the m-th range's begin/end rows. Returns q unchanged so
 * the caller can cache which cell the result belongs to. */
static inline int query(int q, const int ma[], int gx, int gy, int gz,
                                                int gr, int* c, int v[])
{
    int i, il, ih, j, jl, jh, k, kl, kh, l, m, n, p;

    /* decompose the flat cell index q into (z, y, x) grid coordinates */
    i = q/(gx*gy);
    j = (q/gx)%gy;
    k = q%gx;

    /* clamp the +/- gr neighbourhood to the grid bounds (half-open) */
    il = i > gr ? i-gr : 0;
    ih = i+gr < gz ? i+gr+1 : gz;
    jl = j > gr ? j-gr : 0;
    jh = j+gr < gy ? j+gr+1 : gy;
    kl = k > gr ? k-gr : 0;
    kh = k+gr < gx ? k+gr+1 : gx;

    n = 0;
    p = -1;     /* end row of the previously emitted range (-1: none) */
    for(i = il; i < ih; ++i)
    {
        for(j = jl; j < jh; ++j)
        {
            /* rows for cells [kl, kh) of this (i,j) run of the x-axis:
             * l = first row of cell kl, m = one past last row of cell kh-1 */
            k = (i*gy + j)*gx;
            l = ma[k + kl];
            m = ma[k + kh];
            if(l == p)
                p = (v[2*n-1] = m);                     /* extend previous range */
            else
                p = (v[2*n+0] = l, v[2*n+1] = m), ++n;  /* start a new range */
        }
    }
    *c = n;
    return q;
}
#include "io.c" // yes, really
/* progress-spinner animation frames: blue Braille glyphs, UTF-8 encoded */
static const char* ANIM[] = {
    "\033[34m\xe2\xa0\xb7\033[0m", "\033[34m\xe2\xa0\xaf\033[0m",
    "\033[34m\xe2\xa0\x9f\033[0m", "\033[34m\xe2\xa0\xbb\033[0m",
    "\033[34m\xe2\xa0\xbd\033[0m", "\033[34m\xe2\xa0\xbe\033[0m"
};
static const int NANIM = sizeof(ANIM)/sizeof(*ANIM);

/* flags set asynchronously by the signal handler:
 * AL - SIGALRM fired (time to refresh the progress line)
 * QQ - SIGQUIT received (finish the run early) */
volatile sig_atomic_t AL;
volatile sig_atomic_t QQ;
/* Signal handler for SIGALRM/SIGQUIT: record which signal arrived in the
 * corresponding flag, then re-install itself (signal() may reset the
 * disposition to default on delivery, depending on the platform). */
void handler(int s)
{
    AL = (s == SIGALRM) ? 1 : 0;
    QQ = (s == SIGQUIT) ? 1 : 0;
    signal(s, handler);
}
/* nzz driver: read the configuration and catalogs, build a uniform-grid
 * spatial index, count weighted pairs in (separation, z_i, z_j) bins in
 * parallel, and write the result. Usage: nzz [config] [output] [cat ...],
 * where "--" keeps the default for a positional argument. */
int main(int argc, char* argv[])
{
    char* cfgfile;
    struct config cfg;
    bool ls, sc, tc;    /* log-spaced bins, spherical coords, per-thread data copies */
    int nc, nd, nr;     /* # catalogs read, # separation bins, # redshift bins */
    double dl, dh, d0, dm, Dl, Dh, rl, rh, rm;
    double ui, uo;      /* unit conversion factors: input coords, separation bins */
    int rc[2];          /* rows per catalog */
    double* rv[2];      /* catalog data, RW doubles per row */
    int cc, xc;         /* row counts of the first and last catalog */
    double* cv;
    double* xv;
    double gs;          /* grid cell size */
    int gr;             /* query radius in cells */
    double xl, xh, yl, yh, zl, zh;
    int gx, gy, gz, ng; /* grid dimensions and total cell count */
    int* ma;            /* cell -> first sorted row (cumulative map) */
    double* Z;          /* per-separation-bin total weight */
    double* N;          /* nd*nr*nr histogram, merged as a running mean */
    double s;           /* total pair count */
    time_t st;
    int dt;
    int i, j;
    char* bf, *nf, *sv, *sx;    /* NOTE(review): sx is set but never used below */

    /* use ANSI styling and check-mark glyphs only on a terminal */
    if(isatty(fileno(stdout)))
    {
        bf = "\033[1m";
        nf = "\033[0m";
        sv = "\033[32m\xe2\x9c\x94\033[0m";
        sx = "\033[31m\xe2\x9c\x98\033[0m";
    }
    else
    {
        bf = nf = "";
        sv = sx = ">";
    }

    /* parse the command line; "--" leaves the default in place
     * NOTE(review): memset/strcmp/strdup/memcpy need <string.h>, which is
     * not included here directly -- presumably pulled in via io.c; confirm */
    cfgfile = NULL;
    memset(&cfg, 0, sizeof(cfg));
    if(argc > 5)
        goto err_usage;
    if(argc > 1 && strcmp(argv[1], "--") != 0)
        cfgfile = strdup(argv[1]);
    if(argc > 2 && strcmp(argv[2], "--") != 0)
        cfg.output = strdup(argv[2]);
    for(i = 3; i < argc; ++i)
        if(strcmp(argv[i], "--") != 0)
            cfg.catv[cfg.catc++] = strdup(argv[i]);
    if(!cfgfile)
        cfgfile = strdup("nzz.cfg");

    readcfg(cfgfile, &cfg);

    printf("\n");
    printf("%sconfiguration file %s%s\n", bf, cfgfile, nf);
    printf("\n");
    printcfg(&cfg);
    printf("\n");

    /* unpack the configuration into local working variables */
    sc = cfg.coords >= COORDS_LONLAT;
    ls = cfg.spacing == SPACING_LOG;
    ui = UCONV[cfg.units];
    uo = UCONV[cfg.thunit];
    nd = cfg.nth;
    dl = cfg.thmin*uo;
    dh = cfg.thmax*uo;
    nr = cfg.nz;
    rl = cfg.zmin;
    rh = cfg.zmax;

#ifdef _OPENMP
    if(cfg.num_threads)
        omp_set_num_threads(cfg.num_threads);
    tc = cfg.thread_data == TDATA_COPY;
#else
    tc = false;
#endif

    /* on the sphere, convert angular separations to chord lengths 2 sin(t/2) */
    if(sc)
    {
        dl = 2*sin(0.5*dl);
        dh = 2*sin(0.5*dh);
    }

    /* d0/dm map a separation onto its bin index (log or linear spacing) */
    if(ls)
    {
        d0 = log(dl);
        dm = nd/(log(dh) - d0);
    }
    else
    {
        d0 = dl;
        dm = nd/(dh - d0);
    }

    /* squared separation bounds avoid a sqrt per pair in the hot loop */
    Dl = dl*dl;
    Dh = dh*dh;
    rm = nr/(rh - rl);  /* redshift -> bin scale */

    /* read the input catalogs */
    for(nc = 0; nc < cfg.catc; ++nc)
    {
        printf("%sread catalog %d%s\n", bf, nc, nf);
        fflush(stdout);
        rc[nc] = 0;
        rv[nc] = NULL;
        readc(cfg.catv[nc], cfg.coords, ui, &rc[nc], &rv[nc]);
        printf("%s done with %d points\n", sv, rc[nc]);
        printf("\n");
    }

    printf("%sbuild index%s\n", bf, nf);
    fflush(stdout);

    /* cell size is a quarter of the maximum separation, so a +/-gr cell
     * neighbourhood (gr = ceil(dh/gs) = 4) covers every possible pair */
    gs = 0.25*dh;
    gr = ceil(dh/gs);

    /* bounding box over all catalogs
     * NOTE(review): the inner loop starts at i = 1 for every catalog, so
     * row 0 of catalogs after the first never updates the bounds (the seed
     * comes from rv[0] only) -- confirm this is intended */
    xl = xh = rv[0][0];
    yl = yh = rv[0][1];
    zl = zh = rv[0][2];
    for(j = 0; j < nc; ++j)
    {
        for(i = 1; i < rc[j]; ++i)
        {
            if(rv[j][i*RW+0] < xl) xl = rv[j][i*RW+0];
            if(rv[j][i*RW+0] > xh) xh = rv[j][i*RW+0];
            if(rv[j][i*RW+1] < yl) yl = rv[j][i*RW+1];
            if(rv[j][i*RW+1] > yh) yh = rv[j][i*RW+1];
            if(rv[j][i*RW+2] < zl) zl = rv[j][i*RW+2];
            if(rv[j][i*RW+2] > zh) zh = rv[j][i*RW+2];
        }
    }

    gx = floor((xh - xl)/gs) + 1;
    gy = floor((yh - yl)/gs) + 1;
    gz = floor((zh - zl)/gs) + 1;
    ng = gx*gy*gz;

    /* store each row's grid-cell index in column 5, then sort rows by cell
     * (mapsort orders by cell first) so cells become contiguous row ranges */
    for(j = 0; j < nc; ++j)
    {
        for(i = 0; i < rc[j]; ++i)
            rv[j][i*RW+5] = index(rv[j][i*RW+0]-xl, rv[j][i*RW+1]-yl,
                                                rv[j][i*RW+2]-zl, gs, gx, gy);
        qsort(rv[j], rc[j], RW*sizeof(double), mapsort);
    }

    /* ma[c] = first row of the "cross" catalog with cell index >= c */
    ma = malloc((ng+1)*sizeof(int));
    if(!ma)
        goto err_alloc;
    cc = rc[0];     /* outer-loop catalog */
    cv = rv[0];
    xc = rc[nc-1];  /* grid-searched catalog (same array as cv if only one given) */
    xv = rv[nc-1];
    for(i = 0, j = 0; i < ng; ++i)
    {
        while(j < xc && xv[j*RW+5] < i)
            j += 1;
        ma[i] = j;
    }
    ma[ng] = xc;

    printf("%s done with %d x %d x %d grid cells\n", sv, gx, gy, gz);
    printf("\n");

    Z = calloc(nd, sizeof(double));
    N = calloc(nd*nr*nr, sizeof(double));
    if(!Z || !N)
        goto err_alloc;

    s = 0;

    /* SIGALRM drives the progress display; SIGQUIT requests an early stop */
    signal(SIGALRM, handler);
    signal(SIGQUIT, handler);
    AL = QQ = 0;

    printf("%sworking%s\n", bf, nf);
    fflush(stdout);

    st = time(NULL);
    dt = 0;

#pragma omp parallel default(none) shared(Z, N, s, AL, QQ, st, dt) \
    firstprivate(ls, sc, tc, nd, nc, d0, dm, Dl, Dh, nr, rl, rm, \
        gr, gx, gy, gz, ng, cc, cv, xc, xv, ma, ANIM, NANIM, stdout)
    {
        int q, qc, nq;
        int* qr;        /* neighbour-cell row ranges from query() */
        double* cv_;
        double* xv_;
        int* ma_;
        double* Z_;     /* thread-local histograms, merged at the end */
        double* N_;
        double s_;
        bool fb;        /* true on the thread that animates the terminal */
        int i, j, jh;

        nq = 0;
        qr = malloc((2*gr+1)*(2*gr+1)*2*sizeof(int));
        if(!qr)
            perror(NULL), abort();

        /* optionally give every thread private copies of the catalogs and
         * cell map (trades memory for locality) */
        if(tc)
        {
            cv_ = malloc(cc*RW*sizeof(double));
            if(cv != xv)
                xv_ = malloc(xc*RW*sizeof(double));
            else
                xv_ = cv_;
            ma_ = malloc((ng+1)*sizeof(int));
            if(!cv_ || !xv_ || !ma_)
                perror(NULL), abort();
            memcpy(cv_, cv, cc*RW*sizeof(double));
            if(cv != xv)
                memcpy(xv_, xv, xc*RW*sizeof(double));
            memcpy(ma_, ma, (ng+1)*sizeof(int));
        }
        else
        {
            cv_ = cv;
            xv_ = xv;
            ma_ = ma;
        }

        Z_ = calloc(nd, sizeof(double));
        N_ = calloc(nd*nr*nr, sizeof(double));
        if(!Z_ || !N_)
            perror(NULL), abort();

        s_ = 0;
        fb = false;

        /* only the master thread runs the spinner / progress line */
#pragma omp master
        if(isatty(fileno(stdout)))
        {
            fb = true;
            AL = false;
            alarm(1);
#ifdef _OPENMP
            printf("\r%s %d thread(s) ", ANIM[0], omp_get_num_threads());
            fflush(stdout);
#endif
        }

        qc = -1;    /* cell the cached query() result belongs to */

#pragma omp for schedule(dynamic, 1) nowait
        for(i = 0; i < cc; ++i)
        {
            const double xi = cv_[i*RW+0];
            const double yi = cv_[i*RW+1];
            const double zi = cv_[i*RW+2];
            const double ri = cv_[i*RW+3];
            const double wi = cv_[i*RW+4];
            const int qi = cv_[i*RW+5];
            const int ni = rm*(ri - rl);    /* redshift bin of point i */

            if(QQ)              /* SIGQUIT: drain the loop without more work */
                continue;

            if(AL && fb)        /* SIGALRM: refresh the progress display */
            {
                dt = difftime(time(NULL), st);
                printf("\r%s %.2f%%", ANIM[dt%NANIM], 100.*i/cc);
                printf(" in %02d:%02d:%02d ", dt/3600, (dt/60)%60, dt%60);
                fflush(stdout);
                AL = false;
                alarm(1);
            }

            if(ni < 0 || ni >= nr)
                continue;

            /* rows are sorted by cell, so consecutive i often share qi;
             * reuse the previous neighbourhood query when they do
             * NOTE(review): this passes the shared ma, not the thread-local
             * ma_; contents are identical, but it bypasses the thread_data
             * copy for this lookup -- confirm intent */
            if(qi != qc)
                qc = query(qi, ma, gx, gy, gz, gr, &nq, qr);

            for(q = 0; q < nq; ++q)
            {
                for(j = qr[2*q+0], jh = qr[2*q+1]; j < jh; ++j)
                {
                    const double xj = xv_[j*RW+0];
                    const double yj = xv_[j*RW+1];
                    const double zj = xv_[j*RW+2];
                    const double rj = xv_[j*RW+3];
                    const double wj = xv_[j*RW+4];
                    const int nj = rm*(rj - rl);

                    const double dx = xi - xj;
                    const double dy = yi - yj;
                    const double dz = zi - zj;
                    const double D = dx*dx + dy*dy + dz*dz;

                    if(nj >= 0 && nj < nr && D >= Dl && D < Dh)
                    {
                        /* separation bin k from squared distance, then the
                         * flattened (k, ni, nj) histogram slot l */
                        const int k = dm*((ls ? 0.5*log(D) : sqrt(D)) - d0);
                        const int l = k*nr*nr + ni*nr + nj;

                        Z_[k] += wi*wj;
                        N_[l] += wi*wj;
                        s_ += 1;
                    }
                }
            }
        }

        /* merge thread-local results: N is maintained as a running
         * Z-weighted mean of N_/Z_, updated in streaming form
         * NOTE(review): if Z_[j] == 0 for some thread, the update divides
         * 0/0 and the zero weight does not rescue it (NaN) -- confirm
         * every separation bin always receives weight on every thread */
#pragma omp critical
        {
            for(j = 0; j < nd; ++j)
            {
                const int k = j*nr*nr;
                Z[j] += Z_[j];
                for(i = 0; i < nr*nr; ++i)
                    N[k+i] += (Z_[j]/Z[j])*(N_[k+i]/Z_[j] - N[k+i]);
            }
            s += s_;
        }

        free(qr);
        free(Z_);
        free(N_);
        if(tc)
        {
            free(cv_);
            if(cv_ != xv_)
                free(xv_);
            free(ma_);
        }
    }

    dt = difftime(time(NULL), st);

    /* NOTE(review): the progress line above is written to stdout, but this
     * carriage return is gated on stdin being a terminal -- probably meant
     * isatty(fileno(stdout)); confirm */
    if(isatty(fileno(stdin)))
        printf("\r");
    printf("%s done with %.0f pairs", sv, s);
    printf(" in %02d:%02d:%02d \n", dt/3600, (dt/60)%60, dt%60);
    printf("\n");

    output(cfg.output, nd, nr, N);

    free(Z);
    free(N);
    free(ma);
    for(j = 0; j < nc; ++j)
        free(rv[j]);
    free(cfgfile);
    freecfg(&cfg);

    return EXIT_SUCCESS;

err_usage:
    fprintf(stderr, "usage: nzz [config] [output] [cat ...]\n");
    return EXIT_FAILURE;

err_alloc:
    perror(NULL);
    return EXIT_FAILURE;
}
|
evolve.c | #include <tgmath.h>
#include <stdio.h>
#include <stdlib.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#include "evolve.h"
#include "evolve_shared.h"
#include "evolve_shared_collisions.h"
#include "evolve_sf.h"
#include "evolve_cc.h"
#include "evolve_kepler.h"
#include "evolve_ok.h"
#include "evolve_bs.h"
#include "evolve_error_control.h"
#ifdef EVOLVE_OPENCL
#include "evolve_cl.h"
#endif
/* module-wide state for the integrator */
int verbosity=0;                        /* diagnostic print level (0 = quiet) */
struct sys debugsys;                    /* system passed to do_evolve, kept for debugging */
FLOAT eps2;                             /* presumably gravitational softening squared -- confirm */
FLOAT dt_param;                         /* presumably timestep accuracy parameter -- confirm */
struct sys zerosys ={0,0,NULL,NULL};    /* canonical empty system */
int accel_zero_mass=1;                  /* if set, split off zero-mass particles (see init_evolve) */
int opencl_device_type=0;
struct diagnostics global_diag;         /* process-wide diagnostics storage */
struct diagnostics *diag;               /* active diagnostics target (set in init_code) */
static void report(struct sys s,DOUBLE etime, int inttype);
#ifndef M_SQRT2
#define M_SQRT2 1.41421356237309504880168872420969808L
#endif
/* Shift every particle of s by dir*dpos in position and dir*dvel in
 * velocity, using the compensated-summation macros that track round-off
 * in the pos_e / vel_e error terms (dir = +1 or -1 applies / undoes). */
void move_system(struct sys s, DOUBLE dpos[3],DOUBLE dvel[3],int dir)
{
  for(UINT k=0;k<s.n;k++)
  {
    struct particle *part=GETPART(s, k);
    for(int d=0;d<3;d++)
    {
      COMPSUMP(part->pos[d],part->pos_e[d],dir*dpos[d])
      COMPSUMV(part->vel[d],part->vel_e[d],dir*dvel[d])
    }
  }
}
/* Compute the mass-weighted mean position and velocity of all particles
 * in s, writing three components each into cmpos and cmvel. */
void system_center_of_mass(struct sys s, DOUBLE *cmpos, DOUBLE *cmvel)
{
  DOUBLE mtot=0.;
  DOUBLE psum[3]={0.,0.,0.};
  DOUBLE vsum[3]={0.,0.,0.};
  for(UINT k=0;k<s.n;k++)
  {
    struct particle *part=GETPART(s, k);
    for(int d=0;d<3;d++)
    {
      psum[d]+=(DOUBLE) part->mass*part->pos[d];
      vsum[d]+=(DOUBLE) part->mass*part->vel[d];
    }
    mtot+=(DOUBLE) part->mass;
  }
  for(int d=0;d<3;d++)
  {
    cmpos[d]=psum[d]/mtot;
    cmvel[d]=vsum[d]/mtot;
  }
}
/* Total kinetic energy: sum of (1/2) m |v|^2 over all particles. */
DOUBLE system_kinetic_energy(struct sys s)
{
  DOUBLE ekin=0.;
  for(UINT k=0;k<s.n;k++)
  {
    struct particle *part=GETPART(s, k);
    ekin+=0.5*part->mass*( part->vel[0]*part->vel[0]+
                           part->vel[1]*part->vel[1]+
                           part->vel[2]*part->vel[2] );
  }
  return ekin;
}
/* Total potential energy: each particle's pot field already holds its
 * full pairwise potential, so summing m*pot counts every pair twice;
 * divide by two at the end. */
DOUBLE system_potential_energy(struct sys s)
{
  DOUBLE epot=0.;
  for(UINT k=0;k<s.n;k++)
  {
    struct particle *part=GETPART(s, k);
    epot+=part->mass*part->pot;
  }
  return epot/2;
}
/* One-time module initialization: point the diagnostics pointer at the
 * process-wide accumulator and, if built with OpenCL, set up the device. */
void init_code()
{
  diag=&global_diag;
#ifdef EVOLVE_OPENCL
  init_cl();
#endif
}
/* Module teardown: release the OpenCL context (if built with it) and stop
 * the OK integrator module. */
void stop_code()
{
#ifdef EVOLVE_OPENCL
  close_cl();
#endif
  evolve_ok_stop(); // safe to call even if ok was not used
}
/* Prepare system s for an integration run: reset per-particle bookkeeping
 * (postime, pot, and the compensated-summation error terms if enabled),
 * compute the initial potential, and (re)initialize the OK integrator
 * when it is the selected scheme. */
void init_evolve(struct sys s,int inttype)
{
  struct particle *ipart;
  for(UINT i=0;i<s.n;i++)
  {
    ipart=GETPART(s,i);
    ipart->postime=0.;
    ipart->pot=0.;
#ifdef COMPENSATED_SUMMP
    /* clear position round-off accumulators */
    ipart->pos_e[0]=0.;ipart->pos_e[1]=0.;ipart->pos_e[2]=0.;
#endif
#ifdef COMPENSATED_SUMMV
    /* clear velocity round-off accumulators */
    ipart->vel_e[0]=0.;ipart->vel_e[1]=0.;ipart->vel_e[2]=0.;
#endif
  }
  if(accel_zero_mass) split_zeromass(&s); // because of potential calc
  potential(s,s);
  evolve_ok_stop(); /* safe to call even if ok was not used */
  if (inttype == OK) evolve_ok_init(s);
}
/* Reset all diagnostic counters before an integration run; the array
 * counters hold one slot per timestep level (MAXLEVEL). */
void zero_diagnostics(struct diagnostics* diag)
{
  diag->deepsteps=0;
  diag->simtime=0.;
  diag->timetrack=0.;
#ifdef EVOLVE_OPENCL
  /* CPU vs. OpenCL work split counters */
  diag->cpu_step=0;
  diag->cl_step=0;
  diag->cpu_count=0;
  diag->cl_count=0;
#endif
  for(int i=0;i<MAXLEVEL;i++)
  {
    diag->tstep[i]=0;diag->tcount[i]=0;
    diag->kstep[i]=0;diag->kcount[i]=0;
    diag->dstep[i]=0;diag->dcount[i]=0;
    diag->cefail[i]=0;diag->cecount[i]=0;
    diag->bsstep[i]=0;diag->jcount[i]=0;
    diag->ntasks[i]=0;diag->taskcount[i]=0;
  }
  diag->taskdrift=0;
  diag->taskkick=0;
}
/* Accumulate one (typically per-thread) diagnostics record into a total,
 * field by field. With OpenMP and verbosity > 0 it also prints a per-thread
 * task summary as a side effect. */
void sum_diagnostics(struct diagnostics* total,struct diagnostics* diag)
{
  int tasksum=0;                /* total tasks over all levels, for the report */
  unsigned long taskcountsum=0; /* total task work count, for the report */
  total->simtime+=diag->simtime;
  total->timetrack+=diag->timetrack;
  total->deepsteps+=diag->deepsteps;
  for(int i=0;i<MAXLEVEL;i++)
  {
    total->tstep[i]+=diag->tstep[i];
    total->tcount[i]+=diag->tcount[i];
    total->kstep[i]+=diag->kstep[i];
    total->kcount[i]+=diag->kcount[i];
    total->dstep[i]+=diag->dstep[i];
    total->dcount[i]+=diag->dcount[i];
    total->cefail[i]+=diag->cefail[i];
    total->cecount[i]+=diag->cecount[i];
    total->bsstep[i]+=diag->bsstep[i];
    total->jcount[i]+=diag->jcount[i];
    total->ntasks[i]+=diag->ntasks[i];tasksum+=diag->ntasks[i];
    total->taskcount[i]+=diag->taskcount[i];taskcountsum+=diag->taskcount[i];
  }
#ifdef EVOLVE_OPENCL
  total->cpu_step+=diag->cpu_step;
  total->cl_step+=diag->cl_step;
  total->cpu_count+=diag->cpu_count;
  total->cl_count+=diag->cl_count;
#endif
#ifdef _OPENMP
  if(verbosity>0) printf("task %d: %d %li %li %li\n",omp_get_thread_num(),tasksum,diag->taskdrift, diag->taskkick,taskcountsum);
#endif
}
/* Top-level integration driver: advance system s over interval dt with the
 * integrator selected by inttype. Resets per-particle position times and
 * the diagnostics, optionally moves massless particles to the tail of the
 * array, dispatches to the requested evolve_* routine, then recomputes the
 * potentials (used for energy reporting) and reports when verbose.
 * For the CC family under OpenMP, each thread gets a private diagnostics
 * record which is merged into global_diag afterwards.
 * Fix: removed unused locals `i` and `ipart`. */
void do_evolve(struct sys s, double dt, int inttype)
{
  int clevel;
  if(dt==0) return;
  for(UINT p=0;p<s.n;p++) GETPART(s,p)->postime=0.;
  clevel=0;
  if(accel_zero_mass) split_zeromass(&s);
  zero_diagnostics(diag);
  debugsys=s;
  switch (inttype)
  {
    case CONSTANT:
    case CONSTANT2:
      evolve_constant2(clevel,s, (DOUBLE) 0.,(DOUBLE) dt,(DOUBLE) dt);
      break;
    case CONSTANT4:
      evolve_constant4(clevel,s, (DOUBLE) 0.,(DOUBLE) dt,(DOUBLE) dt);
      break;
    case CONSTANT6:
      evolve_constant6(clevel,s, (DOUBLE) 0.,(DOUBLE) dt,(DOUBLE) dt);
      break;
    case CONSTANT8:
      evolve_constant8(clevel,s, (DOUBLE) 0.,(DOUBLE) dt,(DOUBLE) dt);
      break;
    case CONSTANT10:
      evolve_constant10(clevel,s, (DOUBLE) 0.,(DOUBLE) dt,(DOUBLE) dt);
      break;
    case SHARED2:
      evolve_shared2(clevel,s, (DOUBLE) 0.,(DOUBLE) dt,(DOUBLE) dt, -1.);
      break;
    case SHARED4:
      evolve_shared4(clevel,s, (DOUBLE) 0.,(DOUBLE) dt,(DOUBLE) dt, -1.);
      break;
    case SHARED6:
      evolve_shared6(clevel,s, (DOUBLE) 0.,(DOUBLE) dt,(DOUBLE) dt, -1.);
      break;
    case SHARED8:
      evolve_shared8(clevel,s, (DOUBLE) 0.,(DOUBLE) dt,(DOUBLE) dt, -1.);
      break;
    case SHARED10:
      evolve_shared10(clevel,s, (DOUBLE) 0.,(DOUBLE) dt,(DOUBLE) dt, -1.);
      break;
    case SHAREDBS:
      evolve_bs_adaptive(clevel,s, (DOUBLE) 0.,(DOUBLE) dt,(DOUBLE) dt, -1.);
      break;
    case BS_CC_KEPLER:
      evolve_bs(clevel,s, (DOUBLE) 0.,(DOUBLE) dt,(DOUBLE) dt);
      break;
    case PASS:
      evolve_split_pass(clevel,s, zerosys,(DOUBLE) 0.,(DOUBLE) dt,(DOUBLE) dt,1);
      break;
    case HOLD:
      evolve_split_hold(clevel,s,(DOUBLE) 0.,(DOUBLE) dt,(DOUBLE) dt,1);
      break;
    case BRIDGE:
      evolve_split_bridge(clevel,s,(DOUBLE) 0.,(DOUBLE) dt,(DOUBLE) dt,1);
      break;
    case NAIVE:
      evolve_split_naive(clevel,s, zerosys,(DOUBLE) 0.,(DOUBLE) dt,(DOUBLE) dt,1);
      break;
    case HOLD_DKD:
      evolve_split_hold_dkd(clevel,s,(DOUBLE) 0.,(DOUBLE) dt,(DOUBLE) dt,1);
      break;
    case PASS_DKD:
      evolve_split_pass_dkd(clevel,s, zerosys, (DOUBLE) 0.,(DOUBLE) dt,(DOUBLE) dt,1);
      break;
    case PPASS_DKD:
      evolve_split_ppass_dkd(clevel,s, zerosys, (DOUBLE) 0.,(DOUBLE) dt,(DOUBLE) dt,1);
      break;
    case BRIDGE_DKD:
      evolve_split_bridge_dkd(clevel,s,(DOUBLE) 0.,(DOUBLE) dt,(DOUBLE) dt,1);
      break;
    case CC:
    case CC_KEPLER:
    case CC_BS:
    case CC_BSA:
    case CCC:
    case CCC_KEPLER:
    case CCC_BS:
    case CCC_BSA:
    case CC_SHARED10:
    case CCC_SHARED10:
#ifdef _OPENMP
      /* each thread accumulates into a private diagnostics record,
         merged into global_diag at the end of the parallel region */
#pragma omp parallel shared(global_diag,s,dt,clevel) copyin(dt_param)
      {
      diag=(struct diagnostics *) malloc(sizeof( struct diagnostics));
      zero_diagnostics(diag);
#pragma omp master
      if(verbosity>0) printf("Total Threads # %d\n", omp_get_num_threads());
#pragma omp single
#endif
#ifdef CC2_SPLIT_SHORTCUTS
      evolve_cc2_shortcut(clevel,s,(DOUBLE) 0.,(DOUBLE) dt,(DOUBLE) dt, inttype, 1, -1.);
#else
      evolve_cc2(clevel,s,(DOUBLE) 0.,(DOUBLE) dt,(DOUBLE) dt, inttype, 1);
#endif
#ifdef _OPENMP
#pragma omp critical
      sum_diagnostics(&global_diag,diag);
      free(diag);
      }
      diag=&global_diag;
#endif
      break;
    case OK:
      evolve_ok2(clevel,s, zeroforces, (DOUBLE) 0.,(DOUBLE) dt,(DOUBLE) dt,1);
      break;
    case KEPLER:
      evolve_kepler(clevel,s,(DOUBLE) 0.,(DOUBLE) dt,(DOUBLE) dt);
      break;
    case FOURTH_M4:
      evolve_sf_4m4(clevel,s,(DOUBLE) 0.,(DOUBLE) dt,(DOUBLE) dt,1);
      break;
    case FOURTH_M5:
      evolve_sf_4m5(clevel,s,(DOUBLE) 0.,(DOUBLE) dt,(DOUBLE) dt,1);
      break;
    case SHARED2_COLLISIONS:
      evolve_shared2_collision_detection(s, (DOUBLE) dt);
      break;
    case SHARED4_COLLISIONS:
      evolve_shared4_collision_detection(s, (DOUBLE) dt);
      break;
    case SHARED6_COLLISIONS:
      evolve_shared6_collision_detection(s, (DOUBLE) dt);
      break;
    case SHARED8_COLLISIONS:
      evolve_shared8_collision_detection(s, (DOUBLE) dt);
      break;
    case SHARED10_COLLISIONS:
      evolve_shared10_collision_detection(s, (DOUBLE) dt);
      break;
    case ERROR_CONTROL:
      evolve_error_control(clevel,s, (DOUBLE) 0.,(DOUBLE) dt,(DOUBLE) dt, -1.);
      break;
    default:
      ENDRUN("unknown integrator %d\n", inttype);
      break;
  }
  /* refresh potentials for subsequent energy queries / reporting */
  for(UINT p=0;p<s.n;p++) GETPART(s,p)->pot=0;
  potential(s,s);
  if(verbosity>0) report(s,(DOUBLE) dt, inttype);
}
// Drift: advance positions of all particles of s by dt along their current
// velocities (COMPSUMP optionally performs compensated summation) and stamp
// each particle with the new position time etime. Updates the per-level
// drift counters of the active diagnostics record.
void drift(int clevel,struct sys s, DOUBLE etime, DOUBLE dt)
{
struct particle *ipart;
for(UINT i=0;i<s.n;i++)
{
ipart=GETPART(s,i);
COMPSUMP(ipart->pos[0],ipart->pos_e[0],dt*ipart->vel[0])
COMPSUMP(ipart->pos[1],ipart->pos_e[1],dt*ipart->vel[1])
COMPSUMP(ipart->pos[2],ipart->pos_e[2],dt*ipart->vel[2])
ipart->postime=etime;
}
diag->dstep[clevel]++;
diag->dcount[clevel]+=s.n;
diag->taskdrift+=s.n;
}
// CPU kick: accumulate the gravitational acceleration on every particle of
// s1 from the massive particles of s2 (the trailing nzero entries of s2 are
// massless and skipped) and advance velocities by dt. eps2 is the softening
// added to every pairwise distance; COMPSUMV optionally applies compensated
// summation to the velocity update. Parallelized over s1 with OpenMP when
// the interaction count exceeds MPWORKLIMIT and we are not already nested
// inside a parallel region.
static void kick_cpu(struct sys s1, struct sys s2, DOUBLE dt)
{
FLOAT dx[3],dr3,dr2,dr,acci;
FLOAT acc[3];
struct particle *ipart, *jpart;
#pragma omp parallel for if((ULONG) s1.n*(s2.n-s2.nzero)>MPWORKLIMIT && !omp_in_parallel()) default(none) \
private(dx,dr3,dr2,dr,acc,acci,ipart,jpart) \
shared(dt,s1,s2,eps2)
for(UINT i=0;i<s1.n;i++)
{
ipart=GETPART(s1,i);
acc[0]=0.;
acc[1]=0.;
acc[2]=0.;
for(UINT j=0;j<s2.n-s2.nzero;j++)
{
jpart=GETPART(s2,j);
//~ if(jpart->mass==0) continue;
// if(ipart==jpart) continue;
dx[0]=ipart->pos[0]-jpart->pos[0];
dx[1]=ipart->pos[1]-jpart->pos[1];
dx[2]=ipart->pos[2]-jpart->pos[2];
dr2=dx[0]*dx[0]+dx[1]*dx[1]+dx[2]*dx[2]+eps2;
// dr2==0 only for a coincident pair with zero softening; skip it
if(dr2>0)
{
dr=sqrt(dr2);
dr3=dr*dr2;
acci=jpart->mass/dr3;
acc[0]-=dx[0]*acci;
acc[1]-=dx[1]*acci;
acc[2]-=dx[2]*acci;
}
}
COMPSUMV(ipart->vel[0],ipart->vel_e[0],dt*acc[0]);
COMPSUMV(ipart->vel[1],ipart->vel_e[1],dt*acc[1]);
COMPSUMV(ipart->vel[2],ipart->vel_e[2],dt*acc[2]);
}
}
// Kick dispatcher: route the s1<-s2 kick to OpenCL when compiled in and the
// interaction count exceeds CLWORKLIMIT, otherwise to the CPU kernel.
// Updates the per-level kick counters of the active diagnostics record.
void kick(int clevel,struct sys s1, struct sys s2, DOUBLE dt)
{
#ifdef EVOLVE_OPENCL
if((ULONG) s1.n*(s2.n-s2.nzero)>CLWORKLIMIT)
{
// serialize device access across OpenMP threads
#pragma omp critical
kick_cl(s1,s2,dt);
diag->cl_step++;
diag->cl_count+=(ULONG) s1.n*s2.n;
} else
{
kick_cpu(s1,s2,dt);
diag->cpu_step++;
diag->cpu_count+=(ULONG) s1.n*s2.n;
}
#else
kick_cpu(s1,s2,dt);
#endif
diag->kstep[clevel]++;
diag->kcount[clevel]+=(ULONG) s1.n*s2.n;
diag->taskkick+=(ULONG) s1.n*s2.n;
}
// CPU potential: add to each particle of s1 the softened gravitational
// potential due to the massive particles of s2 (massless tail skipped,
// self-interaction excluded). Parallelized over s1 with OpenMP when the
// interaction count exceeds MPWORKLIMIT and not already inside a parallel
// region.
static void potential_cpu(struct sys s1,struct sys s2)
{
FLOAT dx[3],dr2,dr;
FLOAT pot;
struct particle *ipart, *jpart;
#pragma omp parallel for if((ULONG) s1.n*(s2.n-s2.nzero)>MPWORKLIMIT && !omp_in_parallel()) default(none) \
private(dx,dr2,dr,pot, ipart,jpart) \
shared(s1,s2,eps2)
for(UINT i=0;i<s1.n;i++)
{
pot=0;
ipart=GETPART(s1,i);
for(UINT j=0;j<s2.n-s2.nzero;j++)
{
jpart=GETPART(s2,j);
if(ipart==jpart) continue;
dx[0]=ipart->pos[0]-jpart->pos[0];
dx[1]=ipart->pos[1]-jpart->pos[1];
dx[2]=ipart->pos[2]-jpart->pos[2];
dr2=dx[0]*dx[0]+dx[1]*dx[1]+dx[2]*dx[2]+eps2;
if(dr2>0)
{
dr=sqrt(dr2);
pot-=jpart->mass/dr;
}
}
ipart->pot+=pot;
}
}
// Potential dispatcher: route to OpenCL for large interaction counts when
// compiled in, otherwise to the CPU kernel. Accumulates into s1's pot
// fields (callers zero them first, see do_evolve/init_evolve).
void potential(struct sys s1, struct sys s2)
{
#ifdef EVOLVE_OPENCL
if((ULONG) s1.n*(s2.n-s2.nzero)>CLWORKLIMIT)
{
// serialize device access across OpenMP threads
#pragma omp critical
potential_cl(s1,s2);
} else
{
potential_cpu(s1,s2);
}
#else
potential_cpu(s1,s2);
#endif
}
// Pairwise timestep criterion for particles i and j. Returns HUGE_VAL for
// a self-pair or when both distance and combined mass are zero; otherwise
// the minimum of the enabled criteria:
//   RATIMESTEP: free-fall-like timestep ~ sqrt(r^3/mu),
//   RVTIMESTEP: approach timestep ~ r/|dv|,
// each corrected to first order by its own time derivative; dir (+1/-1)
// selects the sign of that correction (forward/backward integration).
inline FLOAT timestep_ij(struct particle *i, struct particle *j,int dir) {
FLOAT timestep;
FLOAT dx[3],dr3,dr2,dr,dv[3],dv2,mu,vdotdr2,tau,dtau;
timestep=HUGE_VAL;
if(i==j) return timestep;
dx[0]=i->pos[0] - j->pos[0];
dx[1]=i->pos[1] - j->pos[1];
dx[2]=i->pos[2] - j->pos[2];
dr2=dx[0]*dx[0]+dx[1]*dx[1]+dx[2]*dx[2]+eps2;
mu=i->mass + j->mass;
if(dr2>0 && mu>0)
{
dr=sqrt(dr2);
dr3=dr*dr2;
dv[0]=i->vel[0] - j->vel[0];
dv[1]=i->vel[1] - j->vel[1];
dv[2]=i->vel[2] - j->vel[2];
vdotdr2=(dv[0]*dx[0]+dv[1]*dx[1]+dv[2]*dx[2])/dr2;
dv2=dv[0]*dv[0]+dv[1]*dv[1]+dv[2]*dv[2];
#ifdef RATIMESTEP
tau=RARVRATIO*dt_param/M_SQRT2*sqrt(dr3/mu);
dtau=3/2.*dir*tau*vdotdr2;
// clip dtau so the 1/(1-dtau/2) derivative correction stays bounded
if(dtau>1.) dtau=1.;
tau/=(1-dtau/2);
if(tau < timestep) timestep=tau;
#endif
#ifdef RVTIMESTEP
if(dv2>0)
{
tau=dt_param*dr/sqrt(dv2);
dtau=dir*tau*vdotdr2*(1+mu/(dv2*dr));
if(dtau>1.) dtau=1.;
tau/=(1-dtau/2);
if(tau < timestep) timestep=tau;
}
#endif
}
if (timestep < 0)
{
ENDRUN("negative timestep!\n");
}
return timestep;
}
// CPU timestep pass: set each s1 particle's timestep to the minimum
// pairwise timestep against s2. Massless particles of s1 (the tail,
// i >= s1.n-s1.nzero) only test against the massive particles of s2,
// since massless-massless pairs impose no constraint (mu==0).
static void timestep_cpu(struct sys s1, struct sys s2,int dir)
{
UINT i,j, jmax;
FLOAT timestep,tau;
struct particle *ipart;
#pragma omp parallel for if((ULONG) (s1.n*s2.n-s1.nzero*s2.nzero)>MPWORKLIMIT && !omp_in_parallel()) default(none) \
private(i,j,tau,timestep, jmax, ipart) copyin(dt_param) \
shared(s1,s2,stdout,dir)
for(i=0;i<s1.n;i++)
{
timestep=HUGE_VAL;
ipart=GETPART(s1,i);
jmax=s2.n;if(i>=s1.n-s1.nzero) jmax=s2.n-s2.nzero;
for(j=0;j<jmax;j++)
{
tau=timestep_ij(ipart,GETPART(s2,j),dir);
if(tau < timestep) timestep=tau;
}
// if(timestep<ipart->timestep)
ipart->timestep=timestep;
}
}
// Timestep dispatcher: route to OpenCL for large interaction counts when
// compiled in, otherwise to the CPU pass. Updates the per-level timestep
// counters of the active diagnostics record.
void timestep(int clevel,struct sys s1, struct sys s2,int dir)
{
#ifdef EVOLVE_OPENCL
if((ULONG) (s1.n*s2.n-s1.nzero*s2.nzero)>CLWORKLIMIT)
{
// serialize device access across OpenMP threads
#pragma omp critical
timestep_cl(s1,s2,dir);
} else
{
timestep_cpu(s1,s2,dir);
}
#else
timestep_cpu(s1,s2,dir);
#endif
diag->tstep[clevel]++;
diag->tcount[clevel]+=(ULONG) s1.n*s2.n;
}
/* Print an end-of-run summary: per-level kick/drift/timestep counts with
 * totals and "equivalent" full-N^2 counts, position-time consistency,
 * simulated-time accuracy, and integrator-specific statistics.
 * Fixes: the Kepler-solver section tested CC_SHARED10 twice so that
 * CCC_SHARED10 runs were never reported (second test corrected); its
 * totals label wrongly said "total j, mean j" (copied from the BS
 * section) although it prints cefail/cecount; removed unused local
 * `ipart`. */
static void report(struct sys s,DOUBLE etime, int inttype)
{
  int maxlevel=0,i;
  long int ktot=0,dtot=0, kstot=0,dstot=0,ttot=0,tstot=0;
  UINT n,p,err=0;
  n=s.n;
  printf("** report **\n");
  printf("interaction counts:\n");
  for(i=0;i<MAXLEVEL;i++)
  {
    printf(" %4i: %10li %18li, %10li %18li\n",i, diag->kstep[i], diag->kcount[i], diag->dstep[i],diag->dcount[i]);
    if(diag->kcount[i]>0) maxlevel=i;
    ttot+=diag->tcount[i];
    ktot+=diag->kcount[i];
    dtot+=diag->dcount[i];
    tstot+=diag->tstep[i];
    kstot+=diag->kstep[i];
    dstot+=diag->dstep[i];
  }
  printf("total: %18li %18li %18li\n",ktot,dtot,ttot);
  if(inttype == PASS_DKD || inttype == HOLD_DKD || inttype == PPASS_DKD)
    printf("equiv: %18li %18li %18li\n",(long int) diag->deepsteps*n*n,2*diag->deepsteps*n,(long int) diag->deepsteps*n*n);
  else
    printf("equiv: %18li %18li %18li\n",(long int) 2*diag->deepsteps*n*n,diag->deepsteps*n,(long int) diag->deepsteps*n*n);
  printf("ksteps: %18li, dsteps: %18li, tsteps: %18li\n", kstot,dstot,tstot);
  printf("steps: %18li, equiv: %18li, maxlevel: %i\n",
    diag->deepsteps,((long) 1)<<maxlevel,maxlevel);
  /* every particle should have been drifted exactly to etime */
  for(p=0;p<s.n;p++) if(GETPART(s,p)->postime != (DOUBLE) etime) err++;
  printf("postime errors: %u \n",err);
  printf("target time, actual time: %12.8g %12.8g %12.8g\n",
    (double) etime,(double) diag->simtime,(double) ((DOUBLE) etime-diag->simtime));
  printf("time track, ratio: %12.8g %12.8g\n", (double) diag->timetrack,
    (double) (diag->simtime!=0? (diag->timetrack/diag->simtime) :1));
#ifdef EVOLVE_OPENCL
  printf("cpu step,count: %12li,%18li\n",diag->cpu_step,diag->cpu_count);
  printf("cl step,count: %12li,%18li\n",diag->cl_step,diag->cl_count);
#endif
  if(inttype==SHAREDBS || inttype==CC_BS || inttype==CCC_BS || inttype==CC_BSA || inttype==CCC_BSA)
  {
    unsigned long totalbs=0,totalj=0;
    printf("bs counts:\n");
    for(i=0;i<MAXLEVEL;i++)
    {
      totalbs+=diag->bsstep[i];
      totalj+=diag->jcount[i];
      printf("%d: %18li %18li %f\n",i,diag->bsstep[i],diag->jcount[i],diag->jcount[i]/(1.*diag->bsstep[i]+1.e-20));
    }
    printf(" total, total j, mean j: %18li %18li %f\n",totalbs,totalj,totalj/(1.*totalbs));
  }
  /* was: ...|| inttype==CC_SHARED10 || inttype==CC_SHARED10 (duplicate) */
  if(inttype==KEPLER || inttype==CC_KEPLER || inttype==CCC_KEPLER || inttype==CCC_BS ||
     inttype==CC_BS || inttype==CCC_BSA || inttype==CC_BSA || inttype==CC_SHARED10 || inttype==CCC_SHARED10)
  {
    unsigned long totalcefail=0,totalcecount=0;
    printf("kepler solver counts:\n");
    for(i=0;i<MAXLEVEL;i++)
    {
      totalcefail+=diag->cefail[i];
      totalcecount+=diag->cecount[i];
      printf("%d: %18li %18li\n",i,diag->cefail[i],diag->cecount[i]);
    }
    printf(" total fail, total count: %18li %18li\n",totalcefail,totalcecount);
  }
#ifdef _OPENMP
  {
    int totaltasks=0;
    printf("task counts:\n");
    for(i=0;i<MAXLEVEL;i++)
    {
      printf("%d: %18li %18li\n",i,diag->ntasks[i],diag->taskcount[i]);
      totaltasks+=diag->ntasks[i];
    }
    printf("openmp tasks: %d\n",totaltasks);
  }
#endif
  fflush(stdout);
}
/* Concatenate two particle ranges into one. The ranges must be adjacent
 * in memory (in either order); otherwise the run is aborted. Empty ranges
 * are handled without touching their pointers. */
void join_array(UINT n1, struct particle *p1,
                UINT n2, struct particle *p2,
                UINT *n, struct particle **p)
{
  if(n1==0)
  {
    *n=n2;
    *p=(n2==0) ? NULL : p2;
    return;
  }
  if(n2==0)
  {
    *n=n1;
    *p=p1;
    return;
  }
  *n=n1+n2;
  if(p1+n1==p2)
    *p=p1;              /* p1 immediately followed by p2 */
  else if(p2+n2==p1)
    *p=p2;              /* p2 immediately followed by p1 */
  else
    ENDRUN("join_array error");
}
// Join two systems; their massive ranges and their massless ranges are
// joined separately and must each be contiguous in memory (verified by
// join_array and the LAST/LASTZERO sanity checks below).
struct sys join(struct sys s1,struct sys s2)
{
struct sys s=zerosys;
if(s1.n==0) return s2;
if(s2.n==0) return s1;
join_array(s1.n-s1.nzero, s1.part, s2.n-s2.nzero, s2.part, &s.n, &s.part);
join_array(s1.nzero, s1.zeropart, s2.nzero, s2.zeropart, &s.nzero, &s.zeropart);
s.n=s.n+s.nzero;
if(s.n-s.nzero>0 && LAST(s)-s.part + 1 != s.n-s.nzero) ENDRUN("join error 1");
if(s.nzero>0 && LASTZERO(s)-s.zeropart + 1 != s.nzero) ENDRUN("join error 2");
return s;
}
/* Smallest individual particle timestep in the system. */
FLOAT global_timestep(struct sys s)
{
FLOAT mindt=HUGE_VAL;
for(UINT p=0;p<s.n;p++)
{
FLOAT cand=GETPART(s, p)->timestep;
if(cand<mindt) mindt=cand;
}
return mindt;
}
/* Largest individual particle timestep in the system (0 if empty). */
FLOAT max_global_timestep(struct sys s)
{
FLOAT maxdt=0;
for(UINT p=0;p<s.n;p++)
{
FLOAT cand=GETPART(s, p)->timestep;
if(cand>maxdt) maxdt=cand;
}
return maxdt;
}
void kdk(int clevel,struct sys s1,struct sys s2, DOUBLE stime, DOUBLE etime, DOUBLE dt)
{
if(s2.n>0) kick(clevel,s2, s1, dt/2);
kick(clevel,s1,join(s1,s2),dt/2);
drift(clevel,s1,etime, dt);
kick(clevel,s1,join(s1,s2),dt/2);
if(s2.n>0) kick(clevel,s2, s1, dt/2);
}
// Drift-Kick-Drift step of s1 over dt with an optional external system s2:
// half-drifts around a full kick (s1 feels s1+s2, s2 feels s1).
void dkd(int clevel,struct sys s1,struct sys s2, DOUBLE stime, DOUBLE etime, DOUBLE dt)
{
drift(clevel,s1,stime+dt/2, dt/2);
kick(clevel,s1,join(s1,s2),dt);
if(s2.n>0) kick(clevel,s2, s1, dt);
drift(clevel,s1,etime, dt/2);
}
void split_zeromass(struct sys *s)
{
UINT i=0;
struct particle *left, *right;
if(s->n==0) return;
if(s->part==NULL) ENDRUN("split_zeromass malformed input");
if(s->n-s->nzero==0)
{
if(s->zeropart==NULL || s->part!=s->zeropart) ENDRUN("split_zeromass malformed input");
if(LASTZERO(*s)-s->zeropart+1!=s->nzero) ENDRUN( "split_zeromass malformed input sys");
return;
}
if(s->nzero!=0 && LAST(*s)+1!=s->zeropart)
ENDRUN("split_zeromass can only work on fully contiguous sys");
left=s->part;
right=s->part+(s->n-1);
while(1)
{
if(i>=s->n) ENDRUN("split_zeromass error 1");
i++;
while(left->mass!=0 && left<right) left++;
while(right->mass==0 && left<right) right--;
if(left<right)
{SWAP( *left, *right, struct particle);}
else
break;
}
if(left->mass!=0) left++;
s->nzero=s->n-(left-s->part);
if(s->nzero<0) ENDRUN("split_zeromass find negative number of part");
if(s->nzero>0)
{
s->zeropart=left;
}
if((left-s->part)+s->nzero !=s->n) ENDRUN( "split_zeromass error 2");
for(i=0;i<(s->n-s->nzero);i++) if(GETPART(*s,i)->mass==0) ENDRUN ("split_zromass error 3");
for(i=s->n-s->nzero;i<s->n;i++) if(GETPART(*s,i)->mass!=0) ENDRUN ("split_zeromass error 4");
#ifdef CONSISTENCY_CHECKS
verify_split_zeromass(*s);
#endif
}
// Consistency check after split_zeromass: the first n-nzero particles must
// all be massive and the trailing nzero particles massless. No-op unless
// the zero-mass acceleration option is active.
void verify_split_zeromass(struct sys s)
{
if(!accel_zero_mass) return;
for(UINT i=0;i<s.n-s.nzero;i++) if(GETPART(s,i)->mass==0) ENDRUN("massless particle in main part\n")
for(UINT i=s.n-s.nzero;i<s.n;i++) if(GETPART(s,i)->mass!=0) ENDRUN("massive particle in massless part\n")
}
|
oskar_cross_correlate_gaussian_omp.c | /*
* Copyright (c) 2013-2015, The University of Oxford
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. Neither the name of the University of Oxford nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <math.h>
#include "correlate/private_correlate_functions_inline.h"
#include "correlate/oskar_cross_correlate_gaussian_omp.h"
#include "math/oskar_add_inline.h"
#ifdef __cplusplus
extern "C" {
#endif
/* Single precision. */
/* Cross-correlate per-station Jones matrices to form baseline visibilities
 * for Gaussian-shaped sources (single precision). For each baseline (p,q)
 * with p>q: evaluate the baseline (u,v,w) terms, apply the uv-length
 * filter, then for every source accumulate the response weighted by the
 * bandwidth-smearing sinc and the Gaussian width term, finally adding the
 * sum into the output visibility for that baseline. Stations are processed
 * in parallel with OpenMP. The extra 'guard' accumulator is passed to the
 * inline accumulation routine — presumably for compensated summation in
 * single precision (the double version omits it); confirm against the
 * inline function's documentation. */
void oskar_cross_correlate_gaussian_omp_f(int num_sources, int num_stations,
const float4c* jones, const float* source_I, const float* source_Q,
const float* source_U, const float* source_V, const float* source_l,
const float* source_m, const float* source_n, const float* source_a,
const float* source_b, const float* source_c, const float* station_u,
const float* station_v, const float* station_w, float uv_min_lambda,
float uv_max_lambda, float inv_wavelength, float frac_bandwidth,
float4c* vis)
{
int SQ;
/* Loop over stations. */
#pragma omp parallel for private(SQ) schedule(dynamic, 1)
for (SQ = 0; SQ < num_stations; ++SQ)
{
int SP, i;
const float4c *station_p, *station_q;
/* Pointer to source vector for station q. */
station_q = &jones[SQ * num_sources];
/* Loop over baselines for this station. */
for (SP = SQ + 1; SP < num_stations; ++SP)
{
float uv_len, uu, vv, ww, uu2, vv2, uuvv;
float4c sum, guard;
oskar_clear_complex_matrix_f(&sum);
oskar_clear_complex_matrix_f(&guard);
/* Pointer to source vector for station p. */
station_p = &jones[SP * num_sources];
/* Get common baseline values. */
oskar_evaluate_baseline_terms_inline_f(station_u[SP],
station_u[SQ], station_v[SP], station_v[SQ],
station_w[SP], station_w[SQ], inv_wavelength,
frac_bandwidth, &uv_len, &uu, &vv, &ww, &uu2, &vv2, &uuvv);
/* Apply the baseline length filter. */
if (uv_len < uv_min_lambda || uv_len > uv_max_lambda)
continue;
/* Loop over sources. */
for (i = 0; i < num_sources; ++i)
{
float l, m, n, r1, r2;
/* Get source direction cosines. */
l = source_l[i];
m = source_m[i];
n = source_n[i];
/* Compute bandwidth-smearing term. */
r1 = oskar_sinc_f(uu * l + vv * m + ww * (n - 1.0f));
/* Evaluate Gaussian source width term. */
r2 = expf(-(source_a[i] * uu2 + source_b[i] * uuvv +
source_c[i] * vv2));
r1 *= r2;
/* Accumulate baseline visibility response for source. */
oskar_accumulate_baseline_visibility_for_source_inline_f(&sum,
i, source_I, source_Q, source_U, source_V,
station_p, station_q, r1, &guard);
}
/* Add result to the baseline visibility. */
i = oskar_evaluate_baseline_index_inline(num_stations, SP, SQ);
oskar_add_complex_matrix_in_place_f(&vis[i], &sum);
}
}
}
/* Double precision. */
/* Double-precision counterpart of oskar_cross_correlate_gaussian_omp_f.
 * Same algorithm; no guard accumulator is used here (unlike the single-
 * precision version). */
void oskar_cross_correlate_gaussian_omp_d(int num_sources, int num_stations,
const double4c* jones, const double* source_I, const double* source_Q,
const double* source_U, const double* source_V, const double* source_l,
const double* source_m, const double* source_n, const double* source_a,
const double* source_b, const double* source_c, const double* station_u,
const double* station_v, const double* station_w, double uv_min_lambda,
double uv_max_lambda, double inv_wavelength, double frac_bandwidth,
double4c* vis)
{
int SQ;
/* Loop over stations. */
#pragma omp parallel for private(SQ) schedule(dynamic, 1)
for (SQ = 0; SQ < num_stations; ++SQ)
{
int SP, i;
const double4c *station_p, *station_q;
/* Pointer to source vector for station q. */
station_q = &jones[SQ * num_sources];
/* Loop over baselines for this station. */
for (SP = SQ + 1; SP < num_stations; ++SP)
{
double uv_len, uu, vv, ww, uu2, vv2, uuvv;
double4c sum;
oskar_clear_complex_matrix_d(&sum);
/* Pointer to source vector for station p. */
station_p = &jones[SP * num_sources];
/* Get common baseline values. */
oskar_evaluate_baseline_terms_inline_d(station_u[SP],
station_u[SQ], station_v[SP], station_v[SQ],
station_w[SP], station_w[SQ], inv_wavelength,
frac_bandwidth, &uv_len, &uu, &vv, &ww, &uu2, &vv2, &uuvv);
/* Apply the baseline length filter. */
if (uv_len < uv_min_lambda || uv_len > uv_max_lambda)
continue;
/* Loop over sources. */
for (i = 0; i < num_sources; ++i)
{
double l, m, n, r1, r2;
/* Get source direction cosines. */
l = source_l[i];
m = source_m[i];
n = source_n[i];
/* Compute bandwidth-smearing term. */
r1 = oskar_sinc_d(uu * l + vv * m + ww * (n - 1.0));
/* Evaluate Gaussian source width term. */
r2 = exp(-(source_a[i] * uu2 + source_b[i] * uuvv +
source_c[i] * vv2));
r1 *= r2;
/* Accumulate baseline visibility response for source. */
oskar_accumulate_baseline_visibility_for_source_inline_d(&sum,
i, source_I, source_Q, source_U, source_V,
station_p, station_q, r1);
}
/* Add result to the baseline visibility. */
i = oskar_evaluate_baseline_index_inline(num_stations, SP, SQ);
oskar_add_complex_matrix_in_place_d(&vis[i], &sum);
}
}
}
#ifdef __cplusplus
}
#endif
|
SybasePROP_fmt_plug.c | /* SybasePROP cracker. Hacked together during November of 2013 by Dhiru Kholia
* <dhiru [at] openwall.com>.
*
* This software is Copyright (c) 2013, Dhiru Kholia <dhiru [at] openwall.com>,
* Frank Benhamou, Gregory Terrien and Marcel Major and it is hereby released
* to the general public under the following terms:
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted.
*
* All credits for reversing this algorithm go to Marcel Major, Frank Benhamou
* and Gregory Terrien. Dhiru Kholia just glued together the bits (as usual!).
*
* [1] http://www.nes.fr/securitylab/?p=1128 (in French!)
*
* [2] https://hacktivity.com/hu/letoltesek/archivum/57/
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_sybaseprop;
#elif FMT_REGISTERS_H
john_register_one(&fmt_sybaseprop);
#else
#include "arch.h"
#include "params.h"
#include "common.h"
#include "formats.h"
#include "options.h"
#include "syb-prop_repro.h"
#ifdef _OPENMP
#include <omp.h>
#ifndef OMP_SCALE
#define OMP_SCALE 16
#endif
static int omp_t = 1;
#endif
#include "memdbg.h"
#define BLOCK_SIZE 8
#define FORMAT_LABEL "Sybase-PROP"
#define FORMAT_NAME ""
#define ALGORITHM_NAME "salted FEAL-8 32/" ARCH_BITS_STR
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH 0
#define PLAINTEXT_LENGTH 64
#define CIPHERTEXT_LENGTH (6 + 56)
#define PREFIX_VALUE "0x"
#define PREFIX_LENGTH 2
#define BINARY_SIZE 56 / 2
#define BINARY_ALIGN 4
#define SALT_SIZE 1 // see the definition of generate_hash, note "unsigned char seed" argument
#define SALT_SIZE_HEX 2
#define SALT_ALIGN 1
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 128
/* Known ciphertext/plaintext pairs used for self-test and benchmarking. */
static struct fmt_tests SybasePROP_tests[] = {
{"0x2905aeb3d00e3b80fb0695cb34c9fa9080f84ae1824b24cc51a3849dcb06", "test11"},
{"0x3f05fc3d526946d9936c63dd798c5fa1b980747b1d81d0b9b2e8197d2aca", "test12"},
{NULL}
};
static unsigned char saved_salt;
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static uint32_t (*crypt_out)[BINARY_SIZE / sizeof(uint32_t)];
// Format initialization: when OpenMP is enabled, scale the min/max keys
// per crypt by the thread count (max scaled further by OMP_SCALE) so each
// thread has enough work per crypt_all call, then allocate the key and
// hash buffers accordingly.
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
omp_t = omp_get_max_threads();
if (omp_t > 1) {
self->params.min_keys_per_crypt *= omp_t;
omp_t *= OMP_SCALE;
self->params.max_keys_per_crypt *= omp_t;
}
#endif
saved_key = mem_calloc(self->params.max_keys_per_crypt,
sizeof(*saved_key));
crypt_out = mem_calloc(self->params.max_keys_per_crypt,
sizeof(*crypt_out));
}
// Release the buffers allocated in init (freed in reverse order).
static void done(void)
{
MEM_FREE(crypt_out);
MEM_FREE(saved_key);
}
// Accept only ciphertexts of the form "0x" followed by exactly
// CIPHERTEXT_LENGTH-2 hex digits and nothing else.
static int valid(char *ciphertext, struct fmt_main *self)
{
char *p = ciphertext + PREFIX_LENGTH;
int extra;
if (strncmp(ciphertext, PREFIX_VALUE, PREFIX_LENGTH))
return 0;
if (hexlenl(p, &extra) != CIPHERTEXT_LENGTH-PREFIX_LENGTH || extra)
return 0;
return 1;
}
// Decode the hash portion of the ciphertext into raw bytes, skipping the
// "0x" prefix, the 2 hex digits of salt, and 2 more hex digits (observed
// to always be "05"). Returns a pointer to a static (non-reentrant)
// word-aligned buffer.
static void *get_binary(char *ciphertext)
{
static union {
unsigned char c[BINARY_SIZE+1];
ARCH_WORD dummy;
} buf;
unsigned char *out = buf.c;
char *p;
int i;
p = ciphertext + PREFIX_LENGTH + SALT_SIZE_HEX + 2; // last 2 bytes always seem to be "05"
for (i = 0; i < BINARY_SIZE; i++) {
out[i] =
(atoi16[ARCH_INDEX(*p)] << 4) |
atoi16[ARCH_INDEX(p[1])];
p += 2;
}
return out;
}
// Decode the one-byte salt (the 2 hex digits after "0x") into a static
// (non-reentrant) buffer.
static void *get_salt(char *ciphertext)
{
char *p = ciphertext + PREFIX_LENGTH;
static unsigned char salt;
salt = (atoi16[ARCH_INDEX(*p)] << 4) | atoi16[ARCH_INDEX(p[1])];
return (void*)&salt;
}
// Install the one-byte salt for subsequent crypt_all calls.
static void set_salt(void *salt)
{
saved_salt = ((unsigned char*)salt)[0];
}
// Store a candidate plaintext at the given slot (NUL-safe truncation).
static void set_key(char *key, int index)
{
strnzcpy(saved_key[index], key, sizeof(*saved_key));
}
// Return the stored candidate plaintext for the given slot.
static char *get_key(int index)
{
return saved_key[index];
}
// Hash every stored candidate with the current salt, in parallel under
// OpenMP, writing results into crypt_out. Returns the candidate count.
static int crypt_all(int *pcount, struct db_salt *salt)
{
const int count = *pcount;
int index;
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (index = 0; index < count; index++) {
generate_hash((unsigned char*)saved_key[index], saved_salt,
(unsigned char*)crypt_out[index]);
}
return count;
}
// Quick scan: compare only the first ARCH_SIZE bytes of each computed hash
// against the target; candidates that match are re-checked by cmp_one.
static int cmp_all(void *binary, int count)
{
int index = 0;
for (; index < count; index++)
if (!memcmp(binary, crypt_out[index], ARCH_SIZE))
return 1;
return 0;
}
// Full-width comparison of one computed hash against the target binary.
static int cmp_one(void *binary, int index)
{
return !memcmp(binary, crypt_out[index], BINARY_SIZE);
}
// The full binary is stored and already compared by cmp_one, so nothing
// further to verify.
static int cmp_exact(char *source, int index)
{
return 1;
}
#define COMMON_GET_HASH_VAR crypt_out
#include "common-get-hash.h"
/* John the Ripper format descriptor wiring the functions above into the
 * fmt_main interface (format parameters first, then the method table). */
struct fmt_main fmt_sybaseprop = {
{
FORMAT_LABEL,
FORMAT_NAME,
ALGORITHM_NAME,
BENCHMARK_COMMENT,
BENCHMARK_LENGTH,
0,
PLAINTEXT_LENGTH,
BINARY_SIZE,
BINARY_ALIGN,
SALT_SIZE,
SALT_ALIGN,
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
FMT_CASE | FMT_8_BIT | FMT_OMP,
{ NULL },
{ PREFIX_VALUE },
SybasePROP_tests
}, {
init,
done,
fmt_default_reset,
fmt_default_prepare,
valid,
fmt_default_split,
get_binary,
get_salt,
{ NULL },
fmt_default_source,
{
fmt_default_binary_hash_0,
fmt_default_binary_hash_1,
fmt_default_binary_hash_2,
fmt_default_binary_hash_3,
fmt_default_binary_hash_4,
fmt_default_binary_hash_5,
fmt_default_binary_hash_6
},
fmt_default_salt_hash,
NULL,
set_salt,
set_key,
get_key,
fmt_default_clear_keys,
crypt_all,
{
#define COMMON_GET_HASH_LINK
#include "common-get-hash.h"
},
cmp_all,
cmp_one,
cmp_exact
}
};
#endif /* plugin stanza */
|
donde.c | /* donde.c
* A hybrid MPI / OpenMP program that reports the CPU where each thread
* of each rank is executing. Used to assist in determining correct
* binding behavior.
* Rory Kelly
* 3 May 2017
*/
#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <sched.h>
#include <unistd.h>
#include <mpi.h>
#include <omp.h>
int main(int argc, char **argv){
int mpi_id; // MPI Task ID
int n_mpi; // Number of MPI Tasks
int omp_id; // OpenMP Thread ID
int n_omp; // Number of OpenMP threads
int my_cpu; // CPU # where task/thread is executing
int mpi_tsup_lev; // provided level of MPI thread support
char thrd_str[80]; // the per-thread output string
char node_name[80]; // The node where process / thread is executing
int length; // Length of returned string
MPI_Init_thread(&argc, &argv, MPI_THREAD_FUNNELED, &mpi_tsup_lev);
MPI_Comm_size(MPI_COMM_WORLD, &n_mpi);
MPI_Comm_rank(MPI_COMM_WORLD, &mpi_id);
MPI_Get_processor_name(node_name, &length);
// Print MPI Rank and OpenMP thread info in-order for readability
for(int j=0; j<n_mpi; j++){
if(j == mpi_id){
#pragma omp parallel private(omp_id, n_omp, my_cpu, thrd_str)
{
omp_id = omp_get_thread_num();
n_omp = omp_get_num_threads();
my_cpu = sched_getcpu();
if (omp_id == 0){
sprintf(thrd_str, "MPI Task %2d, OpenMP thread %d of %d (cpu %d)", mpi_id, omp_id, n_omp, my_cpu);
} else {
sprintf(thrd_str, " OpenMP thread %d of %d (cpu %d)", omp_id, n_omp, my_cpu);
}
#pragma omp for ordered schedule(static, 1)
for(int i=0; i<n_omp; i++){
#pragma omp ordered
{
puts(thrd_str);
}
}
}
}
MPI_Barrier(MPI_COMM_WORLD);
}
return MPI_Finalize();
}
|
gsrb.flux.c | //------------------------------------------------------------------------------------------------------------------------------
// Samuel Williams
// SWWilliams@lbl.gov
// Lawrence Berkeley National Lab
//------------------------------------------------------------------------------------------------------------------------------
// This version fissions the FV stencil into the 6 fluxes associate with each direction for each cell. In order to avoid
// redundant computation, each flux is calculated only once. However, in order to avoid writing all these fluxes to memory and
// then rereading them to complete the laplacian, the calculation of fluxes and summation in the laplacian are performed in a
// pipelined wavefront. To further enhance performance, the ij loops are fused (ghost zones are clobbered) and OpenMP simd
// pragmas are utilized. Finally, compiler specific hints and directives are utilized to facilitate simdization and nontemporal
// stores.
//------------------------------------------------------------------------------------------------------------------------------
#if (BLOCKCOPY_TILE_I != 10000)
#error operators.flux.c cannot block the unit stride dimension (BLOCKCOPY_TILE_I!=10000).
#endif
//------------------------------------------------------------------------------------------------------------------------------
void smooth(level_type * level, int x_id, int rhs_id, double a, double b){
// allocate a buffer to hold fluxes...
if(level->fluxes==NULL)level->fluxes = (double*)MALLOC( ( (4*level->num_threads)*(BLOCKCOPY_TILE_J+1)*(level->box_jStride) + BOX_ALIGN_JSTRIDE)*sizeof(double) );
// align fluxes to BOX_ALIGN_JSTRIDE
double * __restrict__ fluxes_aligned = level->fluxes;
uint64_t unaligned_by = (uint64_t)(fluxes_aligned) & (BOX_ALIGN_JSTRIDE-1)*sizeof(double);
if(unaligned_by)fluxes_aligned = (double*)( (uint64_t)(fluxes_aligned) + BOX_ALIGN_JSTRIDE*sizeof(double) - unaligned_by );
int s;for(s=0;s<2*NUM_SMOOTHS;s++){ // there are two sweeps per GSRB smooth
// exchange the ghost zone...
if((s&1)==0){
exchange_boundary(level, x_id,stencil_get_shape());
apply_BCs(level, x_id,stencil_get_shape());
}else{
exchange_boundary(level,VECTOR_TEMP,stencil_get_shape());
apply_BCs(level,VECTOR_TEMP,stencil_get_shape());
}
// apply the smoother...
double _timeStart = getTime();
double h2inv = 1.0/(level->h*level->h);
// loop over all block/tiles this process owns...
#ifdef _OPENMP
#pragma omp parallel if(level->num_my_blocks>1)
#endif
{
int block;
int threadID=0;
#ifdef _OPENMP
threadID=omp_get_thread_num();
#endif
// [thread][flux][ij] layout
double * __restrict__ flux_i = fluxes_aligned + (4*threadID + 0)*(BLOCKCOPY_TILE_J+1)*(level->box_jStride);
double * __restrict__ flux_j = fluxes_aligned + (4*threadID + 1)*(BLOCKCOPY_TILE_J+1)*(level->box_jStride);
double * __restrict__ flux_k[2] = {fluxes_aligned + (4*threadID + 2)*(BLOCKCOPY_TILE_J+1)*(level->box_jStride),
fluxes_aligned + (4*threadID + 3)*(BLOCKCOPY_TILE_J+1)*(level->box_jStride)};
// loop over (cache) blocks...
#ifdef _OPENMP
#pragma omp for schedule(static,1)
#endif
for(block=0;block<level->num_my_blocks;block++){
const int box = level->my_blocks[block].read.box;
const int jlo = level->my_blocks[block].read.j;
const int klo = level->my_blocks[block].read.k;
const int jdim = level->my_blocks[block].dim.j;
const int kdim = level->my_blocks[block].dim.k;
const int ghosts = level->my_boxes[box].ghosts;
const int jStride = level->my_boxes[box].jStride;
const int kStride = level->my_boxes[box].kStride;
const double * __restrict__ rhs = level->my_boxes[box].vectors[ rhs_id] + ghosts*(1+jStride+kStride) + (jlo*jStride + klo*kStride);
#ifdef VECTOR_ALPHA
const double * __restrict__ alpha = level->my_boxes[box].vectors[VECTOR_ALPHA ] + ghosts*(1+jStride+kStride) + (jlo*jStride + klo*kStride);
#else
const double * __restrict__ alpha = NULL;
#endif
const double * __restrict__ beta_i = level->my_boxes[box].vectors[VECTOR_BETA_I] + ghosts*(1+jStride+kStride) + (jlo*jStride + klo*kStride);
const double * __restrict__ beta_j = level->my_boxes[box].vectors[VECTOR_BETA_J] + ghosts*(1+jStride+kStride) + (jlo*jStride + klo*kStride);
const double * __restrict__ beta_k = level->my_boxes[box].vectors[VECTOR_BETA_K] + ghosts*(1+jStride+kStride) + (jlo*jStride + klo*kStride);
const double * __restrict__ Dinv = level->my_boxes[box].vectors[VECTOR_DINV ] + ghosts*(1+jStride+kStride) + (jlo*jStride + klo*kStride);
const double * __restrict__ x_n;
double * __restrict__ x_np1;
if((s&1)==0){x_n = level->my_boxes[box].vectors[ x_id] + ghosts*(1+jStride+kStride) + (jlo*jStride + klo*kStride);
x_np1 = level->my_boxes[box].vectors[VECTOR_TEMP ] + ghosts*(1+jStride+kStride) + (jlo*jStride + klo*kStride);}
else{x_n = level->my_boxes[box].vectors[VECTOR_TEMP ] + ghosts*(1+jStride+kStride) + (jlo*jStride + klo*kStride);
x_np1 = level->my_boxes[box].vectors[ x_id] + ghosts*(1+jStride+kStride) + (jlo*jStride + klo*kStride);}
#ifdef __INTEL_COMPILER
// superfluous with OMP4 simd (?)
//__assume_aligned(x_n ,BOX_ALIGN_JSTRIDE*sizeof(double));
//__assume_aligned(x_np1 ,BOX_ALIGN_JSTRIDE*sizeof(double));
//__assume_aligned(rhs ,BOX_ALIGN_JSTRIDE*sizeof(double));
//__assume_aligned(alpha ,BOX_ALIGN_JSTRIDE*sizeof(double));
//__assume_aligned(beta_i ,BOX_ALIGN_JSTRIDE*sizeof(double));
//__assume_aligned(beta_j ,BOX_ALIGN_JSTRIDE*sizeof(double));
//__assume_aligned(beta_k ,BOX_ALIGN_JSTRIDE*sizeof(double));
//__assume_aligned(Dinv ,BOX_ALIGN_JSTRIDE*sizeof(double));
//__assume_aligned(flux_i ,BOX_ALIGN_JSTRIDE*sizeof(double));
//__assume_aligned(flux_j ,BOX_ALIGN_JSTRIDE*sizeof(double));
//__assume_aligned(flux_k[0],BOX_ALIGN_JSTRIDE*sizeof(double));
//__assume_aligned(flux_k[1],BOX_ALIGN_JSTRIDE*sizeof(double));
__assume( jStride % BOX_ALIGN_JSTRIDE == 0); // e.g. jStride%4==0 or jStride%8==0, hence x+jStride is aligned
__assume( kStride % BOX_ALIGN_JSTRIDE == 0);
__assume( jStride >= BOX_ALIGN_JSTRIDE);
__assume( kStride >= 3*BOX_ALIGN_JSTRIDE);
__assume( jdim > 0);
__assume( kdim > 0);
#elif __xlC__
__alignx(BOX_ALIGN_JSTRIDE*sizeof(double), rhs );
__alignx(BOX_ALIGN_JSTRIDE*sizeof(double), alpha );
__alignx(BOX_ALIGN_JSTRIDE*sizeof(double), beta_i );
__alignx(BOX_ALIGN_JSTRIDE*sizeof(double), beta_j );
__alignx(BOX_ALIGN_JSTRIDE*sizeof(double), beta_k );
__alignx(BOX_ALIGN_JSTRIDE*sizeof(double), Dinv );
__alignx(BOX_ALIGN_JSTRIDE*sizeof(double), x_n );
__alignx(BOX_ALIGN_JSTRIDE*sizeof(double), x_np1 );
__alignx(BOX_ALIGN_JSTRIDE*sizeof(double), flux_i );
__alignx(BOX_ALIGN_JSTRIDE*sizeof(double), flux_j );
__alignx(BOX_ALIGN_JSTRIDE*sizeof(double), flux_k[0]);
__alignx(BOX_ALIGN_JSTRIDE*sizeof(double), flux_k[1]);
#endif
int ij,k;
double * __restrict__ flux_klo = flux_k[0];
// startup / prolog... calculate flux_klo (bottom of cell)...
#if (_OPENMP>=201307)
#pragma omp simd aligned(beta_k,x_n,flux_klo:BOX_ALIGN_JSTRIDE*sizeof(double))
#endif
for(ij=0;ij<jdim*jStride;ij++){
flux_klo[ij] = beta_dxdk(x_n,ij); // k==0
}
// wavefront loop...
for(k=0;k<kdim;k++){
double * __restrict__ flux_klo = flux_k[(k )&0x1];
double * __restrict__ flux_khi = flux_k[(k+1)&0x1];
// calculate flux_i and flux_j together
#if (_OPENMP>=201307)
#pragma omp simd aligned(beta_i,beta_j,x_n,flux_i,flux_j:BOX_ALIGN_JSTRIDE*sizeof(double))
#endif
for(ij=0;ij<jdim*jStride;ij++){
int ijk = ij + k*kStride;
flux_i[ij] = beta_dxdi(x_n,ijk);
flux_j[ij] = beta_dxdj(x_n,ijk);
}
// calculate flux_jhi
#if (_OPENMP>=201307)
#pragma omp simd aligned(beta_j,x_n,flux_j:BOX_ALIGN_JSTRIDE*sizeof(double))
#endif
for(ij=jdim*jStride;ij<(jdim+1)*jStride;ij++){
int ijk = ij + k*kStride;
flux_j[ij] = beta_dxdj(x_n,ijk);
}
// calculate flux_khi (top of cell)
#if (_OPENMP>=201307)
#pragma omp simd aligned(beta_k,x_n,flux_khi:BOX_ALIGN_JSTRIDE*sizeof(double))
#endif
for(ij=0;ij<jdim*jStride;ij++){
int ijk = ij + k*kStride;
flux_khi[ij] = beta_dxdk(x_n,ijk+kStride); // k+1
}
const int color000 = (level->my_boxes[box].low.i^level->my_boxes[box].low.j^level->my_boxes[box].low.k^jlo^klo^s); // is element 000 of this *BLOCK* 000 red or black on this sweep
const double * __restrict__ RedBlack = level->RedBlack_FP + ghosts*(1+jStride) + jStride*((k^color000)&0x1); // Red/Black pencils... presumes ghost zones were corectly colored
#if (_OPENMP>=201307)
#pragma omp simd aligned(flux_i,flux_j,flux_klo,flux_khi,alpha,rhs,Dinv,x_n,x_np1,RedBlack:BOX_ALIGN_JSTRIDE*sizeof(double))
#endif
#ifdef __INTEL_COMPILER
#pragma vector nontemporal // generally, we don't expect to reuse x_np1
#endif
for(ij=0;ij<jdim*jStride;ij++){
int ijk = ij + k*kStride;
double Lx = - flux_i[ ij] + flux_i[ ij+ 1]
- flux_j[ ij] + flux_j[ ij+jStride]
- flux_klo[ij] + flux_khi[ij ];
#ifdef USE_HELMHOLTZ
double Ax = a*alpha[ijk]*x_n[ijk] - b*Lx;
#else
double Ax = -b*Lx;
#endif
x_np1[ijk] = x_n[ijk] + RedBlack[ij]*Dinv[ijk]*(rhs[ijk]-Ax);
}
} // kdim
} // block
} // omp
level->timers.smooth += (double)(getTime()-_timeStart);
} // s-loop
}
//------------------------------------------------------------------------------------------------------------------------------
|
locktable.h | #pragma once
#include <list>
#include <memory>
#include <sstream>
#include "config.h"
#include "row.h"
#include "txn.h"
#include "row_lock.h"
#include "row_ts.h"
#include "row_mvcc.h"
#include "row_hekaton.h"
#include "row_occ.h"
#include "row_tictoc.h"
#include "row_silo.h"
#include "row_vll.h"
#include "log.h"
#include "log_alg_list.h"
#include "helper.h"
#include "global.h"
#include "manager.h"
#include <emmintrin.h>
#include <nmmintrin.h>
#include <immintrin.h>
#if USE_LOCKTABLE
#include "row.h"
#define CALC_PKEY (((uint64_t)row) / sizeof(row_t))
// One entry in a lock-table bucket chain. Maps a row (keyed by CALC_PKEY) to
// the LSN metadata the logging algorithm tracks per row:
//   - TAURUS: a per-logger write-LSN vector (lsn_vec) and read-LSN vector (readLV)
//   - SERIAL: a single LSN
// Items are pooled and recycled: `evicted` marks an item whose slot may be
// re-used for a different row without freeing its LSN storage.
class LockTableListItem
{
//int atomicLock;
public:
// True when this item no longer pins a live row and may be recycled.
bool evicted;
uint64_t key; // supporting only 2^62 instead of 2^63
// Non-owning pointer to the row this item describes (NULL for pool items).
row_t *row;
#if LOG_ALGORITHM == LOG_TAURUS
//#if CC_ALG == SILO
//uint64_t keep;
//#endif
// Per-logger write-LSN vector; readLV points into the same allocation.
lsnType *lsn_vec;
lsnType *readLV;
#elif LOG_ALGORITHM == LOG_SERIAL
lsnType *lsn;
#endif
// `cache`, when non-NULL, supplies pre-allocated storage of `size` bytes for
// the LSN vectors (used by the LockTable constructor's pooled buffers);
// otherwise the vectors are MALLOC'd here.
LockTableListItem(uint64_t _key, row_t *_row, char *cache = NULL, uint64_t size = 0) : key(_key), row(_row)
{
evicted = false;
#if LOG_ALGORITHM == LOG_TAURUS
/*#if CC_ALG == SILO
keep =1;
#endif*/
// we will initialize lsn_vec later
#if COMPRESS_LSN_LT
// Compressed layout: slot 0 holds the entry count, entries pack (lsn<<5)|logger.
lsn_vec = (lsnType *)MALLOC(sizeof(lsnType) * (G_NUM_LOGGER + 1), GET_THD_ID);
#else
#if UPDATE_SIMD
// Fixed-width layout so a whole vector fits one SIMD register.
if(cache == NULL)
{
assert(g_num_logger <= MAX_LOGGER_NUM_SIMD);
lsn_vec = (lsnType *)MALLOC(sizeof(lsnType) * MAX_LOGGER_NUM_SIMD * 2, GET_THD_ID);
readLV = lsn_vec + MAX_LOGGER_NUM_SIMD; // (lsnType*) MALLOC(sizeof(lsnType) * 4, GET_THD_ID);
}
else
{
assert(size >= sizeof(lsnType) * MAX_LOGGER_NUM_SIMD * 2);
lsn_vec = (lsnType *)cache;
readLV = lsn_vec + MAX_LOGGER_NUM_SIMD;
}
#else
if (cache == NULL)
{
lsn_vec = (lsnType *)MALLOC(sizeof(lsnType) * g_num_logger * 2, GET_THD_ID);
readLV = lsn_vec + g_num_logger;
}
else
{
// NOTE(review): this branch asserts size >= 4*g_num_logger and offsets readLV
// by 2*g_num_logger, while the MALLOC branch above uses 2*/1* — looks
// inconsistent; confirm the intended cache layout against the constructor.
assert(size >= sizeof(lsnType) * g_num_logger * 4);
lsn_vec = (lsnType *)cache;
readLV = lsn_vec + g_num_logger * 2;
}
#endif
#endif
#elif LOG_ALGORITHM == LOG_SERIAL
if (cache == NULL)
lsn = (lsnType *)MALLOC(sizeof(lsnType), GET_THD_ID);
else
lsn = (lsnType *) cache;
*lsn = 0;
#endif
// TODO: later we need to pre-malloc enough space for the LTI's.
// do not initialize lsn_vec here
}
// Destructor intentionally does not free the LSN storage: items (and their
// vectors) live in pooled buffers owned by LockTable and are never released.
~LockTableListItem()
{
/*
#if LOG_ALGORITHM == LOG_TAURUS
#if COMPRESS_LSN_LT
FREE(lsn_vec, sizeof(lsnType) * (G_NUM_LOGGER + 1));
#else
#if UPDATE_SIMD
FREE(lsn_vec, sizeof(lsnType) * MAX_LOGGER_NUM_SIMD * 2);
#else
FREE(lsn_vec, sizeof(lsnType) * g_num_logger * 2);
#endif
#endif
#endif
*/
}
};
// A single hash bucket: a small spin-lock word guarding the chain of items
// that hash to this slot. The lock is taken via ATOM_CAS(atomicLock, 0, 1)
// and released by storing 0.
struct LockTableValue
{
    int atomicLock{0};               // 0 = free, 1 = held
    list<LockTableListItem *> li;    // bucket chain (pre-populated; items are recycled)
    LockTableValue() = default;
};
class LockTable
{
public:
// From Numerical Recipes, 3rd Edition
// Maps a key to a bucket index by masking with (locktable_size - 1); the
// constructor rounds locktable_size up to a power of two, so the mask is
// always valid. NOTE: the Numerical-Recipes bit-mixing stage that used to
// follow the first return was unreachable dead code; it is preserved below,
// disabled, in case better key dispersion is ever wanted again.
inline uint64_t uint64hash(uint64_t key)
{
//return key % locktable_size;
return key & (locktable_size - 1);
#if 0 // unreachable mixing stage (Numerical Recipes, 3rd Edition), kept for reference
key = key * 0x369dea0f31a53f85 + 0x255992d382208b61;
key ^= key >> 21;
key ^= key << 37;
key ^= key >> 4;
key *= 0x422e19e1d95d2f0d;
key ^= key << 20;
key ^= key >> 41;
key ^= key << 5;
return key;
#endif
}
//uint32_t evictLock;
LockTableValue *hashMap;
// Meyers singleton: the single global lock table, lazily constructed on
// first use (thread-safe static initialization since C++11).
static LockTable &getInstance()
{
static LockTable instance; // Guaranteed to be destroyed.
// Instantiated on first use.
return instance;
}
// Debug helper: dumps every bucket's lock word, chain length and (under
// TAURUS) each item's per-logger write-LSN vector. Takes no bucket locks;
// call only while the table is quiescent.
static void printLockTable()
{
LockTable &lt = getInstance(); // fixed: "&lt" had been corrupted to "<" (HTML-entity damage)
for (uint i = 0; i < lt.locktable_size; i++)
{
LockTableValue &ltv = lt.hashMap[i]; // fixed: "&ltv" had been corrupted to "<v"
printf("lt[%d] %d %lu {", i, ltv.atomicLock, ltv.li.size());
for (auto lti = ltv.li.begin(); lti != ltv.li.end(); lti++)
{
#if LOG_ALGORITHM == LOG_TAURUS
for (uint j = 0; j < G_NUM_LOGGER; j++)
{
printf("%" PRIu64 ",", (uint64_t)((*lti)->lsn_vec[j]));
}
#endif
printf("]), ");
}
printf("}\n");
}
}
bool inline try_evict_item(LockTableListItem *<i)
{
#if LOG_ALGORITHM == LOG_TAURUS
#if COMPRESS_LSN_LT
for (uint64_t i = 1; i < lti->lsn_vec[0]; i++)
{
uint64_t index = (lti->lsn_vec[i]) & 31;
uint64_t lsn_i = (lti->lsn_vec[i]) >> 5;
if (lsn_i + g_locktable_evict_buffer > log_manager->_logger[index]->get_persistent_lsn())
{
return false;
}
}
#else
for (uint32_t i = 0; i < G_NUM_LOGGER; i++) // place of canEvict(), check if locktable item's lsn is smaller than psn
if (lti->lsn_vec[i] + g_locktable_evict_buffer > log_manager->_logger[i]->get_persistent_lsn())
{
return false;
}
#endif
#elif LOG_ALGORITHM == LOG_SERIAL
if (lti->lsn[0] > log_manager->_logger[0]->get_persistent_lsn())
return false;
#endif
lti->evicted = true; // execute the eviction
return true;
}
void inline try_evict_locktable_bucket(LockTableValue <v) // should be inside the lock session
{
for (list<LockTableListItem *>::iterator it = ltv.li.begin(); it != ltv.li.end();)
{
//LockTableListItem & lti = **it;
#if CC_ALG == NO_WAIT
if ((*it)->evicted || (*it)->row->manager->get_lock_type() != LOCK_NONE)
#elif CC_ALG == SILO && ATOMIC_WORD
//if((*it)->evicted || ((*it)->keep || ((*it)->row->manager->_tid_word & LOCK_BIT)))
if ((*it)->evicted || ((*it)->row->manager->_tid_word & LOCK_BIT))
#else
assert(false); // not implemented
#endif
{
it++;
continue; // we do not evict items that hold locks
}
if (try_evict_item(*it))
{
//delete *it; // release the memory of lti
//it = ltv.li.erase(it);
(*it)->evicted = true;
}
//try_evict_item(*it);
it++;
}
}
// Best-effort sweep over the whole table: for each bucket whose spin lock can
// be acquired without waiting, evict whatever items are safely persistent.
// Buckets currently locked by other threads are simply skipped.
void try_evict()
{
for (uint64_t i = 0; i < locktable_size; i++)
{
LockTableValue &ltv = hashMap[i]; // fixed: "&ltv" had been corrupted to "<v"; a copy would CAS a temporary's lock word
if (ATOM_CAS(ltv.atomicLock, 0, 1)) // otherwise we skip this
{
try_evict_locktable_bucket(ltv);
//COMPILER_BARRIER
ltv.atomicLock = 0; // release the lock
}
}
}
// Releases the concurrency-control lock on `row` and, on commit, folds the
// transaction's LSN vector into the row's lock-table item (TAURUS: element-wise
// max into writeLV for writes / readLV for reads; SERIAL: bump the single LSN)
// so later accessors inherit the recovery dependency.
// Returns true only on the SILO quick-abort path; otherwise false.
// Asserts if the row's item is not found in its bucket (eviction of locked
// rows is currently impossible).
bool release_lock(row_t *row, access_t type, txn_man *txn, char *data, lsnType *lsn_vec, lsnType *max_lsn, RC rc_in)
{
INC_INT_STATS(int_debug9, 1);
uint64_t starttime = get_sys_clock();
uint64_t pkey = CALC_PKEY; //row->get_primary_key();
uint64_t hashedKey = uint64hash(pkey) & (locktable_size - 1);
LockTableValue &ltv = hashMap[hashedKey]; // fixed: "&ltv" had been corrupted to "<v"; must alias the shared bucket or the spin lock below is meaningless
//bool notfound = true;
#if CC_ALG == SILO
// do quick lock release
if (rc_in == Abort)
{
row->manager->release(txn, Abort);
// we do not have to change the lsn_vector if it is released with the txn being aborted.
return true;
}
#endif
// acquire the bucket spin lock
while (!ATOM_CAS(ltv.atomicLock, 0, 1))
PAUSE;
uint64_t afterCAS = get_sys_clock();
INC_INT_STATS(time_debug8, afterCAS - starttime);
uint64_t counter = 0;
for (list<LockTableListItem *>::iterator it = ltv.li.begin(); it != ltv.li.end(); it++)
{
counter++;
if ((*it)->key == pkey && !(*it)->evicted)
{
//notfound = false;
#if LOG_ALGORITHM == LOG_TAURUS
if (rc_in != Abort) // update the value
{
uint64_t update_start = get_sys_clock();
#if COMPRESS_LSN_LT
assert(false);
uint32_t lsnVecHash[G_NUM_LOGGER];
memset(lsnVecHash, 0, sizeof(lsnVecHash));
for (uint64_t i = 1; i < (*it)->lsn_vec[0]; i++)
{
uint64_t index = ((*it)->lsn_vec[i]) & 31;
uint64_t lsn_i = ((*it)->lsn_vec[i]) >> 5;
if (lsn_i < lsn_vec[index])
(*it)->lsn_vec[i] = (lsn_vec[index] << 5) | index;
lsnVecHash[index] = 1;
}
// add other constraint
for (uint64_t i = 0; i < G_NUM_LOGGER; i++)
if (!lsnVecHash[i] && lsn_vec[i] > 0)
{
(*it)->lsn_vec[(*it)->lsn_vec[0]] = (lsn_vec[i] << 5) | i;
(*it)->lsn_vec[0]++;
}
#else // when releasing the lock, only update writeLV.
#if VERBOSE_LEVEL & VERBOSE_TXNLV_UPDATE
stringstream s;
s << GET_THD_ID << " Release " << row;
s << " txn change item LV from ";
for (uint32_t kk = 0; kk < g_num_logger; kk++)
{
s << (*it)->lsn_vec[kk] << ": " << (*it)->readLV[kk] << ", ";
}
s << ") ";
#endif
if (type == WR)
{
// writes propagate the txn's LV into the item's write vector
#if UPDATE_SIMD
SIMD_PREFIX *LV = (SIMD_PREFIX *)lsn_vec;
SIMD_PREFIX *writeLV = (SIMD_PREFIX *)(*it)->lsn_vec;
*writeLV = MM_MAX(*LV, *writeLV);
#else
for (uint32_t i = 0; i < G_NUM_LOGGER; i++)
if ((*it)->lsn_vec[i] < lsn_vec[i])
(*it)->lsn_vec[i] = lsn_vec[i];
#endif
}
else
{
// reads only matter for command logging (anti-dependencies)
#if LOG_TYPE == LOG_COMMAND || !DISTINGUISH_COMMAND_LOGGING
#if UPDATE_SIMD
SIMD_PREFIX *LV = (SIMD_PREFIX *)lsn_vec;
SIMD_PREFIX *readLV = (SIMD_PREFIX *)(*it)->readLV;
*readLV = MM_MAX(*LV, *readLV);
#else
for (uint32_t i = 0; i < G_NUM_LOGGER; ++i)
if ((*it)->readLV[i] < lsn_vec[i])
(*it)->readLV[i] = lsn_vec[i];
#endif
#endif
}
#if VERBOSE_LEVEL & VERBOSE_TXNLV_UPDATE
s << " to ";
for (uint32_t kk = 0; kk < g_num_logger; kk++)
{
s << (*it)->lsn_vec[kk] << ": " << (*it)->readLV[kk] << ", ";
}
s << ")" << endl;
cout << s.str();
#endif
#endif
/*
#if CC_ALG == SILO
(*it)->keep = 0;
#endif*/
INC_INT_STATS(time_lv_overhead, get_sys_clock() - update_start);
}
#elif LOG_ALGORITHM == LOG_SERIAL
if (rc_in != Abort && (*it)->lsn[0] < *max_lsn)
(*it)->lsn[0] = *max_lsn;
#endif
#if CC_ALG == SILO
volatile uint64_t *v = &(row->manager->_tid_word);
assert(*v & LOCK_BIT);
*v = *v & (~LOCK_BIT);
// for SILO we already done the memory update in silo_validate
#else
row->return_row(type, txn, data, RCOK);
#endif
//COMPILER_BARRIER
ltv.atomicLock = 0;
INC_INT_STATS(time_debug9, get_sys_clock() - afterCAS);
INC_INT_STATS(int_debug10, counter);
return false;
// break; // anyway we have found the key
}
}
//#if CC_ALG == NO_WAIT
assert(false); // currently no evict will fail.
//#else
// ltv.atomicLock = 0;
//#endif
INC_INT_STATS(time_debug9, get_sys_clock() - afterCAS);
INC_INT_STATS(int_debug10, counter);
return false;
}
// Acquires the CC lock on `row` through the lock table and pulls the row's
// stored LSN vector into the transaction's `lsn_vec` (TAURUS) / `max_lsn`
// (SERIAL) so the txn inherits the row's recovery dependencies.
// Lookup proceeds in three stages under the bucket spin lock:
//   1. find a live item with a matching key;
//   2. otherwise recycle an evicted (or evictable) item for this row;
//   3. otherwise allocate and link a fresh item.
// tryLock: bail out with Abort if the bucket is currently locked.
// tryOnce: attempt the bucket CAS exactly once instead of spinning.
RC get_row(row_t *row, access_t type, txn_man *txn, char *&data, lsnType *lsn_vec, lsnType *max_lsn, bool tryLock = false, uint64_t tid = UINT64_MAX, bool tryOnce = false)
// if tryLock is true then it will return immediately if the hash table item is locked.
{
INC_INT_STATS(int_debug5, 1); // the number of get_row
uint64_t starttime = get_sys_clock();
RC ret = RCOK;
uint64_t pkey = CALC_PKEY; //row->get_primary_key();
uint64_t hashedKey = uint64hash(pkey) & (locktable_size - 1);
#if VERBOSE_LEVEL & VERBOSE_TXNLV
stringstream ss;
ss << GET_THD_ID << " Lock " << row << " pkey " << pkey << " hashkey " << hashedKey << endl;
cout << ss.str();
#endif
LockTableValue &ltv = hashMap[hashedKey]; // fixed: "&ltv" had been corrupted to "<v"; must alias the shared bucket
//bool notfound = true;
if (tryLock && ltv.atomicLock == 1)
return Abort;
#if CC_ALG == SILO
// pre-abort
uint64_t v = row->manager->_tid_word;
if (v & LOCK_BIT)
return Abort;
if (tryOnce)
{
if (!ATOM_CAS(ltv.atomicLock, 0, 1))
return Abort;
// otherwise we have got the lock
}
else
{
while (!ATOM_CAS(ltv.atomicLock, 0, 1))
{
//if(row->manager->_tid_word != tid) // check both if locked and if not modified at the same time.
if (row->manager->_tid_word & LOCK_BIT) // do not perform write tid check
return Abort;
PAUSE
}
}
#else
#if CC_ALG != HEKATON
lock_t lt = (type == RD || type == SCAN) ? (lock_t)LOCK_SH : (lock_t)LOCK_EX;
if (row->manager->conflict_lock(lt, row->manager->get_lock_type())) // do not perform write tid check
return Abort;
#endif
if (tryOnce)
{
if (!ATOM_CAS(ltv.atomicLock, 0, 1))
return Abort;
// otherwise we have got the lock
}
else
{
while (!ATOM_CAS(ltv.atomicLock, 0, 1))
{
#if CC_ALG != HEKATON
lock_t lt = (type == RD || type == SCAN) ? (lock_t)LOCK_SH : (lock_t)LOCK_EX;
if (row->manager->conflict_lock(lt, row->manager->get_lock_type())) // do not perform write tid check
return Abort;
#endif
PAUSE
}
}
#endif
uint64_t afterCAS = get_sys_clock();
INC_INT_STATS(time_debug0, afterCAS - starttime);
INC_INT_STATS_V0(int_num_get_row, 1);
INC_INT_STATS_V0(int_locktable_volume, ltv.li.size());
uint32_t counter = 0;
// Stage 1: look for a live item with this key.
for (list<LockTableListItem *>::iterator it = ltv.li.begin(); it != ltv.li.end(); it++)
{
counter++;
auto &lti = *it; // fixed: "&lti" had been corrupted to "<i"
if (lti->key == pkey)
{
#if CC_ALG == SILO
if (data == NULL)
{
// we do not need sync operations
volatile uint64_t *v = &(row->manager->_tid_word);
if (*v & LOCK_BIT)
{
/*stringstream ss;
ss << GET_THD_ID << " Abort " << row << endl;
cout << ss.str();*/
ret = Abort;
}
else
{
*v = *v | LOCK_BIT;
ret = RCOK;
}
}
else
#endif
ret = lti->row->get_row(type, txn, data);
INC_INT_STATS(time_debug4, get_sys_clock() - afterCAS);
lti->evicted = false; // just in case lti was previously evicted; It's okay to re-use the previous lsn_vec
#if LOG_ALGORITHM == LOG_TAURUS
/*#if CC_ALG == SILO
lti->keep = 1;
#endif*/
#if COMPRESS_LSN_LT
assert(false);
for (uint64_t i = 1; i < lti->lsn_vec[0]; i++)
{
uint64_t index = (lti->lsn_vec[i]) & 31;
uint64_t lsn_i = (lti->lsn_vec[i]) >> 5;
if (lsn_i > lsn_vec[index])
{
lsn_vec[index] = lsn_i;
}
}
#else
#if VERBOSE_LEVEL & VERBOSE_TXNLV_UPDATE
stringstream s;
s << GET_THD_ID << " txn LV change from ";
for (uint32_t kk = 0; kk < g_num_logger; kk++)
{
s << lsn_vec[kk] << ", ";
}
#endif
uint64_t update_start_time = get_sys_clock();
if (type == WR)
{
// a write depends on both prior readers and prior writers
//#pragma simd
//#pragma vector aligned
#if UPDATE_SIMD
#if LOG_TYPE == LOG_COMMAND || !DISTINGUISH_COMMAND_LOGGING
SIMD_PREFIX *readLV = (SIMD_PREFIX *)lti->readLV;
SIMD_PREFIX *writeLV = (SIMD_PREFIX *)lti->lsn_vec;
SIMD_PREFIX *LV = (SIMD_PREFIX *)lsn_vec;
*LV = MM_MAX(*LV, MM_MAX(*readLV, *writeLV));
#else
SIMD_PREFIX *writeLV = (SIMD_PREFIX *)lti->lsn_vec;
SIMD_PREFIX *LV = (SIMD_PREFIX *)lsn_vec;
*LV = MM_MAX(*LV, *writeLV);
#endif
#else
#if LOG_TYPE == LOG_COMMAND
lsnType *readLV = lti->readLV;
lsnType *writeLV = lti->lsn_vec;
for (uint32_t i = 0; i < G_NUM_LOGGER; ++i)
{
auto readLVI = readLV[i];
auto writeLVI = writeLV[i];
auto maxLVI = readLVI > writeLVI ? readLVI : writeLVI;
if (maxLVI > lsn_vec[i])
lsn_vec[i] = maxLVI;
}
#else
lsnType *writeLV = lti->lsn_vec;
for (uint32_t i = 0; i < G_NUM_LOGGER; ++i)
{
auto writeLVI = writeLV[i];
if (writeLVI > lsn_vec[i])
lsn_vec[i] = writeLVI;
}
#endif
#endif
}
else
{
// a read depends only on prior writers
//#pragma simd
//#pragma vector aligned
#if UPDATE_SIMD
SIMD_PREFIX *writeLV = (SIMD_PREFIX *)lti->lsn_vec;
SIMD_PREFIX *LV = (SIMD_PREFIX *)lsn_vec;
*LV = MM_MAX(*LV, *writeLV);
#else
lsnType *writeLV = lti->lsn_vec;
for (uint32_t i = 0; i < G_NUM_LOGGER; ++i)
{
auto writeLVI = writeLV[i];
if (writeLVI > lsn_vec[i])
lsn_vec[i] = writeLVI;
}
#endif
}
INC_INT_STATS(time_lv_overhead, get_sys_clock() - update_start_time);
#if VERBOSE_LEVEL & VERBOSE_TXNLV_UPDATE
s << " to ";
for (uint32_t kk = 0; kk < g_num_logger; kk++)
{
s << lsn_vec[kk] << ", ";
}
s << endl;
cout << s.str();
#endif
#endif
#elif LOG_ALGORITHM == LOG_SERIAL
if (lti->lsn[0] > *max_lsn)
*max_lsn = lti->lsn[0];
#endif
//notfound = false;
//COMPILER_BARRIER
ltv.atomicLock = 0;
INC_INT_STATS(int_debug4, counter);
INC_INT_STATS(time_debug1, get_sys_clock() - afterCAS);
return ret; // assuming there is only one
}
}
INC_INT_STATS(int_debug6, 1);
uint64_t afterSearch = get_sys_clock();
INC_INT_STATS(int_debug4, counter);
INC_INT_STATS(time_debug1, afterSearch - afterCAS);
// Stage 2: try to use previously evicted items
for (list<LockTableListItem *>::iterator it = ltv.li.begin(); it != ltv.li.end(); it++)
{
auto &lti = *it; // fixed: "&lti" had been corrupted to "<i"
#if CC_ALG == NO_WAIT
if (lti->evicted || (lti->row->manager->get_lock_type() == LOCK_NONE && try_evict_item(lti))) // we do not need to actually set 'lti->evicted = true' here.
#elif CC_ALG == SILO && ATOMIC_WORD
//if(lti->evicted || (lti->keep == 0 && (lti->row->manager->_tid_word & LOCK_BIT)==0 && try_evict_item(lti))) // comment same as above
if (lti->evicted || ((lti->row->manager->_tid_word & LOCK_BIT) == 0 && try_evict_item(lti))) // comment same as above
#else
assert(false); // not implemented
#endif
{
lti->key = pkey;
lti->row = row;
lti->evicted = false;
row->_lti_addr = (void *)&(*lti);
#if LOG_ALGORITHM == LOG_TAURUS
//#if CC_ALG == SILO
// lti->keep=1;
//#endif
#if COMPRESS_LSN_LT
lti->lsn_vec[0] = 1; // starting point: the lsn vector is empty.
#else
uint64_t update_start_time = get_sys_clock();
#if UPDATE_SIMD
#if BIG_HASH_TABLE_MODE
memset(lti->lsn_vec, 0, G_NUM_LOGGER * sizeof(lti->lsn_vec[0]));
#else
// recycled item: seed with (persistent LSN - eviction buffer) as a safe lower bound
for (uint32_t i = 0; i < G_NUM_LOGGER; ++i)
{
lsnType ti = (lsnType)log_manager->_logger[i]->get_persistent_lsn();
lti->lsn_vec[i] = ti > g_locktable_evict_buffer ? ti - g_locktable_evict_buffer : 0;
}
#endif
SIMD_PREFIX *writeLV = (SIMD_PREFIX *)lti->lsn_vec;
SIMD_PREFIX *LV = (SIMD_PREFIX *)lsn_vec;
#if LOG_TYPE == LOG_COMMAND || !DISTINGUISH_COMMAND_LOGGING
SIMD_PREFIX *readLV = (SIMD_PREFIX *)lti->readLV;
*readLV = *LV;
#endif
*LV = MM_MAX(*LV, *writeLV);
#else
#if VERBOSE_LEVEL & VERBOSE_TXNLV_UPDATE
stringstream s;
s << GET_THD_ID << " txn LV change from ";
for (uint32_t kk = 0; kk < g_num_logger; kk++)
{
s << lsn_vec[kk] << ", ";
}
#endif
for (uint32_t i = 0; i < G_NUM_LOGGER; ++i)
{
// do not need to atomic latch here.
#if BIG_HASH_TABLE_MODE
lti->readLV[i] = lti->lsn_vec[i] = 0; // one-to-one mapping, only initialized once
#else
lsnType ti = (lsnType)log_manager->_logger[i]->get_persistent_lsn();
lti->lsn_vec[i] = ti > g_locktable_evict_buffer ? ti - g_locktable_evict_buffer : 0;
#endif
lti->readLV[i] = lti->lsn_vec[i];
if (lti->lsn_vec[i] > lsn_vec[i])
lsn_vec[i] = lti->lsn_vec[i];
// TODO: SIMD here
}
#if VERBOSE_LEVEL & VERBOSE_TXNLV_UPDATE
s << " to ";
for (uint32_t kk = 0; kk < g_num_logger; kk++)
{
s << lsn_vec[kk] << ", ";
}
s << endl;
cout << s.str();
#endif
#endif
INC_INT_STATS(time_lv_overhead, get_sys_clock() - update_start_time);
#endif
#elif LOG_ALGORITHM == LOG_SERIAL
lti->lsn[0] = (lsnType)log_manager->_logger[0]->get_persistent_lsn();
#endif
#if CC_ALG == SILO
if (data == NULL)
{
// we do not need sync operations
volatile uint64_t *v = &(row->manager->_tid_word);
if (*v & LOCK_BIT)
{
ret = Abort;
}
else
{
*v = *v | LOCK_BIT;
ret = RCOK;
}
}
else
#endif
ret = row->get_row(type, txn, data);
ltv.atomicLock = 0;
INC_INT_STATS(time_debug2, get_sys_clock() - afterSearch);
return ret;
}
}
INC_INT_STATS(int_debug7, 1);
uint64_t afterReuse = get_sys_clock();
INC_INT_STATS(time_debug2, afterReuse - afterSearch);
// Stage 3: nothing recyclable — allocate a brand-new item for this row.
LockTableListItem *lti = (LockTableListItem *)MALLOC(sizeof(LockTableListItem), GET_THD_ID);
new (lti) LockTableListItem(pkey, row);
//LockTableListItem *lti = new LockTableListItem(pkey, row);
ltv.li.push_front(lti);
#if CC_ALG == SILO
if (data == NULL)
{
// we do not need sync operations
volatile uint64_t *v = &(row->manager->_tid_word);
if (*v & LOCK_BIT)
{
/*stringstream ss;
ss << GET_THD_ID << " Abort " << row << endl;
cout << ss.str();*/
ret = Abort;
}
else
{
*v = *v | LOCK_BIT;
ret = RCOK;
}
}
else
#endif
ret = row->get_row(type, txn, data);
#if LOG_ALGORITHM == LOG_TAURUS
#if COMPRESS_LSN_LT
lti->lsn_vec[0] = 1; // starting point: the lsn vector is empty.
#else
uint64_t update_start_time = get_sys_clock();
#if UPDATE_SIMD
for (uint32_t i = 0; i < G_NUM_LOGGER; ++i)
{
// do not need to atomic latch here.
#if BIG_HASH_TABLE_MODE
lti->lsn_vec[i] = 0; // one-to-one mapping, only initialized once
#else
lsnType ti = (lsnType)log_manager->_logger[i]->get_persistent_lsn();
lti->lsn_vec[i] = ti > g_locktable_evict_buffer ? ti - g_locktable_evict_buffer : 0;
#endif
}
SIMD_PREFIX *writeLV = (SIMD_PREFIX *)lti->lsn_vec;
SIMD_PREFIX *LV = (SIMD_PREFIX *)lsn_vec;
#if LOG_TYPE == LOG_COMMAND || !DISTINGUISH_COMMAND_LOGGING
SIMD_PREFIX *readLV = (SIMD_PREFIX *)lti->readLV;
*readLV = *LV;
#endif
*LV = MM_MAX(*LV, *writeLV);
#else
for (uint32_t i = 0; i < G_NUM_LOGGER; ++i)
{
// do not need to atomic latch here.
#if BIG_HASH_TABLE_MODE
lti->readLV[i] = lti->lsn_vec[i] = 0; // one-to-one mapping, only initialized once
#else
lsnType ti = (lsnType)log_manager->_logger[i]->get_persistent_lsn();
lti->lsn_vec[i] = ti > g_locktable_evict_buffer ? ti - g_locktable_evict_buffer : 0;
lti->readLV[i] = lti->lsn_vec[i];
if (lti->lsn_vec[i] > lsn_vec[i])
lsn_vec[i] = lti->lsn_vec[i];
#endif
// TODO: optimize the copy process here
}
#endif
INC_INT_STATS(time_lv_overhead, get_sys_clock() - update_start_time);
#endif
#elif LOG_ALGORITHM == LOG_SERIAL
lti->lsn[0] = (lsnType)log_manager->_logger[0]->get_persistent_lsn();
#endif
//COMPILER_BARRIER
row->_lti_addr = (void *)lti; // this must be updated after lti->lsn_vec is ready.
ltv.atomicLock = 0;
INC_INT_STATS(time_debug3, get_sys_clock() - afterReuse);
return ret;
}
// Folds the row's stored per-logger write LSNs into the txn's lsn_vec
// (element-wise max) without touching the hash table. If the row has no
// lock-table item yet, falls back to a conservative lower bound derived
// from the loggers' persistent LSNs. Always returns RCOK.
RC updateLSN(row_t *row, lsnType *lsn_vec)
{
#if LOG_ALGORITHM == LOG_TAURUS
// we do not necessarily need to introduce the item to the hash table.
// Hoisted out of the loop: row->_lti_addr is loop-invariant.
LockTableListItem *lti = (LockTableListItem *)row->_lti_addr;
for (uint32_t i = 0; i < G_NUM_LOGGER; ++i)
{
uint64_t temp;
if (lti == NULL)
{
// do not need to atomic latch here.
#if BIG_HASH_TABLE_MODE
temp = 0; // log_manager->_logger[i]->get_persistent_lsn();
#else
lsnType ti = (lsnType)log_manager->_logger[i]->get_persistent_lsn();
temp = ti > g_locktable_evict_buffer ? ti - g_locktable_evict_buffer : 0;
#endif
}
else
{
temp = lti->lsn_vec[i];
// this is actually correct since we do not garbage collect locktable items
// though this might lead to higher lsn_vec (false dependencies)
}
if (temp > lsn_vec[i])
lsn_vec[i] = temp;
}
#endif
return RCOK;
}
private:
uint64_t locktable_size;
// Constructs the global lock table (runs once, single-threaded, from the
// Meyers singleton). Sizes the bucket array from the workload, rounds the
// count up to a power of two (uint64hash masks with locktable_size-1),
// caps it, then pre-populates every bucket with g_locktable_init_slots
// pooled items, interleaving the item+LSN-cache buffers across two MALLOC
// arenas (even buckets -> arena 0, odd -> arena 1).
LockTable()
{ // assuming single thread
if (g_log_recover)
return;
//evictLock = 0;
uint64_t table_size = g_synth_table_size / g_virtual_part_cnt;
#if WORKLOAD == YCSB
locktable_size = g_locktable_modifier * g_thread_cnt * g_req_per_query;
#elif WORKLOAD == TPCC
locktable_size = 25 * g_locktable_modifier * g_thread_cnt;
#endif
//if(g_locktable_modifier <= 1280003) // if mem is small, we prefer a 2^k.
{
// Round up to the next power of two: k ends one past the highest set bit.
uint32_t k;
for (k = 0; k < 64; k++)
if (locktable_size >> k)
continue;
else
break;
locktable_size = 1ull << k;
}
if (2147483648L < locktable_size)
locktable_size = 1073741824L;
// otherwise it would take too long to initialize.
//hashMap = (LockTableListItem *) MALLOC(sizeof(LockTableListItem) * locktable_size, GET_THD_ID);
//new (hashMap) LockTableListItem();
cout << "Start Initializing Locktable, size " << locktable_size << endl;
hashMap = (LockTableValue *)MALLOC(sizeof(LockTableValue) * locktable_size, GET_THD_ID);
uint32_t ltiSize = aligned(sizeof(LockTableListItem));
cout << "ltiSize=" << ltiSize << endl;
#if UPDATE_SIMD
uint32_t ltiCacheSize = aligned(sizeof(lsnType) * MAX_LOGGER_NUM_SIMD * 2);
#else
uint32_t ltiCacheSize = aligned(sizeof(lsnType) * g_num_logger * 2);
#endif
//char *ltiBuffer = (char *)MALLOC((ltiSize) * (locktable_size) * g_locktable_init_slots, 0);
//char *ltiCache = (char *) MALLOC(ltiCacheSize * (locktable_size) * g_locktable_init_slots, 0);
char *ltiBuffer0 = (char *)MALLOC((ltiSize + ltiCacheSize) * (locktable_size / 2 + 1) * g_locktable_init_slots, 0);
char *ltiBuffer1 = (char *)MALLOC((ltiSize + ltiCacheSize) * (locktable_size / 2 + 1) * g_locktable_init_slots, 1);
char * ltiBuffer[2] = {ltiBuffer0, ltiBuffer1};
/*
char * ltiCache0 = (char *) MALLOC(ltiCacheSize * (locktable_size / 2 + 1) * g_locktable_init_slots, 0);
char * ltiCache1 = (char *) MALLOC(ltiCacheSize * (locktable_size / 2 + 1) * g_locktable_init_slots, 1);
char * ltiCache[2] = {ltiCache0, ltiCache1};
*/
//std::uninitialized_fill_n(hashMap, locktable_size, LockTableValue());
#pragma omp parallel for
for (uint64_t i = 0; i < locktable_size; i++) // parallel init
{
new (hashMap + i) LockTableValue();
for (uint32_t k = 0; k < g_locktable_init_slots; k++)
{
// interleaving
//LockTableListItem *lti = (LockTableListItem *)(ltiBuffer + (ltiSize) * (i * g_locktable_init_slots + k));
LockTableListItem *lti = (LockTableListItem *)(ltiBuffer[i%2] + (ltiSize + ltiCacheSize) * ((i/2) * g_locktable_init_slots + k));
//new (lti) LockTableListItem(-1, NULL);
//new (lti) LockTableListItem(-1, NULL, ltiCache + ltiCacheSize * i * g_locktable_init_slots + k, ltiCacheSize);
new (lti) LockTableListItem(-1, NULL, ltiBuffer[i%2] + (ltiSize + ltiCacheSize) * ((i/2)* g_locktable_init_slots + k) + ltiSize, ltiCacheSize);
// Assumption: no pkey == -1.
LockTableValue &ltv = hashMap[i]; // fixed: "&ltv" had been corrupted to "<v"; a by-value copy would push the pooled item into a temporary and leak the whole pool
lti->evicted = true;
ltv.li.push_front(lti);
}
}
cout << "Locktable Initialized, size " << locktable_size << ", schema table size " << table_size << endl;
// need to know when this happen
//new (hashMap) LockTableValue();
}
// C++ 03
// ========
// Don't forget to declare these two. You want to make sure they
// are unacceptable otherwise you may accidentally get copies of
// your singleton appearing.
// LockTable(LockTable const&); // Don't Implement
// void operator=(LockTable const&); // Don't implement
// C++ 11
// =======
// We can use the better technique of deleting the methods
// we don't want.
public:
LockTable(LockTable const &) = delete;
void operator=(LockTable const &) = delete;
};
#else
#endif
|
test9.c | int g1;
/* OpenMP analysis test fixture.
 * The bare integer-literal statements ("0;", "2;", ...) are intentional
 * no-op markers that label program points in the expected tool output;
 * do not remove them. One branch of the if contains a barrier while the
 * other recurses — a barrier-matching test case. */
void foo (int a) {
0;
if (1) {
2;
#pragma omp barrier
3;
} else {
4;
foo(1);
5;
}
}
/* Test driver: a parallel region with atomic writes to the shared variable x
 * and a different number of barriers on each branch of the if — exercises
 * barrier alignment across divergent control flow. The integer-literal
 * statements are intentional program-point markers (see foo above is NOT
 * assumed; markers are a fixture convention in this file). */
int main() {
int x;
#pragma omp parallel
{
x = 101; /* unsynchronized shared write (intentional for the test) */
6;
if (7) {
8;
#pragma omp atomic write
x = 102;
foo(9);
10;
} else {
11;
#pragma omp atomic write
x = 103;
x = x;
#pragma omp barrier
12;
#pragma omp barrier
13;
}
14;
#pragma omp barrier
15;
}
}
|
BBox.h | #ifndef _BBOX_H_
#define _BBOX_H_
#include "Ray.h"
// Maximum of two floats. `static inline` so each including TU gets a
// collapsible copy instead of an unused-function warning / duplicate code.
// Like std::max, returns b when the comparison is false (including NaN input).
static inline float maxf(const float& a, const float& b)
{
	return a > b ? a : b;
}
// Minimum of two floats. `static inline` for the same reason as maxf.
// Like std::min, returns b when the comparison is false (including NaN input).
static inline float minf(const float& a, const float& b)
{
	return a < b ? a : b;
}
/*
static void minf2(float* a, float* b)
{
a = a < b ? a : b;
}
*/
struct BBox
{
vec3 bbox[2];
// Default: an inverted (empty) box — min at +1e10, max at -1e10 — so that
// expanding it with any real box yields that box.
BBox() { bbox[0] = { 1e10f, 1e10f, 1e10f }, bbox[1] = { -1e10f, -1e10f, -1e10f }; }
// Construct from explicit corners: bbox[0] = minimum corner, bbox[1] = maximum corner.
BBox(vec3 bmin, vec3 bmax) { bbox[0] = bmin; bbox[1] = bmax; }
/*bool __fastcall hit_axis_tl_mint(const Ray& r, float& tl, float& mint) const
{
float txmax[3];
for (int i = 2; i >= 0; --i)
{
txmax[i] = bbox[r.sign[i]][i] * r.invd[i] + r.roinvd[i];
}
float th = minf(txmax[2], minf(txmax[1], txmax[0]));
if (th < 0.0f)
return false;
float txmin = bbox[1 - r.sign[0]].x * r.invd.x + r.roinvd.x;
float tymin = bbox[1 - r.sign[1]].y * r.invd.y + r.roinvd.y;
float tzmin = bbox[1 - r.sign[2]].z * r.invd.z + r.roinvd.z;
tl = maxf(txmin, maxf(tymin, tzmin));
//if (tl >= mint)
// return false;
return tl <= 0.0f || tl <= th * 1.00000024f && tl <= mint;
//return tl < mint || tl <= th * 1.00000024f;
}
*/
// Ray/AABB slab test. On a hit, `tl` receives the entry distance (the
// farthest of the three near-plane intersections). Returns true when the
// ray origin is inside the box (tl <= 0) or the entry lies no later than
// the slightly inflated exit distance; the 1.00000024f factor pads the
// exit to absorb floating-point rounding in the slab arithmetic.
// Uses the ray's precomputed sign table, 1/direction and -origin/direction.
bool __fastcall hit_axis_tl(const Ray& r, float& tl) const
{
	// Far-plane distance along each axis (bbox[r.sign[axis]] is the far slab).
	float tfar[3];
	for (int axis = 2; axis >= 0; --axis)
		tfar[axis] = bbox[r.sign[axis]][axis] * r.invd[axis] + r.roinvd[axis];
	const float th = minf(tfar[2], minf(tfar[1], tfar[0]));
	if (th < 0.0f)
		return false; // the box is entirely behind the ray
	// Near-plane distances; their maximum is the interval entry point.
	const float nx = bbox[1 - r.sign[0]].x * r.invd.x + r.roinvd.x;
	const float ny = bbox[1 - r.sign[1]].y * r.invd.y + r.roinvd.y;
	const float nz = bbox[1 - r.sign[2]].z * r.invd.z + r.roinvd.z;
	tl = maxf(nx, maxf(ny, nz));
	return tl <= 0.0f || tl <= th * 1.00000024f;
}
// Ray/AABB slab test for shadow rays that reports the exit distance.
//
// Computes both the entry (tl, local) and exit (th, out-param) distances
// in one pass; r.sign[axis] selects the near/far corner per axis.
//
// @param r   ray with precomputed invd / roinvd / sign fields
// @param th  out: exit distance (min of the three far-plane distances);
//            only meaningful when the function returns true
// @return    true if the ray's positive half-line overlaps the box
bool __fastcall hit_shadow_th(const Ray& r, float& th) const
{
	float txmin = bbox[1 - r.sign[0]].x * r.invd.x + r.roinvd.x;
	float txmax = bbox[r.sign[0]].x * r.invd.x + r.roinvd.x;
	float tymin = bbox[1 - r.sign[1]].y * r.invd.y + r.roinvd.y;
	float tymax = bbox[r.sign[1]].y * r.invd.y + r.roinvd.y;
	float tzmin = bbox[1 - r.sign[2]].z * r.invd.z + r.roinvd.z;
	float tzmax = bbox[r.sign[2]].z * r.invd.z + r.roinvd.z;
	float tl = maxf(txmin, maxf(tymin, tzmin));
	th = minf(txmax, minf(tymax, tzmax));
	// 1.00000024f ≈ 1 + 4 ulp: conservative padding against rounding error
	// (robust BVH traversal); th >= 0 rejects boxes entirely behind the ray.
	return tl <= th * 1.00000024f && th >= 0.0f;
}
vec3 c() const { return (bbox[0] + bbox[1]) * 0.5f; }
void expand(const BBox& box)
{
bbox[0] = vec3(min(bbox[0].x, box.bbox[0].x), min(bbox[0].y, box.bbox[0].y), min(bbox[0].z, box.bbox[0].z));
bbox[1] = vec3(max(bbox[1].x, box.bbox[1].x), max(bbox[1].y, box.bbox[1].y), max(bbox[1].z, box.bbox[1].z));
}
// Returns the component-wise union of this box and `box` as a new BBox,
// modifying neither operand (unlike expand(), which mutates in place).
// Marked const: the body only reads bbox[].
BBox expand_box(const BBox& box) const
{
	vec3 v1(min(bbox[0].x, box.bbox[0].x), min(bbox[0].y, box.bbox[0].y), min(bbox[0].z, box.bbox[0].z));
	vec3 v2(max(bbox[1].x, box.bbox[1].x), max(bbox[1].y, box.bbox[1].y), max(bbox[1].z, box.bbox[1].z));
	return BBox(v1, v2);
}
void expand(const vec3& p)
{
//bbox[0] = minvec(bbox[0], p);
//bbox[1] = maxvec(bbox[1], p);
bbox[0] = vec3(min(bbox[0].x, p.x), min(bbox[0].y, p.y), min(bbox[0].z, p.z));
bbox[1] = vec3(max(bbox[1].x, p.x), max(bbox[1].y, p.y), max(bbox[1].z, p.z));
}
uint32_t maxDim()
{
vec3 extend(bbox[1] - bbox[0]);
if (extend.x > extend.y && extend.x > extend.z) return 0;
else if (extend.y > extend.z) return 1;
return 2;
}
uint32_t minDim()
{
vec3 extend(bbox[1] - bbox[0]);
if (extend.x < extend.y && extend.x < extend.z) return 0;
else if (extend.y < extend.z) return 1;
return 2;
}
float area()
{
vec3 extend(bbox[1] - bbox[0]);
return (extend.x * extend.y + extend.y * extend.z + extend.z * extend.x);
}
};
#endif // ! _BBOX_H_
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.